query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (sequence, lengths 19-20) | metadata (dict) |
---|---|---|---|
Return a list of windows for the current desktop. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to look for windows. | def list(desktop=None):
root_window = root(desktop)
window_list = [window for window in root_window.descendants() if window.displayed()]
window_list.insert(0, root_window)
return window_list | [
"def user32_EnumDesktopWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\", \"lpfn\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_EnumDesktops(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hwinsta\", \"lpEnumFunc\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def enumerate_windows():\n import ctypes.wintypes\n monitors = []\n\n def callback(_monitor, _dc, rect, _data):\n \"\"\"\n Callback for the ctypes EnumDisplayMonitors win32 function.\n \"\"\"\n rct = rect.contents\n monitors.append(Monitor(\n rct.left,\n rct.top,\n rct.right - rct.left,\n rct.bottom - rct.top))\n return 1\n\n monitor_enum_proc = ctypes.WINFUNCTYPE(\n ctypes.c_int,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.POINTER(ctypes.wintypes.RECT),\n ctypes.c_double)\n\n ctypes.windll.user32.EnumDisplayMonitors(\n 0, 0, monitor_enum_proc(callback), 0)\n\n return monitors",
"def windowsDesktopEntries():\n majorVersion, minorVersion = sys.version_info[:2]\n scriptsDir = sysconfig.get_path(\"scripts\")\n entriesTemplates = [\n (\"eric6 (Python {0}.{1}).lnk\",\n os.path.join(scriptsDir, \"eric6.exe\"),\n os.path.join(scriptsDir, \"eric6.ico\")\n ),\n (\"eric6 Browser (Python {0}.{1}).lnk\",\n os.path.join(scriptsDir, \"eric6_browser.exe\"),\n os.path.join(scriptsDir, \"ericWeb48.ico\")\n ),\n ]\n \n return [\n (e[0].format(majorVersion, minorVersion), e[1], e[2])\n for e in entriesTemplates\n ]",
"def get_windows_from_session(sess_name):\n cmd = (CMD_LIST_WINDOWS % sess_name).split(config.CMD_SEP)\n s = util.exec_cmd(cmd)\n return s.split('\\n')",
"def list(self, sleep=1):\n\n if sleep > 0:\n time.sleep(sleep)\n\n if self.useUiAutomator:\n raise Exception(\"Not implemented yet: listing windows with UiAutomator\")\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((VIEW_SERVER_HOST, self.localPort))\n except socket.error, ex:\n raise RuntimeError(\"ERROR: Connecting to %s:%d: %s\" % (VIEW_SERVER_HOST, self.localPort, ex))\n s.send('list\\r\\n')\n received = \"\"\n doneRE = re.compile(\"DONE\")\n while True:\n received += s.recv(1024)\n if doneRE.search(received[-7:]):\n break\n s.close()\n\n self.windows = {}\n for line in received.split('\\n'):\n if not line:\n break\n if doneRE.search(line):\n break\n values = line.split()\n if len(values) > 1:\n package = values[1]\n else:\n package = \"UNKNOWN\"\n if len(values) > 0:\n wid = values[0]\n else:\n wid = '00000000'\n self.windows[int('0x' + wid, 16)] = package\n return self.windows",
"def user32_OpenDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def _get_window_list(self):\n if not self.workspace:\n logger.debug(\"Getting list of windows.\")\n leaves = self.tree.leaves()\n if self.scratch:\n return [\n leave\n for leave in leaves\n if leave.parent.scratchpad_state in [\"changed\", \"fresh\"]\n ]\n else:\n return leaves\n else:\n logger.debug(\n \"Getting list of windows on workspace: {}.\".format(self.workspace)\n )\n workspaces = self.tree.workspaces()\n for workspace in workspaces:\n if workspace.name == self.workspace:\n return workspace.leaves()\n return []",
"def current_desktop(self) -> int:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_CURRENT_DESKTOP\"]\n )\n return cast(List[int], result)[0]",
"def check_desktop_thread(self, addr_space):\n \n ret = dict()\n for windowstation in windowstations.WndScan(self._config).calculate():\n for desktop in windowstation.desktops():\n for thread in desktop.threads():\n process = thread.ppi.Process.dereference()\n if process == None:\n continue\n ret[process.obj_vm.vtop(process.obj_offset)] = process\n \n return ret",
"def user32_SwitchDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def find_windows_for_process(process_id):\n pids = utils.get_process_ids(process_id)\n if not pids:\n return []\n\n visible_windows = []\n for pid in pids:\n app = application.Application()\n try:\n app.connect(process=pid)\n except:\n logs.log_warn('Unable to connect to process.')\n continue\n\n try:\n windows = app.windows()\n except:\n logs.log_warn('Unable to get application windows.')\n continue\n\n for window in windows:\n try:\n window.type_keys('')\n except:\n continue\n\n visible_windows.append(window)\n\n return visible_windows",
"def get_clients(self) -> List[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_CLIENT_LIST\"], type=self.atom[\"WINDOW\"] or 0\n )\n return [] if not result else [self.create_window(window_id=x) for x in cast(List[int], result)]",
"def user32_CreateDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CreateDesktopEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\", \"ulHeapSize\", \"pvoid\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_stacked_clients(self) -> List[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy,\n window=self.root,\n property=self.atom[\"_NET_CLIENT_LIST_STACKING\"],\n type=self.atom[\"WINDOW\"] or 0,\n )\n return [] if not result else [self.create_window(window_id=r) for r in cast(List[int], result)]",
"def test_windows() -> None:\n assert windows([3, 4, 6, 2, 3], 2) == [[3, 4], [4, 6], [6, 2], [2, 3]]\n assert windows(['a', 1, 6.0, False], 3) == [['a', 1, 6.0], [1, 6.0, False]]\n assert windows([], 1) == []",
"def get_window_by_prefix(prefix, list=None):\n\n result = [x for x in WidgetTree(list) if\n isinstance(x, Gtk.Window) and\n x.get_title() and\n x.get_title().startswith(prefix)]\n if result:\n return result[0]\n else:\n return result",
"def cmd_internal_windows(self):\r\n return [\r\n i.info() for i in self.windowMap.values()\r\n if isinstance(i, window.Internal)\r\n ]",
"def workspacesByProductType(self, *args) -> \"adsk::core::Ptr< adsk::core::WorkspaceList >\" :\n return _core.UserInterface_workspacesByProductType(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the root window for the current desktop. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to look for windows. | def root(desktop=None):
# NOTE: The desktop parameter is currently ignored and X11 is tested for
# NOTE: directly.
if _is_x11():
return Window(None)
else:
raise OSError("Desktop '%s' not supported" % use_desktop(desktop)) | [
"def current_desktop(self) -> int:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_CURRENT_DESKTOP\"]\n )\n return cast(List[int], result)[0]",
"def _desktopwidget(self):\r\n if self.__desktopwidget is None:\r\n self.__desktopwidget = QApplication.desktop()\r\n return self.__desktopwidget",
"def user32_OpenDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def current_window(self) -> Optional[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_ACTIVE_WINDOW\"], type=self.atom[\"WINDOW\"]\n )\n return None if not result else self.create_window(window_id=cast(List[xlib.Window], result)[0])",
"def get_focused_window(self):\n focus = self.display.get_input_focus()\n if focus.focus.get_wm_class() is None:\n # TODO Climb the tree until find something with a class property\n # (The immediate parent works well enough for now, for the few\n # cases I've encountered.)\n query = focus.focus.query_tree()\n window = query.parent if query else None\n else:\n window = focus.focus\n if not window:\n return (None, None)\n return (window.get_wm_class(), window.get_wm_name())",
"def user32_SwitchDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_EnumDesktopWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\", \"lpfn\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def main_window():\n if FindWindow(None, 'MainWindow'):\n main = Application(backend = 'uia').connect(title_re = 'MainWindow')\n return main['MainWindow']\n else:\n return None",
"def user32_GetThreadDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwThreadId\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def desktop_file_path() -> Path:\n return Path(APPLICATIONS_FOLDER).expanduser().joinpath(\"fuzzlecheck.desktop\")",
"def user32_CreateDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CreateDesktopEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\", \"ulHeapSize\", \"pvoid\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def _get_main_window(self):\n return self._window_ref()",
"def change_window_desktop(self, window: xlib.Window, desktop: int) -> None:\n if desktop < 0:\n return\n\n self._send_event(window=window, mtype=self.atom[\"_NET_WM_DESKTOP\"], data=[desktop])\n self._flush()",
"def desktop_name(self):\n self.writeCommand('desktop_name')\n return self",
"def user32_GetTopWindow(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def find_app(self) -> Optional[Gio.AppInfo]:\n for desktop_id in self.desktop_ids:\n try:\n # pygobject raises a type error if new returns NULL, for whatever reason\n return Gio.DesktopAppInfo.new(desktop_id)\n except TypeError:\n continue\n return None",
"def user32_SetThreadDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_OpenInputDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def detect_window_system():\n if os.getenv('WAYLAND_DISPLAY') is not None and os.getenv('GDK_BACKEND') is None:\n return \"wayland\"\n elif os.getenv('DISPLAY') is not None:\n return \"x11\"\n else:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find and return windows using the given 'callable' for the current desktop. If the optional 'desktop' parameter is specified then attempt to use that particular desktop environment's mechanisms to look for windows. | def find(callable, desktop=None):
return root(desktop).find(callable) | [
"def user32_EnumDesktopWindows(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\", \"lpfn\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_OpenDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_EnumDesktops(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hwinsta\", \"lpEnumFunc\", \"lParam\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_FindWindowEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hwndParent\", \"hwndChildAfter\", \"lpszClass\", \"lpszWindow\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CreateDesktopEx(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\", \"ulHeapSize\", \"pvoid\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_CreateDesktop(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDesktop\", \"lpszDevice\", \"pDevmode\", \"dwFlags\", \"dwDesiredAccess\", \"lpsa\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def check_desktop_thread(self, addr_space):\n \n ret = dict()\n for windowstation in windowstations.WndScan(self._config).calculate():\n for desktop in windowstation.desktops():\n for thread in desktop.threads():\n process = thread.ppi.Process.dereference()\n if process == None:\n continue\n ret[process.obj_vm.vtop(process.obj_offset)] = process\n \n return ret",
"def _find_window(self, predicate, timeout = 10.0):\n window_handle = None\n end_time = time.time() + timeout\n while window_handle is None and end_time > time.time():\n for handle in self.driver.window_handles:\n if predicate(handle):\n window_handle = handle\n break\n\n return window_handle",
"def user32_OpenInputDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwFlags\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def user32_SwitchDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hDesktop\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def get_window_by_prefix(prefix, list=None):\n\n result = [x for x in WidgetTree(list) if\n isinstance(x, Gtk.Window) and\n x.get_title() and\n x.get_title().startswith(prefix)]\n if result:\n return result[0]\n else:\n return result",
"def workspace_screen(workspace, screens):\n for screen in screens:\n screen_ws = screen[\"current_workspace\"]\n if workspace in screen_ws:\n # FIXME: How to handle numbers higher than 9? 10 will match 1, right?\n return screen\n return None",
"def user32_GetThreadDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"dwThreadId\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def _find_tmux_window(self, session) -> i3ipc.Con:\n session_name = escape(session.name)\n window_name = escape(session.attached_window.name)\n rgx = self._conf['tmux_title_rgx'].format_map(defaultdict(str,\n session = session_name,\n window = window_name\n ))\n\n tmux_win = self._i3.get_tree().find_named(rgx)\n # just in case filter by container type - we want regular & floating window containers:\n tmux_win = list(filter(lambda c: c.type.endswith('con'), tmux_win))\n\n if tmux_win:\n if len(tmux_win) > 1:\n self.logger.debug('found [{}] windows using regex [{}], but expected 1'\n .format(len(tmux_win), rgx))\n self.logger.debug('you should likely make conf.tmux_title_rgx more limiting')\n return tmux_win[0]\n\n self.logger.debug('found no windows using regex [{}]'.format(rgx))\n return None",
"def load_desktop(name, data):\n for m in Module_Metaclass.modules:\n child = m()._load_desktop(name, data)\n if child:\n return child",
"def enumerate_windows():\n import ctypes.wintypes\n monitors = []\n\n def callback(_monitor, _dc, rect, _data):\n \"\"\"\n Callback for the ctypes EnumDisplayMonitors win32 function.\n \"\"\"\n rct = rect.contents\n monitors.append(Monitor(\n rct.left,\n rct.top,\n rct.right - rct.left,\n rct.bottom - rct.top))\n return 1\n\n monitor_enum_proc = ctypes.WINFUNCTYPE(\n ctypes.c_int,\n ctypes.c_ulong,\n ctypes.c_ulong,\n ctypes.POINTER(ctypes.wintypes.RECT),\n ctypes.c_double)\n\n ctypes.windll.user32.EnumDisplayMonitors(\n 0, 0, monitor_enum_proc(callback), 0)\n\n return monitors",
"def user32_PaintDesktop(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hdc\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def windowsDesktopEntries():\n majorVersion, minorVersion = sys.version_info[:2]\n scriptsDir = sysconfig.get_path(\"scripts\")\n entriesTemplates = [\n (\"eric6 (Python {0}.{1}).lnk\",\n os.path.join(scriptsDir, \"eric6.exe\"),\n os.path.join(scriptsDir, \"eric6.ico\")\n ),\n (\"eric6 Browser (Python {0}.{1}).lnk\",\n os.path.join(scriptsDir, \"eric6_browser.exe\"),\n os.path.join(scriptsDir, \"ericWeb48.ico\")\n ),\n ]\n \n return [\n (e[0].format(majorVersion, minorVersion), e[1], e[2])\n for e in entriesTemplates\n ]",
"def user32_OpenWindowStation(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszWinSta\", \"fInherit\", \"dwDesiredAccess\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def find_windows_for_process(process_id):\n pids = utils.get_process_ids(process_id)\n if not pids:\n return []\n\n visible_windows = []\n for pid in pids:\n app = application.Application()\n try:\n app.connect(process=pid)\n except:\n logs.log_warn('Unable to connect to process.')\n continue\n\n try:\n windows = app.windows()\n except:\n logs.log_warn('Unable to get application windows.')\n continue\n\n for window in windows:\n try:\n window.type_keys('')\n except:\n continue\n\n visible_windows.append(window)\n\n return visible_windows"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a decorator that attaches a callback to a hook. Three hooks are currently implemented. | def hook(self, name):
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper | [
"def hook(func: Callable):\n parameters, return_annotation = _extract_params(func, extract_return=True)\n return Hook(str(func), parameters, return_annotation)",
"def set_hook(f: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n @wraps(f)\n def set_hook_wrapper(self, **kwargs):\n f(self, **kwargs)\n self.attribution_model.is_hooked = True\n\n return set_hook_wrapper",
"def hooked(fn):\n\n @functools.wraps(fn)\n def hooked_inner(*args, **kwargs):\n hs = HookedState(\n key=fn.func_name,\n fn=fn,\n args=args,\n kwargs=kwargs\n )\n return hs()\n\n return hooked_inner",
"def logging_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available logging hooks.\n\n :param type cls: logging hook class.\n\n :returns: logging hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n logging_services.register_hook(instance)\n\n return cls\n\n return decorator",
"def on_hook(self, hook: \"Hook\") -> None:\n try:\n if self.hooked is not None:\n func, args_gen = self.hooked[type(hook)]\n else:\n return\n except KeyError:\n return\n else:\n hook(func, args_gen)",
"def build_decorator(cls, what):\n def _decorator(self, func):\n \"\"\"\n Actual hook decorator\n \"\"\"\n HookRegistry().register(self._when, what, func) # pylint: disable=protected-access\n return func\n _decorator.__name__ = _decorator.fn_name = what\n setattr(cls, what, _decorator)",
"def addCallback(*args, **kwargs):\n \n pass",
"def get_advice(**kwds):\n def decorate(f):\n for k in kwds:\n if k == 'pre_fn' or k == 'post_fn' or k == 'pre_process_fn' or k == 'post_processs_fn' or k == 'init_fn':\n setattr(f, k, kwds[k])\n return f\n return decorate",
"def decorator(hookable: Union[Route, Callable]):\n nonlocal self, hook_function\n hook_function = _prepare_async_hook_function(\n hook_function, *args, **kwargs\n )\n\n if isinstance(hookable, self.route_class):\n route = hookable\n self.store_hook(hook, hook_function, route)\n return route\n else:\n return _with_hook(hookable, hook, hook_function)",
"def decorator_with_args(decorator_to_enhance):\n\n # We use the same trick we did to pass arguments\n def decorator_maker(*args, **kwargs):\n\n # We create on the fly a decorator that accepts only a function\n # but keeps the passed arguments from the maker.\n def decorator_wrapper(func):\n\n # We return the result of the original decorator, which, after all,\n # IS JUST AN ORDINARY FUNCTION (which returns a function).\n # Only pitfall: the decorator must have this specific signature or it won't work:\n return decorator_to_enhance(func, *args, **kwargs)\n\n return decorator_wrapper\n\n return decorator_maker",
"def decorator(cls):\n\n instance = cls()\n logging_services.register_hook(instance)\n\n return cls",
"def _before(hook):\n return wraps(hook)(hooks.before(hook))",
"def decorate_exception_hook(func: Callable) -> Callable:\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n with exception_hook():\n return func(*args, **kwargs)\n return wrapped",
"def register_hook(self, hook, function):\n if hook in self.hooks:\n self.hooks[hook].append(function)\n else:\n self.hooks[hook] = [ function ]",
"def hook_decorator(f):\n\n def wrapper(*args, **kwargs):\n try:\n f(*args, **kwargs)\n except Exception, e:\n import traceback\n print traceback.print_exc()\n print >>sys.stderr, WEIRD_STUFF_MSG\n # Do not raise\n return wrapper",
"def add_hook(function: Callable[[Any], Any],\n pre_exec_hook: Optional[Callable[[Any], Any]] = None,\n post_exec_hook: Optional[Callable[[Any], Any]] = None):\n if pre_exec_hook is None and post_exec_hook is None:\n raise Exception('Some hooks must be included')\n\n @functools.wraps(function)\n def run(*args, **kwargs):\n sanitizer_log(f'Hook start {str(function)}', LOG_DEBUG)\n\n # Call hook\n if pre_exec_hook is not None:\n pre_exec_hook(*args, **kwargs)\n\n # Call the original function in the even the hook did not indicate\n # failure.\n ret = function(*args, **kwargs)\n\n # Post execution hook. Overwrite return value if anything is returned\n # by post hook.\n if post_exec_hook is not None:\n tmp_ret = post_exec_hook(ret, *args, **kwargs)\n if tmp_ret is not None:\n sanitizer_log('Overwriting return value', LOG_DEBUG)\n ret = tmp_ret\n sanitizer_log(f'Hook end {str(function)}', LOG_DEBUG)\n return ret\n\n return run",
"def add_post_hook(fn):\n def step(testdef):\n old_hook = testdef.post_hook\n def new_hook(*args, **kwargs):\n if callable(old_hook):\n old_hook(*args, **kwargs)\n fn(*args, **kwargs)\n testdef.post_hook = new_hook\n yield\n testdef.post_hook = old_hook\n return step",
"def wraps(app, **kw):\n def wrap(func):\n return maybe_rewrap(app, kw and lite(**kw)(func) or lite(func))\n return wrap",
"def dispatch_hook(key, hooks, hook_data, **kwargs):\n ...",
"def __call__(self, function=None, hookwrapper=False, optionalhook=False,\n tryfirst=False, trylast=False):\n def setattr_hookimpl_opts(func):\n setattr(func, self.project_name + \"_impl\",\n dict(hookwrapper=hookwrapper, optionalhook=optionalhook,\n tryfirst=tryfirst, trylast=trylast))\n return func\n\n if function is None:\n return setattr_hookimpl_opts\n else:\n return setattr_hookimpl_opts(function)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A list of all IPs that were involved in this request, starting with the client IP and followed by zero or more proxies. This only works if all proxies support the ``X-Forwarded-For`` header. Note that this information can be forged by malicious clients. | def remote_route(self):
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else [] | [
"def get_ips(self):\n \n # Get the IP from each interface\n output = [i.interface_ip for i in self.interfaces if is_ip(i)]\n \n # Add any other IP's it has\n output.extend(self.other_ips)\n \n return output",
"def proxies(self):\r\n url = \"%s/sharing/rest/content/users/%s/items/%s/proxies\" % (self._portal.url,\r\n self._user_id,\r\n self.id)\r\n params = {\"f\" : \"json\"}\r\n ps = []\r\n try:\r\n res = self._portal.con.get(url, params)\r\n if 'appProxies' in res:\r\n for p in res['appProxies']:\r\n ps.append(p)\r\n except:\r\n return []\r\n return ps",
"def _get_local_ips(self):\n addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0)\n # Returns IPv4 and IPv6 addresses, ordered by protocol family\n addr_info.sort()\n index = 0\n host_ips = []\n for one_addr_info in addr_info:\n # the data structure of addr_info returned by the method\n # getaddrinfo is (family, socktype, proto, canonname, sockaddr).\n # Fox example:\n # (2, 1, 6, '', ('82.94.164.162', 80))\n # (10, 1, 6, '', ('2001:888:2000:d::a2', 80, 0, 0))\n host_ips[index] = one_addr_info[4][0]\n index = index + 1\n return host_ips",
"def ip_addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"ip_addresses\")",
"def get_indicators_ip(self) -> List[dict]:\n return [\n self._process_item(item, self._tags, self.tlp_color)\n for item in self._build_iterator_ip()\n ]",
"def get_proxy_list():\n proxy_list = []\n for plugin in PLUGINS:\n proxy_list.extend(plugin().scrape())\n # remove duplicate ip\n unique_proxies = list({v[\"ip\"]:v for v in proxy_list}.values())\n return unique_proxies",
"def get_all_proxies(self):\n proxies = []\n for addr in self._address_pool:\n proxy = {\"http\": addr, \"https\": addr}\n proxies.append(proxy)\n return proxies",
"def ip(self) -> str:\n if self._flask_request.headers.getlist(\"X-Forwarded-For\"):\n return self._flask_request.headers.getlist(\"X-Forwarded-For\")[0]\n return self._flask_request.remote_addr",
"def list_nodes_ips(self):\n\t\treturn self.stats_colector.list_nodes_ips()",
"def inet_visible_ip(self):\n def handle(results):\n ips = [result[1][0] for result in results if result[0]]\n logger.debug(\"other nodes think our ip is %s\", ips)\n return ips\n\n ds = []\n for neighbor in self.bootstrappable_neighbors():\n ds.append(self.protocol.stun(neighbor))\n future_list(ds, handle)",
"def addresses(self):\n addrs = {u.recieved_raw['ingress-address']\n for u in self.all_joined_units}\n return list(sorted(addrs))",
"def client_addr(self):\n e = self.environ\n xff = e.get('HTTP_X_FORWARDED_FOR')\n if xff is not None:\n addr = xff.split(',')[0].strip()\n else:\n addr = e.get('REMOTE_ADDR')\n return addr",
"def request_ip_address(request):\n x_forward_for = request.META.get('HTTP_X_FORWARDED_FOR', None)\n return x_forward_for if x_forward_for else request.META.get('REMOTE_ADDR')",
"def ip(self):\n\t\tfor ip in self.__ip.keys():\n\t\t\tyield ip",
"def get_untrusted_ips(self):\n ips = [ip for ip in self.get_header_ips()\n if ip not in self.ctxt.networks.trusted]\n return ips",
"def client_ip_address(request):\n\n if request.headers.getlist(\"X-PNG-Query-For\"):\n ip_addr = request.headers.getlist(\"X-PNG-Query-For\")[0]\n if ip_addr.startswith('::ffff:'):\n ip_addr = ip_addr[7:]\n elif request.headers.getlist(\"X-Forwarded-For\"):\n ip_addr = request.headers.getlist(\"X-Forwarded-For\")[0]\n if ip_addr.startswith('::ffff:'):\n ip_addr = ip_addr[7:]\n else:\n ip_addr = request.remote_addr\n\n return ip_addr",
"def all_fixed_ips(self) -> Sequence[str]:\n return pulumi.get(self, \"all_fixed_ips\")",
"def get_ip_addresses(self, task):\n return []",
"def ex_static_ip_list(self):\r\n response = self.connection.request(action='/resources/ip/list',\r\n method='GET')\r\n\r\n if response.status != 200:\r\n raise CloudSigmaException('Could not retrieve IP list')\r\n\r\n ips = str2list(response.body)\r\n return ips",
"def _get_client_ip(request) -> str:\n ip: str\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse an RFC 2617 HTTP authentication header string (basic) and return a (user, pass) tuple or None. | def parse_auth(header):
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None | [
"def parse_basic_auth(header):\n b64value = header[len(basic_prefix):]\n value = b64decode(b64value).decode()\n return value.split(':', 1)",
"def parse_basic_auth(header_value):\n\n if not header_value:\n return None\n\n parts = header_value.split(\" \")\n if len(parts) != 2 or parts[0].lower() != \"basic\":\n return None\n\n try:\n basic_parts = base64.b64decode(parts[1]).split(\":\", 1)\n if len(basic_parts) != 2:\n return None\n\n return basic_parts\n except ValueError:\n return None",
"def parse_authenticate_header(header):\n if not header:\n return\n try:\n auth_type, auth_info = header.split(None, 1)\n auth_type = auth_type.lower()\n except ValueError as e:\n print(e)\n return\n return WWWAuthenticate(auth_type, parse_dict_header(auth_info))",
"def get_user_and_password(auth_header):\n if auth_header is None:\n raise Exception('No Authorization header')\n auth_data = auth_header.split()\n if len(auth_data) < 2 or auth_data[0] != 'Basic':\n raise Exception('Authorization header not for HTTP Basic')\n return base64.b64decode(auth_data[1]).decode('ascii').split(':', 1)",
"def getBasicHttpAuthenticationData(self):\n request = self.getRequest()\n if \"HTTP_AUTHORIZATION\" not in request.META:\n return None\n\n auth = request.META[\"HTTP_AUTHORIZATION\"].split()\n if len(auth) != 2:\n return None\n\n if auth[0].lower() != \"basic\":\n return None\n\n auth = base64.b64decode(auth[1])\n auth = auth.decode(\"utf-8\")\n username, password = auth.split(':', 1)\n return username, password",
"def auth_basic(self, auth_header):\n\n # The authorization header is base64 encoded, we need it decoded\n auth_decoded = base64.decodebytes(auth_header.encode('utf8')).decode()\n # Decoded format is <username>:<password> so we need to split it\n userstring, password = auth_decoded.split(':', maxsplit=1)\n try:\n # If the user specifies a realm in the username verify\n # it matches the configured SPNEGO realm so we\n # don't open ourselves up to KDC spoofing\n username, realm = userstring.split('@', maxsplit=1)\n if realm != settings.SPNEGO_REALM:\n raise NotAuthorized\n except ValueError:\n username = userstring\n\n kerberos.checkPassword(\n username, password,\n kerberos.getServerPrincipalDetails(\n 'HTTP', settings.SPNEGO_HOSTNAME),\n settings.SPNEGO_REALM\n )\n\n return username",
"def parse_auth_header(header):\n try:\n to_return = dict(\n map(\n lambda x: x.strip().split('='),\n header.split(' ')\n )\n )\n except (IndexError, ValueError):\n return None\n return to_return",
"def parse_authorization_header(header):\n if not header:\n return\n try:\n auth_type, auth_info = header.split(None, 1) # separate auth type and values\n auth_type = auth_type.lower()\n except ValueError as e:\n print(e)\n return\n\n if auth_type == 'basic':\n try:\n username, password = auth_info.decode('base64').split(':', 1)\n except Exception as e:\n return\n return Authorization('basic', {'username': username,\n 'password': password})\n elif auth_type == 'digest':\n auth_map = parse_dict_header(auth_info)\n\n required_map = {\n 'auth': (\"username\", \"realm\", \"nonce\", \"uri\", \"response\", \"opaque\"),\n 'auth-int': (\"realm\", \"nonce\", \"uri\", \"qop\", \"nc\", \"cnonce\", \"response\", \"opaque\")}\n required = required_map.get(auth_map.get('qop', 'auth'))\n\n for key in required:\n if not key in auth_map:\n return\n return Authorization('digest', auth_map)\n elif auth_type == 'oauth':\n auth_map = parse_dict_header(auth_info)\n return Authorization('oauth', auth_map)\n else:\n raise ValueError(\"Unknown auth type %s\" % auth_type)",
"def parse_credentials(username: str, password: str) -> tuple:\n return username, password",
"def parse_auth_credentials(value):\n # type: (str) -> Optional[Tuple[str, str]]\n if not value:\n return None\n\n if len(value.split(\":\")) != 2:\n raise ValueError(\n \"--codespeed-auth argument must be in the following format: \"\n \"--codespeed_auth=<username:password>\"\n )\n\n # Split it into (username, password) tuple\n split = value.split(\":\")[:2] # type: List[str]\n codespeed_auth = (split[0], split[1])\n\n return codespeed_auth",
"def get_credentials(self, request):\n auth_header = request.headers.get('Authorization')\n\n if auth_header:\n (scheme, base64_raw) = auth_header.split(' ')\n\n if scheme == 'Basic':\n return b64decode(base64_raw).split(':')\n return (None, None)",
"def parse_authorization_header(value):\n if not value:\n return\n value = wsgi_to_bytes(value)\n try:\n auth_type, auth_info = value.split(None, 1)\n auth_type = auth_type.lower()\n except ValueError:\n return\n if auth_type == b\"basic\":\n try:\n username, password = base64.b64decode(auth_info).split(b\":\", 1)\n except Exception:\n return\n return Authorization(\n \"basic\",\n {\n \"username\": to_unicode(username, _basic_auth_charset),\n \"password\": to_unicode(password, _basic_auth_charset),\n },\n )\n elif auth_type == b\"digest\":\n auth_map = parse_dict_header(auth_info)\n for key in \"username\", \"realm\", \"nonce\", \"uri\", \"response\":\n if key not in auth_map:\n return\n if \"qop\" in auth_map:\n if not auth_map.get(\"nc\") or not auth_map.get(\"cnonce\"):\n return\n return Authorization(\"digest\", auth_map)",
"def get_auth_from_url(url):\n parsed = urlparse(url)\n\n try:\n auth = (unquote(parsed.username), unquote(parsed.password))\n except (AttributeError, TypeError):\n auth = ('', '')\n\n return auth",
"def basic_auth_credentials(self) -> tuple[str, str] | None:\n return (self.username, self.password) if self.username or self.password else None",
"def parse_www_authenticate_header(value, on_update=None):\n if not value:\n return WWWAuthenticate(on_update=on_update)\n try:\n auth_type, auth_info = value.split(None, 1)\n auth_type = auth_type.lower()\n except (ValueError, AttributeError):\n return WWWAuthenticate(value.strip().lower(), on_update=on_update)\n return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update)",
"def _parse_auth_data(packet: bytes) -> Tuple[str, str]:\n spn_len = struct.unpack(\"<H\", packet[:2])[0]\n packet = packet[2:]\n\n spn = struct.unpack(f\"<{spn_len}s\", packet[:spn_len])[0]\n packet = packet[spn_len:]\n\n realm_len = struct.unpack(\"<H\", packet[:2])[0]\n realm = struct.unpack(f\"<{realm_len}s\", packet[2:])[0]\n\n return spn.decode(), realm.decode()",
"def valid_auth(auth):\n try:\n user, password = auth.split(\":\")\n except:\n raise argparse.ArgumentTypeError(\n \"'{0}' is not a valid auth\".format(auth))\n return user, password",
"def get_auth_type_and_token_from_header(raw_auth_header_val):\n if not raw_auth_header_val:\n raise AuthenticationError(\"No Header Value Given\")\n return raw_auth_header_val.split(' ', 1)",
"def basic_auth_header(user, password):\n return {'Authorization': BasicAuth(user, password).encode()}",
"def create_auth_header(self):\n encode_password = base64.b64encode(self._module.paramgram[\"username\"] + \":\" +\n self._module.paramgram[\"password\"])\n auth = \"Basic %s\" % encode_password\n return auth"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a generator for routes that match the signature (name, args) of the func parameter. This may yield more than one route if the function takes optional keyword arguments. | def yieldroutes(func):
import inspect # Expensive module. Only import if necessary.
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = inspect.getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/:%s' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/:%s' % arg
yield path | [
"def _get_generator(self, path: str) -> Union[_GEN_FUNCTION_TYPE, None]:\n callback = self._routes.get(path)\n if callback:\n return callback\n\n items = reversed(tuple(self._re_routes.items())) # Thread safety\n for pattern, callback in items:\n if pattern.fullmatch(path):\n return callback\n\n return None",
"def list_routes(self, **args):\n return self._list(Route, **args)",
"def make_route(self, *args, **kargs):\n return Route(*args, **kargs)",
"def get_route_match(self, path):\n for route_pattern, methods, group, view_function in self.routes:\n m = route_pattern.match(path)\n if m:\n return m.groupdict(), methods, group, view_function\n \n return None",
"async def test_more_than_one_function():\n routes = Routes()\n\n with pytest.raises(PolicyError):\n\n @routes.get(\"/func\")\n @policy(AdministratorRoutePolicy(AdministratorRole.BASE))\n @policy(PublicRoutePolicy)\n async def get(_):\n \"\"\"An example public route.\"\"\"\n return json_response({\"test_get_func\": True}, status=200)",
"def find_routes(start_end_points):\n pass",
"def searchRoute(request):\r\n\t\tfunction, args = None, None\r\n\r\n\t\tif request.method == b\"PUT\":\r\n\t\t\tdirectory, file = useful.split(useful.tostrings(request.path))\r\n\t\t\tfound = HttpServer.routes.get(useful.tobytes(directory),None)\r\n\t\t\tif found:\r\n\t\t\t\tfunction, args = found\r\n\t\t\treturn function, args\r\n\t\telse:\r\n\t\t\tfound = HttpServer.routes.get(request.path,None)\r\n\t\t\tif found is None:\r\n\t\t\t\tfor route, func in HttpServer.wildroutes:\r\n\t\t\t\t\tif re.match(useful.tostrings(route), useful.tostrings(request.path)):\r\n\t\t\t\t\t\tfound = func\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tif found is None:\r\n\t\t\t\t\tstaticRe = re.compile(\"^/(\"+useful.tostrings(HttpServer.wwwDir)+\"/.+|.+)\")\r\n\t\t\t\t\tif staticRe.match(useful.tostrings(request.path)):\r\n\t\t\t\t\t\tfunction, args = HttpServer.staticPages, {}\r\n\t\t\t\telse:\r\n\t\t\t\t\tfunction, args = found\r\n\t\t\telse:\r\n\t\t\t\tfunction, args = found\r\n\t\treturn function, args",
"def regexes(self) -> List[str]:\n routes = []\n for route in self.routes():\n if isinstance(route, str):\n routes.append(route)\n elif isinstance(route, tuple):\n routes.append(route[0])\n else:\n raise ValueError('Invalid route type')\n return routes",
"def find_matches(overloads, argtypes, constraints=()):\n input = T.Function(*argtypes + [T.TypeVar('R')])\n for func, sig, kwds in overloads:\n assert isinstance(sig, T.Function), sig\n\n # -------------------------------------------------\n # Error checking\n l1, l2 = len(sig.argtypes), len(argtypes)\n if l1 != l2:\n raise TypeError(\n \"Expected %d args, got %d for function %s\" % (l1, l2, func))\n\n # -------------------------------------------------\n # Unification\n\n equations = list(chain([(input, sig)], constraints))\n broadcasting = [True] * l1\n\n try:\n result, remaining = unify(equations, broadcasting)\n except error.UnificationError:\n continue\n else:\n dst_sig = result[0]\n yield Overload(dst_sig, sig, func, remaining, kwds)",
"def getRoute(self,path,view_function_ptr,**kwargs):\n regex = r\"^{0}\".format(path)\n route = url(regex,view_function_ptr,kwargs)\n return route",
"def get_factory_with_routes(myroutes):\r\n def factory_method(sock, address):\r\n return AsyncRoutedHttpHandler(sock, address, myroutes)\r\n return factory_method",
"def xs(name, parser_args, list_args):\n for args, kwargs in list_args:\n if len(set(args) & parser_args) > 0:\n yield args, kwargs\n\n else:\n if 'dest' in kwargs:\n if kwargs['dest'] == name:\n yield args, kwargs",
"def gen_func_with_args( gen_func, partial_get_files, logging_dir ):\n\tfor func in gen_func:\n\t\tdelete_log_files( pattern_str = 'failure.txt', default_start_walk = logging_dir )\n\t\tfor _, csv_filename, issuer in partial_get_files():\n\t\t\tyield csv_filename, issuer, func",
"def inspect_routes(app: App) -> 'List[RouteInfo]':\n router = app._router\n\n inspect_function = _supported_routers.get(type(router))\n if inspect_function is None:\n raise TypeError(\n 'Unsupported router class {}. Use \"register_router\" '\n 'to register a function that can inspect the router '\n 'used by the provided application'.format(type(router))\n )\n return inspect_function(router)",
"def _get_route_data(route, registry):\n\n pattern = _get_pattern(route)\n\n request_iface = registry.queryUtility(\n IRouteRequest,\n name=route.name\n )\n\n route_intr = registry.introspector.get(\n 'routes', route.name\n )\n\n if request_iface is None:\n return\n\n if route_intr.get('static', False) is True:\n return\n\n view_intr = registry.introspector.related(route_intr)\n\n if view_intr:\n for view in view_intr:\n yield route.name, pattern, view",
"def match(routes: typing.List[Route],\n request: server.HTTPRequest) -> typing.Optional[RouteMatch]:\n for route in routes:\n result = route.match(request.path)\n if result:\n return RouteMatch(\n route.handler,\n route.name,\n route.pattern,\n route.suppress_logging,\n route.kwargs,\n result.group(),\n result.groupdict())",
"def get_routes(flask_app, url, context):\n url_prefix = context.url_prefix\n\n rules = flask_app.url_map.iter_rules()\n\n found_rules = []\n\n pattern = re.compile('/'.join([url_prefix, url]).replace('/', r'\\/'))\n\n # Find needed route rules\n for rule in rules:\n rule_url = rule.rule\n result = pattern.search(rule_url)\n\n if result is not None:\n found_rules.append(rule)\n\n # Rules can exist multiple time for the same url inside flask\n # Therefore we need to combine at least the methods of identical rules\n merged_rules = {}\n for found_rule in found_rules:\n found_rule_name = found_rule.rule\n if found_rule_name not in merged_rules.keys():\n # We need to create a coy of the rule, as we manipulate it later\n # for our needs (e.g. merging methods).\n # And this must not be done on the original routes/rules, which would affect\n # the flask app behavior.\n merged_rules[found_rule_name] = copy.copy(found_rule)\n else:\n current_methods = list(found_rule.methods)\n existing_methods = list(merged_rules[found_rule_name].methods)\n new_methods = [i for i in current_methods if i not in existing_methods]\n merged_rules[found_rule_name].methods = existing_methods + new_methods\n\n # Create needed list and element structure\n routes = []\n for key, rule in merged_rules.items():\n route = {\n 'url': rule.rule.replace(url_prefix, '', 1),\n 'methods': rule.methods,\n 'name': rule.rule.replace('/', '', 1).replace('/', '_').rstrip('_'),\n 'context': context.name\n }\n\n parameters = re.findall('<([a-zA-Z0-9_]*)>', rule.rule)\n route_params = {}\n for param in parameters:\n route_params[param] = {\n 'name': param,\n 'type': None,\n 'description': None\n }\n\n route['parameters'] = route_params\n\n routes.append(route)\n\n return routes",
"def generatefunc(self, linelst):\n i = 0\n while(i<4):\n yield linelst[(i*2) + 0], linelst[(i*2) + 1]\n i += 1\n return",
"def gen_directions():\n result = None\n while result is None:\n result = recv_directions(input())\n return result.group()",
"def route(self,\n commands: List[str] = None,\n messages: List[str] = None,\n states: List[str] = None,\n roles: List[str] = None) -> Callable:\n if states is None:\n states = []\n if roles is None:\n roles = []\n if not commands and not messages:\n messages = ['(?s).*'] # matches every message\n def decorator(func: Callable) -> Callable:\n if commands:\n for command in commands:\n self.__router.register_command_route(\n CommandRoute(command, func, states, roles))\n if messages:\n for message in messages:\n self.__router.register_message_route(\n MessageRoute(message, func, states, roles))\n return func\n\n return decorator"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load a bottle application from a module and make sure that the import does not affect the current default application, but returns a separate application object. | def load_app(target):
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old | [
"def load_app(module_name, objects=None):\n from importlib import import_module\n if objects:\n return import_module(module_name + '.' + objects)\n else:\n return import_module(module_name)",
"def app_from_config(config: RunConfig) -> Bottle:\n # The _app module instantiates a Bottle instance directly when it is\n # imported. That is `_app.app`. We directly mutate some global variables\n # on the imported `_app` module so that its endpoints will behave as\n # we expect.\n _app = __import__(\"_app\", globals(), locals(), [\".\"], 1)\n # Because we're about to mutate our import, we pop it out of the imported\n # modules map, so that any future imports do not receive our mutated version\n sys.modules.pop(\"pypiserver._app\", None)\n _app.config = config\n # Add a reference to our config on the Bottle app for easy access in testing\n # and other contexts.\n _app.app._pypiserver_config = config\n return _app.app",
"def getApplication():",
"def set_application(app):",
"def get_app(relpath=None):\n global app\n if app:\n return app\n\n if relpath is None:\n relpath = os.getcwd()\n elif not os.path.isdir(relpath):\n relpath = os.path.dirname(relpath)\n\n path = os.path.abspath(relpath)\n\n while path != '/':\n try:\n module = imp.find_module('app', [path])\n except ImportError:\n path = os.path.abspath(os.path.join(path, '..'))\n else:\n sys.path.insert(0, path)\n app = imp.load_module('app', *module).app\n configfile = os.path.join(path, SETTINGS_FILENAME)\n settings._load(filename=configfile)\n app._path = path\n return app\n\n raise ImportError(\"Couldn't find Nucleon app to import\")",
"def Flask(module_name, path=None):\n from .base import Kit\n return Kit(path).get_flask_app(module_name)",
"def app(par=None):\n\n return Miniweb.get_instance(par)",
"def app():\n return app",
"def app():\n\n application = create_app()\n application.test_client_class = JSON_Client\n application.response_class = Load_JSON_Response\n return application",
"def app():\n app = create_app(TestConfig)\n return app",
"def configure_app(app) -> NoReturn:\n ma.init_app(app)",
"def load(module):\n mod =module.split(\".\")\n return apache.import_module(mod[1], path=[_applicationRoot + mod[0]])",
"def application(config={}):\n from kitero.web import app\n configure(app, config)\n return app",
"def app(request):\n from kivy.interactive import InteractiveLauncher\n from {{cookiecutter.repo_name}}.{{cookiecutter.repo_name}} import {{cookiecutter.app_class_name}}\n launcher = InteractiveLauncher({{cookiecutter.app_class_name}}('en'))\n\n def stop_launcher():\n launcher.safeOut()\n launcher.stop()\n\n request.addfinalizer(stop_launcher)\n\n launcher.run()\n launcher.safeIn()\n return launcher.app",
"def load_and_run_shell() -> None:\n sys.path.append(os.getcwd())\n\n parser = argparse.ArgumentParser(\n description=\"Open a shell for a Thrift service with app configuration loaded.\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"--debug\", action=\"store_true\", default=False, help=\"enable extra-verbose debug logging\"\n )\n parser.add_argument(\n \"--app-name\",\n default=\"main\",\n metavar=\"NAME\",\n help=\"name of app to load from config_file (default: main)\",\n )\n parser.add_argument(\n \"config_file\", type=argparse.FileType(\"r\"), help=\"path to a configuration file\"\n )\n\n args = parser.parse_args(sys.argv[1:])\n with args.config_file:\n config = read_config(args.config_file, server_name=None, app_name=args.app_name)\n logging.basicConfig(level=logging.INFO)\n\n env: Dict[str, Any] = {}\n env_banner = {\n \"app\": \"This project's app instance\",\n \"context\": \"The context for this shell instance's span\",\n }\n\n app = make_app(config.app)\n env[\"app\"] = app\n\n baseplate: Baseplate = app.baseplate # type: ignore\n context = baseplate.make_context_object()\n span = baseplate.make_server_span(context, \"shell\")\n env[\"context\"] = span.context\n\n if config.shell and \"setup\" in config.shell:\n setup = _load_factory(config.shell[\"setup\"])\n setup(env, env_banner)\n\n configure_logging(config, args.debug)\n\n # generate banner text\n banner = \"Available Objects:\\n\"\n for var in sorted(env_banner.keys()):\n banner += f\"\\n {var:<12} {env_banner[var]}\"\n\n console_logpath = _get_shell_log_path()\n\n try:\n # try to use IPython if possible\n from IPython import start_ipython\n\n try:\n # IPython 5.x+\n from traitlets.config.loader import Config\n except ImportError:\n # IPython 4 and below\n from IPython import Config\n\n ipython_config = Config()\n ipython_config.InteractiveShellApp.exec_lines = [\n # monkeypatch IPython's log-write() to enable formatted input logging, copying original code:\n # https://github.com/ipython/ipython/blob/a54bf00feb5182fa821bd5457897b3b30a313436/IPython/core/logger.py#L187-L201\n f\"\"\"\n ip = get_ipython()\n from functools import partial\n def log_write(self, data, kind=\"input\", message_id=\"IEXC\"):\n import datetime, os\n if self.log_active and data:\n write = self.logfile.write\n if kind=='input':\n # Generate an RFC 5424 compliant syslog format\n write(f'<13>1 {{datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")}} {{os.uname().nodename}} baseplate-shell {{os.getpid()}} {{message_id}} - {{data}}')\n elif kind=='output' and self.log_output:\n odata = u'\\\\n'.join([u'#[Out]# %s' % s\n for s in data.splitlines()])\n write(u'%s\\\\n' % odata)\n self.logfile.flush()\n ip.logger.logstop = None\n ip.logger.log_write = partial(log_write, ip.logger)\n ip.magic('logstart {console_logpath} append')\n ip.logger.log_write(data=\"Start IPython logging\\\\n\", message_id=\"ISTR\")\n \"\"\"\n ]\n ipython_config.TerminalInteractiveShell.banner2 = banner\n ipython_config.LoggingMagics.quiet = True\n start_ipython(argv=[], user_ns=env, config=ipython_config)\n raise SystemExit\n except ImportError:\n pass\n\n newbanner = f\"Baseplate Interactive Shell\\nPython {sys.version}\\n\\n\"\n banner = newbanner + banner\n\n try:\n import readline\n\n readline.set_completer(Completer(env).complete)\n readline.parse_and_bind(\"tab: complete\")\n\n except ImportError:\n pass\n\n shell = LoggedInteractiveConsole(_locals=env, logpath=console_logpath)\n shell.interact(banner)",
"def create_application(**kwargs):\n app = Flask(__name__)\n\n app.config.from_object('pybel_tools.web.config.Config')\n\n if 'PYBEL_WEB_CONFIG' in os.environ:\n log.info('importing config from %s', os.environ['PYBEL_WEB_CONFIG'])\n app.config.from_json(os.path.expanduser(os.environ['PYBEL_WEB_CONFIG']))\n\n app.config.update(kwargs)\n\n # Initialize extensions\n bootstrap_extension.init_app(app)\n pybel_extension.init_app(app)\n\n app.register_blueprint(async_blueprint)\n\n return app",
"def Celery(module_name, path=None):\n from .base import Kit\n return Kit(path).get_celery_app(module_name)",
"def init_app():\n\n # __name__ is the path of the current Python module, Flask needs to know\n # where it is located to setup paths.\n # instance_relative_config tells the app that config files are not relative\n # to the instance folder.\n app = Flask(__name__, instance_relative_config=False)\n\n # gets the config information from the Config class that is stored in the\n # config.py file. This class gets the variables from the .env file\n app.config.from_object(\"config.Config\")\n\n # Creates an Environment object from flask_assets to hold a collection of\n # bundles and configuration. If initialised with an instance of Flask app\n # then webassets Jinja2 extention is automatically registered.\n assets = Environment()\n\n # the app is passed to Envoronment.init_app to allow usage by multiple\n # applications rather than passing a fixed application object, see url below:\n # https://flask-assets.readthedocs.io/en/latest/#flask_assets.Environment\n assets.init_app(app)\n\n # gets the context of the current app, in case there are multiple flask apps\n # running at the same time.\n # Import parts of our core Flask app\n with app.app_context():\n\n # imports and executes routes.py which assigns different URLs to\n # different functions which can render HTML pages from jinja2 templates\n from . import routes\n\n # import the compile_static_assets function from the assets.py file.\n # This function compiles a bunch of stylesheets when the app variable\n # FLASK_ENV is set to \"development\"\n from .assets import compile_static_assets\n\n # Import Dash application init_dashboard(server) function\n from .plotlydash.dashboard import init_dashboard\n\n # Give the init_dashboard function the existing flask object (app) to be\n # used as the main server that this sub-app will run on.\n app = init_dashboard(app)\n\n # Compile static assets -\n # THIS WAS TURNED OFF AS IT WAS BREAKING GOOGLE APP ENGINE\n # compile_static_assets(assets)\n\n # return the fully configured/setup app to the wsgi.py file to be run\n return app",
"def load_app(self, app_name, can_postpone=False):\n self.handled[app_name] = None\n self.nesting_level += 1\n app_module = import_module(app_name)\n try:\n models = import_module('.models', app_name)\n except ImportError:\n self.nesting_level -= 1\n # If the app doesn't have a models module, we can just ignore the\n # ImportError and return no models for it.\n if not module_has_submodule(app_module, 'models'):\n return None\n # But if the app does have a models module, we need to figure out\n # whether to suppress or propagate the error. If can_postpone is\n # True then it may be that the package is still being imported by\n # Python and the models module isn't available yet. So we add the\n # app to the postponed list and we'll try it again after all the\n # recursion has finished (in populate). If can_postpone is False\n # then it's time to raise the ImportError.\n else:\n if can_postpone:\n self.postponed.append(app_name)\n return None\n else:\n raise\n\n self.nesting_level -= 1\n if models not in self.app_store:\n self.app_store[models] = len(self.app_store)\n self.app_labels[self._label_for(models)] = models\n return models",
"def create_app():\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_object(config[SELECTED_CONFIG])\n db.init_app(app)\n app.register_blueprint(recipes)\n\n ma.init_app(app)\n Bootstrap(app)\n\n app.before_request(create_before_request(app))\n return app"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new template. If the source parameter (str or buffer) is missing, the name argument is used to guess a template filename. Subclasses can assume that self.source and/or self.filename are set. Both are strings. The lookup, encoding and settings parameters are stored as instance variables. The lookup parameter stores a list containing directory paths. The encoding parameter should be used to decode byte strings or files. The settings parameter contains a dict for engine-specific settings. | def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings) | [
"def load(self,\n template_source,\n template_filename='',\n template_identifier='',\n template_encoding='utf-8',\n template_standard='xhtml',\n parser_parameters={}):\n assert template_standard in ('xml', 'xhtml')\n \n self.template_standard = template_standard\n \n # Determine the default template file name if possible\n if (not template_filename and\n not isinstance(template_source, basestring) and\n hasattr(template_source, 'name')):\n \n # Take the file name from the file object\n template_filename = template_source.name\n \n # Determine the template's identifier if possible\n if template_filename and not template_identifier:\n template_identifier = os.path.basename(template_filename).split('.', 1)[0].replace('-', '_')\n if not util.is_identifier(template_identifier):\n template_identifier = None\n \n # Store template names and encoding\n self.template_filename = template_filename or 'unnamed_template'\n self.template_identifier = template_identifier or 'unnamed_template'\n self.template_encoding = template_encoding\n \n # Load the template from a file object if needed\n if not isinstance(template_source, basestring):\n template_source = template_source.read()\n\n if constants.GENERATE_DEBUG_COMMENTS:\n self.template_lines = template_source.splitlines()\n # Allow indexing template lines from 1, since the element.sourceline\n # values are starting from 1, not zero\n self.template_lines.insert(0, '')\n\n # Create the appropriate parser and configure it\n kws = dict(\n encoding=template_encoding,\n resolve_entities=False,\n ns_clean=True)\n kws.update(parser_parameters)\n parser = etree.XMLParser(**kws)\n\n if self.template_standard == 'xhtml':\n #kws['load_dtd'] = True\n \n # Fail on existing DOCTYPE\n assert not template_source.lstrip().startswith('<!'), (\n \"Please remove the current <!DOCTYPE > definition or \"\n \"set the template_standard to 'xml'!\")\n \n # Prepend the DTD for the entities\n # FIXME: It would be faster to feed it to the parser before the document.\n template_source = constants.DOCTYPE_AND_HTML_ENTITIES + template_source\n \n # Parse and store the template\n self.template = etree.fromstring(template_source, parser)\n \n # Prepare namespace map and reverse map based on the actual\n # namespace declarations of the template loaded\n self.namespace_map = dict(\n (url, prefix)\n for prefix, url in self.template.nsmap.iteritems()\n if url not in constants.XML_NAMESPACES_PROCESSED)",
"def from_string(cls, name: str, argnames: Iterable[str], source: str) -> \"DefTemplate\":\n kind = inspect.Parameter.POSITIONAL_OR_KEYWORD\n parameters = [inspect.Parameter(name, kind=kind) for name in argnames]\n signature = inspect.Signature(parameters)\n\n return cls._from_signature_and_body(name, signature, source)",
"def from_string(cls, raw: str) -> \"TemplatedFile\":\n return cls(source_str=raw, fname=\"<string>\")",
"def from_template(cls, filename, context, *args, mode='r', encoding=None,\n loader=json, **kwargs):\n\n with open(filename, mode=mode, encoding=encoding) as file:\n s = file.read(encoding=encoding)\n data = loader.loads(s.format(**context))\n\n return cls(data, *args, **kwargs)",
"def set_source_template(template):",
"def __init__(\n self,\n source_str: str,\n fname: str,\n templated_str: Optional[str] = None,\n sliced_file: Optional[List[TemplatedFileSlice]] = None,\n raw_sliced: Optional[List[RawFileSlice]] = None,\n ):\n self.source_str = source_str\n # An empty string is still allowed as the templated string.\n self.templated_str = source_str if templated_str is None else templated_str\n # If no fname, we assume this is from a string or stdin.\n self.fname = fname\n # Assume that no sliced_file, means the file is not templated\n self.sliced_file: List[TemplatedFileSlice]\n if sliced_file is None:\n if self.templated_str != self.source_str: # pragma: no cover\n raise ValueError(\"Cannot instantiate a templated file unsliced!\")\n # If we get here and we don't have sliced files,\n # then it's raw, so create them.\n self.sliced_file = [\n TemplatedFileSlice(\n \"literal\", slice(0, len(source_str)), slice(0, len(source_str))\n )\n ]\n assert (\n raw_sliced is None\n ), \"Templated file was not sliced, but not has raw slices.\"\n self.raw_sliced: List[RawFileSlice] = [\n RawFileSlice(source_str, \"literal\", 0)\n ]\n else:\n self.sliced_file = sliced_file\n assert raw_sliced is not None, \"Templated file was sliced, but not raw.\"\n self.raw_sliced = raw_sliced\n\n # Precalculate newlines, character positions.\n self._source_newlines = list(iter_indices_of_newlines(self.source_str))\n self._templated_newlines = list(iter_indices_of_newlines(self.templated_str))\n\n # Consistency check raw string and slices.\n pos = 0\n rfs: RawFileSlice\n for rfs in self.raw_sliced:\n assert rfs.source_idx == pos, (\n \"TemplatedFile. Consistency fail on running source length\"\n f\": {pos} != {rfs.source_idx}\"\n )\n pos += len(rfs.raw)\n assert pos == len(self.source_str), (\n \"TemplatedFile. Consistency fail on total source length\"\n f\": {pos} != {len(self.source_str)}\"\n )\n\n # Consistency check templated string and slices.\n previous_slice = None\n tfs: Optional[TemplatedFileSlice] = None\n for tfs in self.sliced_file:\n if previous_slice:\n if tfs.templated_slice.start != previous_slice.templated_slice.stop:\n raise SQLFluffSkipFile( # pragma: no cover\n \"Templated slices found to be non-contiguous. \"\n f\"{tfs.templated_slice} (starting\"\n f\" {self.templated_str[tfs.templated_slice]!r})\"\n f\" does not follow {previous_slice.templated_slice} \"\n \"(starting \"\n f\"{self.templated_str[previous_slice.templated_slice]!r}\"\n \")\"\n )\n else:\n if tfs.templated_slice.start != 0:\n raise SQLFluffSkipFile( # pragma: no cover\n \"First Templated slice not started at index 0 \"\n f\"(found slice {tfs.templated_slice})\"\n )\n previous_slice = tfs\n if self.sliced_file and templated_str is not None:\n if tfs.templated_slice.stop != len(templated_str):\n raise SQLFluffSkipFile( # pragma: no cover\n \"Length of templated file mismatch with final slice: \"\n f\"{len(templated_str)} != {tfs.templated_slice.stop}.\"\n )",
"def _create_from_template(self, src_filename, dest_filename, **kwargs):\n info(\"creating {dest} from {src}\".format(dest=dest_filename, src=src_filename))\n with open(src_filename) as in_file:\n template = in_file.read()\n\n new_filename = None\n try:\n # we just want the unique temp file name, we will delete it in the finally block\n tf = tempfile.NamedTemporaryFile(delete=False)\n new_filename = tf.name\n tf.close()\n\n rendered = template.format(**kwargs)\n with open(new_filename, 'w') as out_file:\n try:\n out_file.write(rendered)\n # catching all exceptions\n # pylint: disable=W0703\n except Exception as ex:\n error(ex)\n\n # if there is a dest_filename, then handle backing it up\n if os.path.isfile(dest_filename):\n # new_filename contains the just rendered template\n # dest_filename contains the original content\n\n # if new_filename contents equal dest_filename contents, then we are done\n if md5sum(new_filename)[0] == md5sum(dest_filename)[0]:\n return\n\n # new_filename content and dest_filename content differ\n\n # so if there is a backup file and if the backup file contents diff from the dest_filename contents,\n # then we rename the dest_filename to then incremented backup_filename (one past the highest\n # existing value)\n backup_filename = next_backup_filename(name=dest_filename)\n\n os.rename(dest_filename, backup_filename)\n\n # next we remove the dest_filename then move new_filename to dest_filename\n if os.path.isfile(dest_filename):\n os.remove(dest_filename)\n\n shutil.copyfile(new_filename, dest_filename)\n\n except Exception as ex:\n error(\"Error rendering template ({file}) - {err}\\n{trace}\".format(file=src_filename,\n err=str(ex),\n trace=traceback.format_exc()))\n error(\"kwargs:\\n{kwargs}\".format(kwargs=pformat(kwargs)))\n finally:\n if new_filename is not None:\n if os.path.isfile(new_filename):\n os.remove(new_filename)",
"def get_source_template():",
"def lookupTemplate(self, request):\n if self.template:\n return microdom.parseString(self.template, caseInsensitive=0, preserveCase=0)\n if not self.templateDirectory:\n mod = sys.modules[self.__module__]\n if hasattr(mod, '__file__'):\n self.templateDirectory = os.path.split(mod.__file__)[0]\n # First see if templateDirectory + templateFile is a file\n templatePath = os.path.join(self.templateDirectory, self.templateFile)\n if not os.path.exists(templatePath):\n raise RuntimeError, \"The template %r was not found.\" % templatePath\n # Check to see if there is an already parsed copy of it\n mtime = os.path.getmtime(templatePath)\n cachedTemplate = templateCache.get(templatePath, None)\n compiledTemplate = None\n\n if cachedTemplate is not None:\n if cachedTemplate[0] == mtime:\n compiledTemplate = templateCache[templatePath][1].cloneNode(deep=1)\n \n if compiledTemplate is None:\n compiledTemplate = microdom.parse(templatePath, caseInsensitive=0, preserveCase=0)\n templateCache[templatePath] = (mtime, compiledTemplate.cloneNode(deep=1))\n return compiledTemplate",
"def _compile(self, source=None, file=None, compilerSettings=Unspecified,\n moduleName=None, mainMethodName=None):\n if compilerSettings is Unspecified:\n compilerSettings = self._getCompilerSettings(source, file) or {}\n mainMethodName = mainMethodName or self._CHEETAH_defaultMainMethodName\n self._fileMtime = None\n self._fileDirName = None\n self._fileBaseName = None\n if file and isinstance(file, string_type):\n file = self.serverSidePath(file)\n self._fileMtime = os.path.getmtime(file)\n self._fileDirName, self._fileBaseName = os.path.split(file)\n self._filePath = file\n templateClass = self.compile(source, file,\n moduleName=moduleName,\n mainMethodName=mainMethodName,\n compilerSettings=compilerSettings,\n keepRefToGeneratedCode=True)\n\n if not self.__class__ == Template:\n # Only propogate attributes if we're in a subclass of\n # Template\n for k, v in self.__class__.__dict__.items():\n if not v or k.startswith('__'):\n continue\n # Propogate the class attributes to the instance\n # since we're about to obliterate self.__class__\n # (see: cheetah.Tests.Tepmlate.SubclassSearchListTest)\n setattr(self, k, v)\n\n self.__class__ = templateClass\n # must initialize it so instance attributes are accessible\n templateClass.__init__(self,\n # _globalSetVars=self._CHEETAH__globalSetVars,\n # _preBuiltSearchList=self._CHEETAH__searchList\n )\n if not hasattr(self, 'transaction'):\n self.transaction = None",
"def _create_template(self):\n Template.objects.create(\n resume=\"a test\",\n shortcut='atest',\n subject=\"a subject\",\n body=\"A body {{ testme }}\"\n )",
"def load_template(self, templatename, template_string=None):\r\n if template_string is not None:\r\n return self.template_class(template_string)\r\n\r\n if self.use_package_naming:\r\n divider = templatename.rfind('.')\r\n if divider >= 0:\r\n from pkg_resources import resource_filename\r\n package = templatename[:divider]\r\n basename = templatename[divider + 1:] + self.extension\r\n templatename = resource_filename(package, basename)\r\n\r\n return self.loader.load(templatename)",
"def load_template(name: str) -> Template:\n if name not in _templates:\n with open(join(dirname(__file__), 'templates', name + '.j2'), 'r') as f:\n return Template(f.read())\n return _templates[name]",
"def __init__(self, template_file, vals, image_folder,\n uniq_id, raw_report='', stylefile=None):\n #self.template_file = template_file\n self.vals = vals\n self.raw_report = raw_report\n self.image_folder = image_folder\n self.stylefile = stylefile\n self.template_file = template_file\n self.uniq_id = uniq_id\n \n self.template = Template(filename=template_file)\n\n # flag to indicate if stored raw report is used\n self.STORED_RAW = False\n \n # create a new report only if there isnt one stored\n if self.raw_report == '':\n self.STORED_RAW = True\n self.generate_raw()",
"def __init__(self, template_dir, title, endpoint, result_template=\"result_default.html\", maker=set_result):\n self._tr = abspath(template_dir.rstrip(\"/\"))\n self._title = title\n self._endpoint = endpoint\n self._html = open(self._tr + \"/result/\" + result_template, \"r\").read()\n self.results = []\n self._maker = maker",
"def __init__(self, template):\n self.template = template\n \n with open(template) as f:\n logging.info(\"HTMLExport has opened the file {}\".format(template))\n self.text = f.read()",
"def callTemplate(env, source, args, kwargs):\n args, kwargs = list(args), dict(kwargs)\n variables = parseVariables(env, source)\n context = drainParameters(variables, args, kwargs)\n template = env.from_string(source)\n\n try:\n return template.render(**context)\n except jinja2.exceptions.UndefinedError as e:\n # extract the missing variable from the error message\n attribute = e.message.split()[0].replace(\"'\", \"\")\n msg = \"Template requires missing `{}` variable.\"\n error = TemplateVariableError(msg.format(attribute))\n error.variable = attribute\n raise error",
"def load(self, filename, relative_to=None, cls=None, encoding=None):\r\n if cls is None:\r\n cls = self.default_class\r\n if encoding is None:\r\n encoding = self.default_encoding\r\n if relative_to and not os.path.isabs(relative_to):\r\n filename = os.path.join(os.path.dirname(relative_to), filename)\r\n filename = os.path.normpath(filename)\r\n\r\n self._lock.acquire()\r\n try:\r\n # First check the cache to avoid reparsing the same file\r\n try:\r\n tmpl = self._cache[filename]\r\n if not self.auto_reload or \\\r\n os.path.getmtime(tmpl.filepath) == self._mtime[filename]:\r\n return tmpl\r\n except KeyError:\r\n pass\r\n\r\n search_path = self.search_path\r\n isabs = False\r\n\r\n if os.path.isabs(filename):\r\n # Bypass the search path if the requested filename is absolute\r\n search_path = [os.path.dirname(filename)]\r\n isabs = True\r\n\r\n elif relative_to and os.path.isabs(relative_to):\r\n # Make sure that the directory containing the including\r\n # template is on the search path\r\n dirname = os.path.dirname(relative_to)\r\n if dirname not in search_path:\r\n search_path = search_path + [dirname]\r\n isabs = True\r\n\r\n elif not search_path:\r\n # Uh oh, don't know where to look for the template\r\n raise TemplateError('Search path for templates not configured')\r\n\r\n for dirname in search_path:\r\n filepath = os.path.join(dirname, filename)\r\n try:\r\n fileobj = open(filepath, 'U')\r\n try:\r\n if isabs:\r\n # If the filename of either the included or the \r\n # including template is absolute, make sure the\r\n # included template gets an absolute path, too,\r\n # so that nested include work properly without a\r\n # search path\r\n filename = os.path.join(dirname, filename)\r\n dirname = ''\r\n tmpl = cls(fileobj, basedir=dirname, filename=filename,\r\n loader=self, lookup=self.variable_lookup,\r\n encoding=encoding)\r\n if self.callback:\r\n self.callback(tmpl)\r\n self._cache[filename] = tmpl\r\n self._mtime[filename] = os.path.getmtime(filepath)\r\n finally:\r\n fileobj.close()\r\n return tmpl\r\n except IOError:\r\n continue\r\n\r\n raise TemplateNotFound(filename, search_path)\r\n\r\n finally:\r\n self._lock.release()",
"def create(self, template):\n raise NotImplementedError('Create Template not implemented')",
"def loadTemplate(self, filename, path):\n # TODO what should \"path\" be relative to? I vote the Template file.\n relPath = os.path.join(self._templatePath, path)\n templateFile = os.path.join(os.path.normpath(relPath), filename)\n self._template, _ = xmlUtils.loadToTree(templateFile)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
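As a reading aid between rows, here is a short, self-contained sketch of the initialisation pattern the query above describes; MiniTemplate and its noescape setting are hypothetical stand-ins, not bottle's actual class.

import io

class MiniTemplate:
    settings = {"noescape": False}                # class-level defaults

    def __init__(self, source=None, name=None, encoding="utf8", **settings):
        self.name = name
        # Accept either a plain string or any object with a read() method.
        self.source = source.read() if hasattr(source, "read") else source
        self.encoding = encoding
        self.settings = self.settings.copy()      # copy class defaults...
        self.settings.update(settings)            # ...then layer instance overrides

t1 = MiniTemplate(source="Hello {name}")
t2 = MiniTemplate(source=io.StringIO("Hello from a file-like object"), noescape=True)
print(t1.settings)   # {'noescape': False}
print(t2.settings)   # {'noescape': True}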
Search name in all directories specified in lookup. First without, then with common extensions. Return first hit. | def search(cls, name, lookup=[]):
if not lookup:
depr('The template lookup path list should not be empty.')
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.')
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext) | [
"def compare(self):\n\n # search result storage\n results_paths = self.result_paths\n to_search = []\n\n # for each search path, get results\n # this now is one list of all results from each search directory\n search_files = self.dir_search(self.ext_1)\n\n # drop ext, so we can compare with search for docs\n to_search_paths = []\n\n for item in search_files:\n p = Path(item)\n item = str(p.stem)\n if item in to_search:\n # don't add duplicate search terms\n continue\n else:\n to_search.append(item)\n to_search_paths.append(p)\n\n duplicates = {}\n complete = {}\n\n files = self.dir_search(self.ext_2)\n\n for file in files:\n p = Path(file)\n item = str(p.stem)\n if item in to_search and str(p.suffix).lower() != str(Path(self.ext_1)):\n # if already complete, then it is duplicate\n if file.name in complete:\n duplicates[file.name] = file\n else:\n complete[file.name] = file\n\n complete_list = []\n\n for file in complete:\n name = complete[file]\n complete_list.append(str(name.stem))\n\n incomplete = {}\n # we want to get incomplete ext1\n for file in to_search_paths:\n p = Path(file)\n item = str(p.stem)\n if item in complete_list:\n continue\n else:\n if p.name in incomplete:\n continue\n else:\n incomplete[p.name] = p\n\n results_paths.duplicates = duplicates\n results_paths.complete = complete\n # remove from search if complete, so to_search contains incomplete files\n results_paths.incomplete = incomplete",
"def find(needle, matcher):\n for root,dirs,files in os.walk(\".\"):\n for dir in dirs:\n if matcher(needle,dir):\n path = join(root, dir)\n print path\n return\n\n for file in files:\n if matcher(needle,file):\n print root\n return\n print \".\"",
"def filefind(fname,alt_dirs = None):\n\n if alt_dirs is None:\n try:\n alt_dirs = get_home_dir()\n except HomeDirError:\n alt_dirs = os.getcwd()\n search = [fname] + list_strings(alt_dirs)\n search = map(os.path.expanduser,search)\n #print 'search list for',fname,'list:',search # dbg\n fname = search[0]\n if os.path.isfile(fname):\n return fname\n for direc in search[1:]:\n testname = os.path.join(direc,fname)\n #print 'testname',testname # dbg\n if os.path.isfile(testname):\n return testname\n raise IOError,'File' + `fname` + \\\n ' not found in current or supplied directories:' + `alt_dirs`",
"def search(\r\n root: str,\r\n terms: Iterable[str],\r\n exts: str = '',\r\n case: bool = False,\r\n negative: bool = False,\r\n dirs: int = 0,\r\n strict: int = 1,\r\n regex: bool = False,\r\n names: bool = True,\r\n) -> Iterator[str]:\r\n func = {0: files, 1: walk, 2: folders}[dirs]\r\n kwargs = {\r\n 0: {\"exts\": exts, \"negative\": negative, \"absolute\": True},\r\n 1: {\"dirs\": True, \"absolute\": True},\r\n 2: {\"absolute\": True},\r\n }[dirs]\r\n\r\n yield from search_iter(\r\n (i for i in func(root, **kwargs)),\r\n terms=terms,\r\n exts=exts,\r\n case=case,\r\n negative=negative,\r\n dirs=dirs,\r\n strict=strict,\r\n names=names,\r\n )",
"def get_files(self, file_lookup: str, storage_path: str) -> List[str]:\n all_categories_files = [\n f for f in listdir(storage_path) \n if isfile(join(storage_path, f))\n ]\n categories_files = []\n for file in all_categories_files:\n if file_lookup in file:\n categories_files.append(file)\n return categories_files",
"def find_file(directories, filename, extensions):\n\tfor directory in directories:\n\t\tfor extension in extensions:\n\t\t\tpath = os.path.join(directory, filename + extension)\n\t\t\tif os.path.isfile(path):\n\t\t\t\treturn path",
"def find(topdir,name=[],exclude=[]): \n from os import walk\n import re\n if type(name) == str: name = [name]\n if type(exclude) == str: exclude = [exclude]\n name = [re.compile(glob_to_regex(pattern)) for pattern in name]\n exclude = [re.compile(glob_to_regex(pattern)) for pattern in exclude]\n \n file_list = []\n for (directory,subdirs,files) in walk(topdir):\n for file in files:\n pathname = directory+\"/\"+file\n match = any([pattern.match(pathname) for pattern in name]) and\\\n not any([pattern.match(pathname) for pattern in exclude])\n if match: file_list += [pathname] \n return file_list",
"def locate(name):\n result_path = \"\"\n for directory in SEARCH_ORDER:\n research_exp = directory + '*' + name\n try:\n result = subprocess.run([\"locate\", \"--limit=1\", research_exp],\n stdout=subprocess.PIPE, check=True)\n result_path = result.stdout.splitlines()[0]\n break\n except:\n continue\n return result_path.decode()",
"def look_in_list(filenames):\n\t\t\tfor filename in filenames:\n\t\t\t\tparts = os.path.splitext(filename)\n\t\t\t\tif parts[1].lower() == '.jsonl' and \\\n\t\t\t\t\tnot os.path.basename(filename).startswith('.'):\n\t\t\t\t\treturn filename\n\t\t\treturn None",
"def find(pth: str, contains: str = None) -> Sequence[str]:\n from os import listdir\n from os.path import isfile\n fs0 = listdir(pth)\n fs0 = [os.path.join(pth, v) for v in fs0]\n fs0 = [v for v in fs0 if isfile(v)]\n if contains is None:\n return fs0\n\n fs = []\n for f in fs0:\n vs = os.path.splitext(os.path.split(f)[1])[0].split('+')\n if np.isin(contains, vs):\n fs.append(f)\n return fs",
"def find(self):\n extensions = [\".dll\", \".exe\", \".drv\", \".cpl\", \".ocx\", \".mui\"]\n for path, dirs, files in os.walk(self.disk):\n for filename in files:\n name = filename.lower()\n if name[-4:] in extensions:\n yield name, os.path.join(path, filename)",
"def find_file_or_subdir_in_dir(pattern, base_dir, matching_names):\n matching_names = fnmatch.filter(matching_names, pattern)\n assert len(matching_names) == 1\n return os.path.join(base_dir, matching_names[0])",
"def find_files( file_extension=config.g_ratt_file_extension , search_paths=None):\n\n # Dictionary of found files: key=file name, value=path found at\n d = {}\n\n # function to add found files to the dictionary\n def search_directory( d, file_extension, path ):\n if ( path != None ):\n for file in os.listdir(path):\n if file.endswith(file_extension):\n if ( file in d ):\n pass\n else:\n d[file] = path\n\n return d\n\n # No search paths...\n if (search_paths == None or len(search_paths) == 0 ):\n d = search_directory( d, file_extension, \".\")\n\n # Search the search paths\n else:\n num_paths = len(search_paths)\n for p in search_paths:\n d = search_directory( d, file_extension, p )\n\n\n # Return the list of found scripts\n return d",
"def findFileInDir(folder, namekeys):\n for filename in os.listdir(folder):\n status = True\n for key in namekeys:\n if key not in filename:\n status = False\n break\n if status:\n return os.path.join(folder, filename)",
"def search_for_extensions(path_root: str, extensions: Iterable[str] = None) -> Generator[str, None, None]:\n for root, dirs, files in os.walk(path_root):\n for filename in files:\n extension = os.path.splitext(filename)[1]\n if extensions is None or extension.lower() in extensions:\n yield join(root, filename)",
"def find(top, filename_glob, skip_glob_list):\n\n\tfile_list = []\n\tfor path, dirs, files in os.walk(top):\n\t\tfor glob in skip_glob_list:\n\t\t\tfor match in fnmatch.filter(dirs, glob):\n\t\t\t\tdirs.remove(match)\n\t\tfor filename in fnmatch.filter(files, filename_glob):\n\t\t\tif filename == os.path.basename(__file__):\n\t\t\t\tcontinue\n\t\t\tfile_list.append(os.path.join(path, filename))\n\treturn file_list",
"def find(filename, tree):\n import os\n result = []\n\n for root, dirs, files in os.walk(tree):\n if filename.startswith('.'):\n for f in files:\n if f.endswith(filename):\n result.append(os.path.join(root, f))\n else: \n if filename in files:\n result.append(os.path.join(root, filename))\n\n return result",
"def _find_files(root_dir, search):\n matches = []\n for root, _, filenames in os.walk(os.path.normpath(root_dir)):\n for filename in fnmatch.filter(filenames, search):\n matches.append(os.path.join(root, filename))\n return matches",
"def find_file(filename, string1, string2):\n\n name = get_name_file(filename)\n if string1 in name:\n index = name.find(string1)\n rest = name[index + len(string1):]\n else:\n return ''\n\n directory = os.path.dirname(filename)\n try:\n return [os.path.abspath(directory + '/' + f) for f in os.listdir(directory) if string2 + rest in f][0]\n except IndexError:\n return ''",
"def _ask_ld_for_paths(self):\n\n try:\n ld = Popen(['ld', '--verbose'], stdin=DEVNULL, stdout=PIPE)\n output = ld.stdout.read().decode()\n except:\n return []\n\n search_dirs = re.compile(r'SEARCH_DIR\\(([^)]*)\\)').findall(output)\n return [d.strip(' \"') for d in search_dirs]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
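A standalone sketch of the lookup behaviour described above, with hypothetical directory names and extensions; it mirrors the two-pass check (bare name first, then each known extension) without bottle's deprecation branches.

import os

def find_template(name, lookup, extensions=("tpl", "html")):
    for spath in lookup:
        spath = os.path.abspath(spath) + os.sep
        fname = os.path.abspath(os.path.join(spath, name))
        if not fname.startswith(spath):        # refuse paths that escape the lookup dir
            continue
        if os.path.isfile(fname):              # first pass: the bare name
            return fname
        for ext in extensions:                 # second pass: name plus each extension
            candidate = "%s.%s" % (fname, ext)
            if os.path.isfile(candidate):
                return candidate
    return None

print(find_template("index", ["./views"]))     # None unless ./views/index.tpl exists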
Wrap and return the given socket, plus WSGI environ entries. | def wrap(self, sock):
return sock, self._environ.copy() | [
"def wrap_socket(self, socket):\n return ssl.wrap_socket(socket,\n ca_certs=self.cert_file,\n cert_reqs=ssl.CERT_REQUIRED,\n ssl_version=ssl.PROTOCOL_TLSv1)",
"def _xwrap(sock):\n return sock.sock if isinstance(sock, Netcat) else simplesock.wrap(sock)",
"def environ(request):\r\n hostport = request.host.split(\":\")\r\n if len(hostport) == 2:\r\n host = hostport[0]\r\n port = int(hostport[1])\r\n else:\r\n host = request.host\r\n port = 443 if request.protocol == \"https\" else 80\r\n environ = {\r\n \"REQUEST_METHOD\": request.method,\r\n \"SCRIPT_NAME\": \"\",\r\n \"PATH_INFO\": to_wsgi_str(escape.url_unescape(\r\n request.path, encoding=None, plus=False)),\r\n \"QUERY_STRING\": request.query,\r\n \"REMOTE_ADDR\": request.remote_ip,\r\n \"SERVER_NAME\": host,\r\n \"SERVER_PORT\": str(port),\r\n \"SERVER_PROTOCOL\": request.version,\r\n \"wsgi.version\": (1, 0),\r\n \"wsgi.url_scheme\": request.protocol,\r\n \"wsgi.input\": BytesIO(escape.utf8(request.body)),\r\n \"wsgi.errors\": sys.stderr,\r\n \"wsgi.multithread\": False,\r\n \"wsgi.multiprocess\": True,\r\n \"wsgi.run_once\": False,\r\n }\r\n if \"Content-Type\" in request.headers:\r\n environ[\"CONTENT_TYPE\"] = request.headers.pop(\"Content-Type\")\r\n if \"Content-Length\" in request.headers:\r\n environ[\"CONTENT_LENGTH\"] = request.headers.pop(\"Content-Length\")\r\n for key, value in request.headers.items():\r\n environ[\"HTTP_\" + key.replace(\"-\", \"_\").upper()] = value\r\n return environ",
"def wrap_socket(self, socket: _pysocket.socket) -> TLSWrappedSocket:\n buffer = self.wrap_buffers()\n return TLSWrappedSocket(socket, buffer)",
"def make_environ(extra=None, **kwds):\n environ = {}\n if extra is not None:\n environ.update(extra)\n environ[\"wsgi.version\"] = (1, 0)\n environ[\"wsgi.url_scheme\"] = \"http\"\n environ[\"SERVER_NAME\"] = \"localhost\"\n environ[\"SERVER_PORT\"] = \"80\"\n environ[\"REQUEST_METHOD\"] = \"GET\"\n environ[\"SCRIPT_NAME\"] = \"\"\n environ[\"PATH_INFO\"] = \"/\"\n environ.update(kwds)\n return environ",
"def wrap_socket(\n self, socket: _pysocket.socket, server_hostname: Optional[str]\n ) -> TLSWrappedSocket:\n buffer = self.wrap_buffers(server_hostname)\n return TLSWrappedSocket(socket, buffer)",
"def setup_environ(app, global_conf, app_conf):\n\n from examplesite.lib.templating import make_templating\n couchish_config = adminish.config.make_couchish_config(app_conf, 'examplesite.model')\n adminish_config = adminish.config.make_adminish_config(couchish_config, store_factory=lambda request: request.environ['couchish'])\n notification_service = NotificationService(global_conf['smtpHost'], emailFromAddress=global_conf['emailFromAddress'], swallowSMTPErrors=True, emailTemplateDir=global_conf['emailTemplateDir'])\n templating = make_templating(app_conf)\n\n def application(environ, start_response):\n\n # Add additional keys to the environ here.\n _db = couchdb.Database(app_conf['couchish.db.url'])\n cache_db = couchdb.Database(app_conf['cache.db.url'])\n db = couchish.CouchishStore(_db, couchish_config, pre_flush_hook=wrap_hook(environ, hooks.pre_flush_hook), post_flush_hook=wrap_hook(environ, hooks.post_flush_hook))\n environ['restish.templating'] = templating\n environ['couchish'] = db\n environ['cache'] = cache_db\n environ['adminish'] = adminish_config\n environ['searcher'] = index.Searcher(db, app_conf['index_dir'], adminish_config = adminish_config)\n environ['notification'] = notification_service\n return app(environ, start_response)\n\n return application",
"def _base_environ(self, **request):\n environ = {\n 'HTTP_COOKIE': self.cookies.output(header='', sep='; '),\n 'PATH_INFO': '/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '127.0.0.1',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': 'testserver',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'wsgi.version': (1, 0),\n 'wsgi.url_scheme': 'http',\n 'wsgi.errors': self.errors,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': False,\n 'wsgi.run_once': False,\n }\n environ.update(self.defaults)\n environ.update(request)\n return environ",
"def inject_into_urllib3() -> None:\n log.debug(\"Injecting ssl_wrap_socket_with_ocsp\")\n connection_.ssl_wrap_socket = ssl_wrap_socket_with_ocsp",
"def create_socket():\r\n return socket.socket(socket.AF_INET, socket.SOCK_STREAM)",
"def __init__(self, environ, start_response):\n super(WSGISession, self).__init__(environ)\n for key, value in environ.items():\n if key.startswith('wsgi.'):\n key = 'wsgi_' + key[5:]\n self[key] = value\n self.start_response = start_response\n self.time = time.time()",
"def patch_socket(dns=True, aggressive=True):\n from gevent import socket\n _socket = __import__('socket')\n _socket.socket = socket.socket\n _socket.SocketType = socket.SocketType\n _socket.create_connection = socket.create_connection\n if hasattr(socket, 'socketpair'):\n _socket.socketpair = socket.socketpair\n if hasattr(socket, 'fromfd'):\n _socket.fromfd = socket.fromfd\n try:\n from gevent.socket import ssl, sslerror\n _socket.ssl = ssl\n _socket.sslerror = sslerror\n except ImportError:\n if aggressive:\n try:\n del _socket.ssl\n except AttributeError:\n pass\n if dns:\n patch_dns()",
"def extension_environ(env_config_path, monkeypatch):\n monkeypatch.setattr(serverextension, \"ENV_CONFIG_PATH\", [str(env_config_path)])",
"def wrapped_env(self) -> [Env, SimEnv]:\n return self._wrapped_env",
"def configure_environ(dsn_env_name='CACHES_DSN', parse_class=DSN):\n inters = []\n cs = dsnparse.parse_environs(dsn_env_name, parse_class=parse_class)\n for c in cs:\n inter = c.interface()\n set_interface(inter, c.connection_name)\n inters.append(inter)\n return inters",
"def getWsgi(self):\n return partial(Application.wsgi, self)",
"def ssl_wrap_socket(\n socket: socket.socket,\n ssl_options: Union[Dict[str, Any], ssl.SSLContext],\n server_hostname: Optional[str] = None,\n server_side: Optional[bool] = None,\n **kwargs: Any\n) -> ssl.SSLSocket:\n context = ssl_options_to_context(ssl_options, server_side=server_side)\n if server_side is None:\n server_side = False\n if ssl.HAS_SNI:\n # In python 3.4, wrap_socket only accepts the server_hostname\n # argument if HAS_SNI is true.\n # TODO: add a unittest (python added server-side SNI support in 3.4)\n # In the meantime it can be manually tested with\n # python3 -m tornado.httpclient https://sni.velox.ch\n return context.wrap_socket(\n socket, server_hostname=server_hostname, server_side=server_side, **kwargs\n )\n else:\n return context.wrap_socket(socket, server_side=server_side, **kwargs)",
"def environInject(shellName):",
"def __call__(self, environ, start_response):\n environ[\"wsgi.url_scheme\"] = environ.get(\"HTTP_X_FORWARDED_PROTO\",\n \"http\")\n return self.app(environ, start_response)",
"def _wrap_app(app):\n app = request_id.RequestId(app)\n\n if CONF.audit.enabled:\n try:\n app = audit_middleware.AuditMiddleware(\n app,\n audit_map_file=CONF.audit.audit_map_file,\n ignore_req_list=CONF.audit.ignore_req_list\n )\n except (EnvironmentError, OSError,\n audit_middleware.PycadfAuditApiConfigError) as e:\n raise exceptions.InputFileError(\n file_name=CONF.audit.audit_map_file,\n reason=e\n )\n\n if cfg.CONF.api_settings.auth_strategy == constants.KEYSTONE:\n app = keystone.SkippingAuthProtocol(app, {})\n\n app = http_proxy_to_wsgi.HTTPProxyToWSGI(app)\n\n # This should be the last middleware in the list (which results in\n # it being the first in the middleware chain). This is to ensure\n # that any errors thrown by other middleware, such as an auth\n # middleware - are annotated with CORS headers, and thus accessible\n # by the browser.\n app = cors.CORS(app, cfg.CONF)\n cors.set_defaults(\n allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id'],\n allow_methods=['GET', 'PUT', 'POST', 'DELETE'],\n expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id']\n )\n\n return app"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
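The one-line wrap() above leans on a common pattern: keep one template environ per listener and hand each connection its own copy. A hypothetical, self-contained illustration (the keys shown are ordinary WSGI entries, not taken from this dataset):

base_environ = {
    "wsgi.version": (1, 0),
    "wsgi.url_scheme": "http",
    "SERVER_NAME": "localhost",
    "SERVER_PORT": "8080",
}

def wrap(sock, base=base_environ):
    # Return the socket untouched plus a per-connection copy of the environ.
    return sock, base.copy()

_, env_a = wrap(object())
env_a["PATH_INFO"] = "/a"              # mutate one connection's copy...
_, env_b = wrap(object())
print("PATH_INFO" in env_b)            # ...the next copy is unaffected: False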
Return an SSL.Context from self attributes. | def get_context(self):
# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c | [
"def create_ssl_context(self):\n ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ssl_context.options |= ssl.OP_NO_TLSv1\n ssl_context.options |= ssl.OP_NO_TLSv1_1\n ssl_context.options |= ssl.OP_NO_COMPRESSION\n ssl_context.set_ciphers(self.tls_ciphers)\n ssl_context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)\n ssl_context.set_alpn_protocols([\"h2\", \"http/1.1\"])\n return ssl_context",
"def getContext(self):\n ctx = Context(SSLv23_METHOD)\n ctx.use_certificate_file(\"cert.pem\")\n ctx.use_privatekey_file(\"key.pem\")\n ctx.load_tmp_dh(\"dhparam.pem\")\n ctx.set_options(OP_SINGLE_DH_USE|OP_NO_SSLv2|OP_NO_SSLv3)\n ctx.set_verify(VERIFY_PEER, self._verify)\n return ctx",
"def _create_ssl_context(cfg: Any) -> SSLContext:\n ...",
"def make_context(self, *args, **kwargs):\r\n return self.ctxcls(*args, **kwargs)",
"def configure_ssl_context(credentials):\n\n ssl_ctx = ssl.SSLContext(credentials.ssl_version)\n ssl_ctx.verify_mode = ssl.CERT_REQUIRED\n if hasattr(ssl_ctx, 'check_hostname'):\n ssl_ctx.check_hostname = True\n if credentials.cacert_file is None:\n raise SecurityError(\"cacert_file is required in SecurityCreds\")\n if credentials.ciphers is not None:\n ssl_ctx.set_ciphers(credentials.ciphers)\n\n ssl_ctx.load_verify_locations(credentials.cacert_file)\n if credentials.ciphers is not None:\n ssl_ctx.set_ciphers(credentials.ciphers)\n\n pkeyfile = credentials.pkey_file\n certfile = credentials.cert_file\n if pkeyfile and not certfile:\n raise SecurityError(\"cert_file must be specified with pkey_file\")\n if certfile and not pkeyfile:\n pkeyfile = certfile\n if certfile:\n ssl_ctx.load_cert_chain(certfile, pkeyfile)\n # TODO https://bugs.python.org/issue8813\n if credentials.crl_file is not None:\n ssl_ctx.load_verify_locations(credentials.crl_file)\n ssl_ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF\n\n # SSLv2 considered harmful.\n ssl_ctx.options |= ssl.OP_NO_SSLv2\n\n # SSLv3 has problematic security and is only required for really old\n # clients such as IE6 on Windows XP\n ssl_ctx.options |= ssl.OP_NO_SSLv3\n\n # disable compression to prevent CRIME attacks (OpenSSL 1.0+)\n ssl_ctx.options |= ssl.OP_NO_COMPRESSION\n\n return ssl_ctx",
"def testing_context():\n client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n client_context.load_verify_locations(SIGNING_CA)\n\n server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n server_context.load_cert_chain(SIGNED_CERTFILE)\n server_context.load_verify_locations(SIGNING_CA)\n\n return client_context, server_context, 'localhost'",
"def get_ssl_context(*dummy): # type: ignore\n raise ConfigurationError(\"The ssl module is not available.\")",
"def _get_default_ssl_context():\n return ssl.create_default_context()",
"def ssl(self):\r\n return self.sslobj",
"def load_ssl_context(cert_file, key_file=None):\n context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n context.load_cert_chain(cert_file, key_file)\n\n return context",
"async def make_context(self, args):\n _, ctx = await self._make_argkey_and_context(args)\n return ctx",
"def ssl_options_to_context(\n ssl_options: Union[Dict[str, Any], ssl.SSLContext],\n server_side: Optional[bool] = None,\n) -> ssl.SSLContext:\n if isinstance(ssl_options, ssl.SSLContext):\n return ssl_options\n assert isinstance(ssl_options, dict)\n assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options\n # TODO: Now that we have the server_side argument, can we switch to\n # create_default_context or would that change behavior?\n default_version = ssl.PROTOCOL_TLS\n if server_side:\n default_version = ssl.PROTOCOL_TLS_SERVER\n elif server_side is not None:\n default_version = ssl.PROTOCOL_TLS_CLIENT\n context = ssl.SSLContext(ssl_options.get(\"ssl_version\", default_version))\n if \"certfile\" in ssl_options:\n context.load_cert_chain(\n ssl_options[\"certfile\"], ssl_options.get(\"keyfile\", None)\n )\n if \"cert_reqs\" in ssl_options:\n if ssl_options[\"cert_reqs\"] == ssl.CERT_NONE:\n # This may have been set automatically by PROTOCOL_TLS_CLIENT but is\n # incompatible with CERT_NONE so we must manually clear it.\n context.check_hostname = False\n context.verify_mode = ssl_options[\"cert_reqs\"]\n if \"ca_certs\" in ssl_options:\n context.load_verify_locations(ssl_options[\"ca_certs\"])\n if \"ciphers\" in ssl_options:\n context.set_ciphers(ssl_options[\"ciphers\"])\n if hasattr(ssl, \"OP_NO_COMPRESSION\"):\n # Disable TLS compression to avoid CRIME and related attacks.\n # This constant depends on openssl version 1.0.\n # TODO: Do we need to do this ourselves or can we trust\n # the defaults?\n context.options |= ssl.OP_NO_COMPRESSION\n return context",
"def get_http2_ssl_context():\n # Get the basic context from the standard library.\n ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n ctx.set_ciphers(\"ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20\")\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError:\n print(\"BAD@get_http2_ssl_context\")\n\t\n return ctx",
"def _createXacmlRequestCtx(cls):\n ctx = Request()\n \n ctx.subjects.append(Subject())\n openidAttr = Attribute()\n ctx.subjects[-1].attributes.append(openidAttr)\n openidAttr.attributeId = cls.OPENID_ATTR_ID\n openidAttr.dataType = 'http://www.w3.org/2001/XMLSchema#anyURI'\n \n anyUriAttrValue = cls.attributeValueClassFactory(openidAttr.dataType)\n \n openidAttrVal = anyUriAttrValue(TestUserDatabase.OPENID_URI)\n openidAttr.attributeValues.append(openidAttrVal) \n \n return ctx",
"def make_adhoc_ssl_context():\n cert, key = generate_adhoc_ssl_pair(host='example.com')\n\n with contextlib.ExitStack() as clean_stack:\n with contextlib.ExitStack() as close_stack:\n cert_handle, cert_file = tempfile.mkstemp(\n prefix='verktyg-adhoc-', suffix='.cert.pem'\n )\n close_stack.callback(os.close, cert_handle)\n clean_stack.callback(os.remove, cert_file)\n\n key_handle, key_file = tempfile.mkstemp(\n prefix='verktyg-adhoc-', suffix='.key.pem'\n )\n close_stack.callback(os.close, key_handle)\n clean_stack.callback(os.remove, key_file)\n\n os.write(cert_handle, cert)\n os.write(key_handle, key)\n\n return load_ssl_context(cert_file, key_file)",
"def clone(self):\n with self._lock:\n new_ctx = Context(\n trace_id=self._parent_trace_id,\n span_id=self._parent_span_id,\n sampling_priority=self._sampling_priority,\n )\n new_ctx._current_span = self._current_span\n return new_ctx",
"def load(self):\n data = self.get_data(\"certificates/%s\" % self.id)\n certificate = data[\"certificate\"]\n\n for attr in certificate.keys():\n setattr(self, attr, certificate[attr])\n\n return self",
"def security(cls):\n ca_file = str(security.CA_FILE)\n cert_file = str(security.CERT_FILE)\n return Security(\n tls_ca_file=ca_file,\n tls_worker_cert=cert_file,\n tls_worker_key=cert_file,\n tls_client_cert=cert_file,\n tls_client_key=cert_file,\n tls_scheduler_cert=cert_file,\n tls_scheduler_key=cert_file,\n require_encryption=True,\n )",
"def _parseClientSSL(**kwargs):\n from twisted.internet import ssl\n kwargs = _parseClientTCP(**kwargs)\n certKey = kwargs.pop('certKey', None)\n privateKey = kwargs.pop('privateKey', None)\n caCertsDir = kwargs.pop('caCertsDir', None)\n if certKey is not None:\n certx509 = ssl.Certificate.loadPEM(\n FilePath(certKey).getContent()).original\n else:\n certx509 = None\n if privateKey is not None:\n privateKey = ssl.PrivateCertificate.loadPEM(\n FilePath(privateKey).getContent()).privateKey.original\n else:\n privateKey = None\n if caCertsDir is not None:\n verify = True\n caCerts = _loadCAsFromDir(FilePath(caCertsDir))\n else:\n verify = False\n caCerts = None\n kwargs['sslContextFactory'] = ssl.CertificateOptions(\n method=ssl.SSL.SSLv23_METHOD,\n certificate=certx509,\n privateKey=privateKey,\n verify=verify,\n caCerts=caCerts\n )\n return kwargs",
"def get_ssl_context(usercert, userkey):\n pass_str = 'Please enter the pass phrase for'\n for _ in range(0, 2): # allow two password attempts\n def prompt_for_password(verify):\n return getpass.getpass(pass_str + f\" '{userkey}':\").encode('utf-8')\n\n ssl_context = SSL.Context()\n ssl_context.set_options(m2.SSL_OP_NO_SSLv2 | m2.SSL_OP_NO_SSLv3)\n\n try:\n ssl_context.load_cert_chain(usercert, userkey, callback=prompt_for_password)\n return ssl_context\n except SSL.SSLError as exc:\n if 'bad password read' in str(exc):\n pass_str = 'Incorrect password. Please enter the password again for'\n else:\n raise\n\n # if we fell off the loop, the passphrase was incorrect twice\n raise BadPassphraseException('Incorrect passphrase. Attempt failed twice.')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
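The document above targets pyOpenSSL; purely as a comparison point, here is a sketch of roughly the same setup with the standard-library ssl module (file paths are placeholders, and chain handling may differ from the pyOpenSSL adapter):

import ssl

def make_server_context(certificate, private_key, certificate_chain=None):
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(certfile=certificate, keyfile=private_key)
    if certificate_chain:
        # Trust the intermediate/CA bundle when one is configured.
        ctx.load_verify_locations(cafile=certificate_chain)
    return ctx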
Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names. | def plat_specific_errors(*errnames):
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys()) | [
"def listErrorCodes(errno=None):\n\n\tif errno is None:\n\t\tfor i in range(MinErrorNo, (MaxErrorNo+1)):\n\t\t\tlistErrorCodes(errno=i)\n\telse:\n\t\tif errno == 1:\n\t\t\tprint \"1: End of file encountered during filehandle read\"\n\t\telif errno == 2:\n\t\t\tprint \"2: End of file encountered during numpy.fromfile call\"\n\t\telif errno == 3:\n\t\t\tprint \"3: Mark 5C sync word differs from expected\"\n\t\telif errno == 4:\n\t\t\tprint \"4: Data appears to be TBW, not TBN as expected\"\n\t\telif errno == 5:\n\t\t\tprint \"5: Data appears to be TBN, not TBW as expected\"\n\t\telse:\n\t\t\tprint \"Unknown error code '%i'\" % errno",
"def test_plat_specific_errors(err_names, err_nums):\n actual_err_nums = errors.plat_specific_errors(*err_names)\n assert len(actual_err_nums) == len(err_nums)\n assert sorted(actual_err_nums) == sorted(err_nums)",
"def __errcode_names(cls, err_vars):\n errcode = None\n errmsg = None\n for evar in err_vars:\n stdname = evar.get_prop_value('standard_name')\n if stdname == 'ccpp_error_code':\n errcode = evar.get_prop_value('local_name')\n elif stdname == 'ccpp_error_message':\n errmsg = evar.get_prop_value('local_name')\n else:\n emsg = \"Bad errcode variable, '{}'\"\n raise ParseInternalError(emsg.format(stdname))\n # end if\n # end for\n if (not errcode) or (not errmsg):\n raise ParseInternalError(\"Unsupported error scheme\")\n # end if\n return errcode, errmsg",
"def get_all_errs(self):\n thiserr = self.get_err()\n errors = []\n while thiserr != '+0,\"No error\"':\n thiserr = self.get_err()\n errors.append(thiserr)\n return errors",
"async def read_errors(self) -> list[int]:\n last_5_errors = await self.create_and_send_command(ERRORS)\n logger.debug(f\"Error reading returns {last_5_errors}\")\n return [int(err_code) for err_code in last_5_errors.split(\",\")]",
"def get_socket_conn_refused_errs():\n errors = [errno.ECONNREFUSED]\n if hasattr(errno, 'ENETUNREACH'):\n # On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED\n errors.append(errno.ENETUNREACH)\n if hasattr(errno, 'EADDRNOTAVAIL'):\n # bpo-31910: socket.create_connection() fails randomly\n # with EADDRNOTAVAIL on Travis CI\n errors.append(errno.EADDRNOTAVAIL)\n if hasattr(errno, 'EHOSTUNREACH'):\n # bpo-37583: The destination host cannot be reached\n errors.append(errno.EHOSTUNREACH)\n if not IPV6_ENABLED:\n errors.append(errno.EAFNOSUPPORT)\n return errors",
"def library_errors():\n ret = quick_library_check()\n return ret[_ERRORS]",
"def get_errno(e):\n try:\n return e.errno\n except AttributeError:\n return e.args[0]",
"def get_errors(self):\n result = []\n for error in self.errors:\n result.append(os.path.basename(error[0]) +\n ':\\n ' + str(error[1]) + '\\n')\n return result",
"def get_err_counter(self, name):\n return sum(self.get_counter(name))",
"def is_errno(error, errnos):\n\n if not isinstance(error, EnvironmentError):\n return False\n\n if isinstance(errnos, collections.Iterable):\n return error.errno in errnos\n else:\n return error.errno == errnos",
"def all_error_type(self):\n all_count_error_type = []\n for i in range(self.size):\n d = dict()\n for et in ErrorType:\n d[et] = 0\n all_count_error_type.append(d)\n for t in self.multi_alignment_tokens:\n error_type_list = t.error_type\n\n for (M, error_type) in enumerate(error_type_list):\n all_count_error_type[M][error_type] += 1\n return all_count_error_type\n\n # print(all_count_error_type)",
"def errors(self):\n return [thread.err for thread in self._threads]",
"def list_error_types():\n return list(sorted(ApiError.subtypes(), key=lambda e: e.error_type))",
"def get_errors(self, value):\n return list(self.errors(value))",
"def GetErrors(self):\n return error_check.GetErrors(self.GetData(), self.edid_version)",
"def errno_from_exception(e):\n\n\tif hasattr(e, 'errno'):\n\t\treturn e.errno\n\telif e.args:\n\t\treturn e.args[0]\n\telse:\n\t\treturn None",
"def calculate_errors(residuals):\n num_residuals = len(residuals)\n mfe = (residuals.sum() / num_residuals).tolist()[0]\n mae = (residuals.abs().sum() / num_residuals).tolist()[0]\n rmse = (residuals.pow(2).sum().pow(0.5)).tolist()[0]\n residuals = residuals.values\n residuals = [value.item() for value in residuals]\n return mfe, mae, rmse",
"def errors(self, cluster: str, namespace: str) -> list[str]:\n return self._errors.setdefault(cluster, {}).setdefault(namespace, [])",
"def getErrorIndices(self):\n\n errors = set()\n\n for const in self.constraints:\n errorRows = const.eval(self.df)\n for col in const.getColumnList(self.df):\n for row in errorRows:\n errors.add((row, col))\n\n return errors"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
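A self-contained usage sketch of the helper above (its body is repeated here so the snippet runs on its own); the errno names passed in are a typical "ignore these socket errors" set and are illustrative, not prescribed by the dataset.

import errno

def plat_specific_errors(*errnames):
    errno_names = dir(errno)
    nums = [getattr(errno, k) for k in errnames if k in errno_names]
    return list(dict.fromkeys(nums).keys())       # de-dupe while keeping order

socket_errors_to_ignore = plat_specific_errors(
    "EPIPE", "EBADF", "WSAEBADF", "ENOTCONN", "ECONNRESET", "WSAECONNRESET")
print(socket_errors_to_ignore)                    # numeric values vary per platform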
Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens. | def read_headers(rfile, hdict=None):
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict | [
"def read_header(header):\n request = str(header, \"ASCII\").split(\"\\r\\n\", 1)[0]\n request_headers = str(header, \"ASCII\").split(\"\\r\\n\", 1)[1]\n request_headers = message_from_string(request_headers)\n request_headers = dict(request_headers)\n request_headers[\"Method\"] = request.split(\" \")[0]\n request_headers[\"Request\"] = request.split(\" \")[1]\n request_headers[\"Protocol\"] = request.split(\" \")[2]\n return request_headers",
"def parse_headers(self, raw_headers):\n header = {}\n self.clear_header(header)\n for line in raw_headers.splitlines():\n if line.startswith(\"HTTP/\"):\n self.parse_http_protocol(line, header)\n elif \": \" in line:\n self.parse_http_header(line, header)\n elif self.env[\"url\"].startswith(\"ftp://\"):\n self.parse_ftp_header(line, header)\n elif line == \"\":\n # we got an empty line; end of headers (or curl exited)\n if header.get(\"http_result_code\") in [\n \"301\",\n \"302\",\n \"303\",\n \"307\",\n \"308\",\n ]:\n # redirect, so more headers are coming.\n # Throw away the headers we've received so far\n header[\"http_redirected\"] = header.get(\"location\", None)\n self.clear_header(header)\n return header",
"def read_header(self, lines):\n header_list = []\n for line in lines:\n header_list.append(line.strip())\n if self.HEADER_ENDING in line:\n break\n header_dict = {}\n for line in header_list:\n if line and not line.startswith('#'):\n line_list = line.split('\\t')\n header_dict[line_list[0]] = tuple(line_list[1:])\n return header_dict, len(header_list)",
"def _parse_header(fh):\n headerConverters = {\n b'StartFontMetrics': float,\n b'FontName': _to_str,\n b'FullName': _to_str,\n b'FamilyName': _to_str,\n b'Weight': _to_str,\n b'ItalicAngle': float,\n b'IsFixedPitch': _to_bool,\n b'FontBBox': _to_list_of_ints,\n b'UnderlinePosition': _to_int,\n b'UnderlineThickness': _to_int,\n b'Version': _to_str,\n b'Notice': _to_str,\n b'EncodingScheme': _to_str,\n b'CapHeight': float, # Is the second version a mistake, or\n b'Capheight': float, # do some AFM files contain 'Capheight'? -JKS\n b'XHeight': float,\n b'Ascender': float,\n b'Descender': float,\n b'StdHW': float,\n b'StdVW': float,\n b'StartCharMetrics': _to_int,\n b'CharacterSet': _to_str,\n b'Characters': _to_int,\n }\n d = {}\n while 1:\n line = bytes(fh.readline(), 'ascii')\n if not line: break\n line = line.rstrip()\n if line.startswith(b'Comment'): continue\n lst = line.split(b' ', 1 )\n key = lst[0]\n if len( lst ) == 2:\n val = lst[1]\n else:\n val = b''\n #key, val = line.split(' ', 1)\n try: d[key] = headerConverters[key](val)\n except ValueError:\n continue\n except KeyError:\n continue\n if key==b'StartCharMetrics': return d\n raise RuntimeError('Bad parse')",
"def decode_ros_handshake_header(header_str):\n (size, ) = struct.unpack('<I', header_str[0:4])\n size += 4 # add in 4 to include size of size field\n header_len = len(header_str)\n if size > header_len:\n raise ROSHandshakeException('Incomplete header. Expected %s bytes but only have %s' % ((size+4), header_len))\n\n d = {}\n start = 4\n while start < size:\n (field_size, ) = struct.unpack('<I', header_str[start:start+4])\n if field_size == 0:\n raise ROSHandshakeException('Invalid 0-length handshake header field')\n start += field_size + 4\n if start > size:\n raise ROSHandshakeException('Invalid line length in handshake header: %s' % size)\n line = header_str[start-field_size:start]\n\n # python3 compatibility\n if python3 == 1:\n line = line.decode()\n\n idx = line.find('=')\n if idx < 0:\n raise ROSHandshakeException('Invalid line in handshake header: [%s]' % line)\n key = line[:idx]\n value = line[idx+1:]\n d[key.strip()] = value\n return d",
"def __readHeaders(self, fh):\n fh.readline()\n fh.readline()\n \n headersStr = fh.readline()\n headers = [ s.strip() for s in headersStr[1:].split() ]\n unitsStr = fh.readline()\n units = [ s.strip() for s in unitsStr[1:].split() ]\n \n fh.readline()\n \n headers.pop(1)\n units[0] = 'mjd'\n units[1] = 'seconds'\n\n self.startDate = self.__getStartDate(fh)\n\n # Get a mapping of header names to column index\n headerDict = dict(list(zip(headers,list(range(len(headers))))))\n return (headerDict, units)",
"def _parse_headers(self, instr):\n top, rest = hdr_end.split(instr, 1)\n self.input_header_length = len(top)\n header_lines = top.splitlines()\n\n # chop off the top line\n while True: # TODO: limit?\n try:\n top_line = header_lines.pop(0)\n if top_line.strip() != \"\":\n break\n except IndexError: # empty\n return rest\n \n try:\n hdr_tuples, conn_tokens, transfer_codes, content_length \\\n = self._parse_fields(header_lines, True)\n except TypeError: # returned None because there was an error\n if not self.inspecting:\n return \"\" # throw away the rest\n \n # ignore content-length if transfer-encoding is present\n if transfer_codes != [] and content_length != None:\n content_length = None\n\n try:\n allows_body = self.input_start(top_line, hdr_tuples,\n conn_tokens, transfer_codes, content_length)\n except ValueError: # parsing error of some kind; abort.\n if not self.inspecting:\n return \"\" # throw away the rest\n allows_body = True\n\n self._input_state = HEADERS_DONE\n if not allows_body:\n self._input_delimit = NOBODY\n elif len(transfer_codes) > 0:\n if transfer_codes[-1] == 'chunked':\n self._input_delimit = CHUNKED\n self._input_body_left = -1 # flag that we don't know\n else:\n self._input_delimit = CLOSE\n elif content_length != None:\n self._input_delimit = COUNTED\n self._input_body_left = content_length\n else:\n self._input_delimit = CLOSE\n return rest",
"def parse_headers(header_dict):\n values = dict()\n values['from_addr'] = header_dict.get(b'From', '').decode('utf8')\n values['to_addr'] = header_dict.get(b'To', '').decode('utf8')\n values['subject'] = header_dict.get(b'Subject', '').decode('utf8')\n values['spam'] = header_dict.get(b'X-Spam-Flag', '').decode('utf8')\n message_timestamp_str = header_dict[b'Date'].decode('utf8')\n values['ts'] = maya.MayaDT.from_rfc2822(message_timestamp_str)\n return values",
"def read_ros_handshake_header(sock, b, buff_size):\n header_str = None\n while not header_str:\n d = sock.recv(buff_size)\n if not d:\n raise ROSHandshakeException('connection from sender terminated before handshake header received. %s bytes were received. Please check sender for additional details.' % b.tell())\n b.write(d)\n btell = b.tell()\n if btell > 4:\n # most likely we will get the full header in the first recv, so\n # not worth tiny optimizations possible here\n bval = b.getvalue()\n (size,) = struct.unpack('<I', bval[0:4])\n if btell - 4 >= size:\n header_str = bval\n\n # memmove the remnants of the buffer back to the start\n leftovers = bval[size+4:]\n b.truncate(len(leftovers))\n b.seek(0)\n b.write(leftovers)\n\n # process the header\n return decode_ros_handshake_header(bval)",
"def parse_http_header(cls, header_string):\n if not header_string:\n return None\n\n # Get parts seperated by ';'\n parts = [s.strip() for s in header_string.split(';')]\n\n # Get key-value pairs from parts\n data = {}\n for part in parts:\n if '=' in part:\n key, value = part.split('=')\n # Ignore reserved keys\n if key.lower() not in ['path', 'domain', 'expires']:\n data[key] = value\n\n # Cookie send by client only contains data fields\n return cls(data, None, None)",
"def read_request(stream):\n request_line, headers, body = yield from read_message(stream)\n method, uri, version = request_line[:-2].decode().split(None, 2)\n return method, uri, headers, body",
"def consume_header_bytes(self, data):\n # We're done if there is no content.\n if not data or (len(data) == 0):\n return None\n\n full_header_len = 4\n\n assert len(self.header_contents) < full_header_len\n\n bytes_avail = len(data)\n bytes_needed = full_header_len - len(self.header_contents)\n header_bytes_avail = min(bytes_needed, bytes_avail)\n self.header_contents += data[:header_bytes_avail]\n if len(self.header_contents) == full_header_len:\n import struct\n # End of header.\n self.packet_bytes_remaining = struct.unpack(\n \"!I\", self.header_contents)[0]\n self.header_contents = b\"\"\n self.reading_header = False\n return data[header_bytes_avail:]\n\n # If we made it here, we've exhausted the data and\n # we're still parsing header content.\n return None",
"def read_header(req, param_name, **_):\n try:\n return req.get_header(param_name, required=True)\n except HTTPBadRequest:\n raise MissingValueError(Location.HEADER, param_name)",
"def h2_safe_headers(headers):\n stripped = {\n i.lower().strip()\n for k, v in headers if k == b'connection'\n for i in v.split(b',')\n }\n stripped.add(b'connection')\n\n return [header for header in headers if header[0] not in stripped]",
"def _parse_dict_header(self, address):\n\n addr = address\n num_separators = self._memory[addr]\n separators = self._memory[(addr + 1):(addr + num_separators)]\n addr += (1 + num_separators)\n entry_length = self._memory[addr]\n addr += 1\n num_entries = self._memory.read_word(addr)\n addr += 2\n\n return num_entries, entry_length, separators, addr",
"def parse_header(self, header):\n header_separator = self.header_separator.encode()\n length, separator, message_chunk = header.partition(header_separator)\n try:\n return int(length), message_chunk\n except ValueError:\n return None, None",
"def _clean_headers(self, headers: dict) -> dict:\n\n def getval(key, value):\n \"\"\"Pass.\"\"\"\n if isinstance(self.LOG_HIDE_STR, str) and self.LOG_HIDE_STR:\n skey = str(key).lower()\n for check in self.LOG_HIDE_HEADERS:\n if (isinstance(check, str) and check.lower() == skey) or (\n isinstance(check, t.Pattern) and check.search(key)\n ):\n return self.LOG_HIDE_STR\n return value\n\n # noinspection PyBroadException\n try:\n return {k: getval(k, v) for k, v in headers.items()}\n except Exception: # pragma: no cover # noqa: BLE001\n return headers",
"def rfc6376_simple_holistic(\n headers: Headers, body: Optional[bytes]\n) -> Tuple[Headers, bytes]:\n # Note: This modifies headers in place\n # There may be no body, but canonicalisation synthesizes one\n # Therefore we may need to add CRLF to the last header value\n if body is None:\n if headers:\n if not headers[-1][1].endswith(b\"\\r\\n\"):\n headers[-1][1] += b\"\\r\\n\"\n return (headers, b\"\\r\\n\")\n return (headers, rfc6376_simple_body(body))",
"def parse_auth_header(header):\n try:\n to_return = dict(\n map(\n lambda x: x.strip().split('='),\n header.split(' ')\n )\n )\n except (IndexError, ValueError):\n return None\n return to_return",
"def _get_headers(payload):\n if payload and \"RequestHeaders\" in payload:\n return payload[\"RequestHeaders\"]\n\n return {}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse the next HTTP request start-line and message-headers. | def parse_request(self):
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
success = self.read_request_line()
except MaxSizeExceeded:
self.simple_response("414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
else:
if not success:
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True | [
"def parse_request(self):\n self.method, self.location, self.http_version = \\\n self.request_line.decode(\"utf-8\").split()",
"def parse_request(self):\r\n self.command = None # set in case of error on the first line\r\n self.request_version = version = \"HTTP/0.9\" # Default\r\n self.close_connection = 1\r\n requestline = self.raw_requestline\r\n if requestline[-2:] == '\\r\\n':\r\n requestline = requestline[:-2]\r\n elif requestline[-1:] == '\\n':\r\n requestline = requestline[:-1]\r\n self.requestline = requestline\r\n words = requestline.split()\r\n if len(words) == 3:\r\n [command, path, version] = words\r\n if version[:5] != 'HTTP/':\r\n self.send_error(400, \"Bad request version (%r)\" % version)\r\n return False\r\n try:\r\n base_version_number = version.split('/', 1)[1]\r\n version_number = base_version_number.split(\".\")\r\n # RFC 2145 section 3.1 says there can be only one \".\" and\r\n # - major and minor numbers MUST be treated as\r\n # separate integers;\r\n # - HTTP/2.4 is a lower version than HTTP/2.13, which in\r\n # turn is lower than HTTP/12.3;\r\n # - Leading zeros MUST be ignored by recipients.\r\n if len(version_number) != 2:\r\n raise ValueError\r\n version_number = int(version_number[0]), int(version_number[1])\r\n except (ValueError, IndexError):\r\n self.send_error(400, \"Bad request version (%r)\" % version)\r\n return False\r\n if version_number >= (1, 1) and self.protocol_version >= \"HTTP/1.1\":\r\n self.close_connection = 0\r\n if version_number >= (2, 0):\r\n self.send_error(505,\r\n \"Invalid HTTP Version (%s)\" % base_version_number)\r\n return False\r\n elif len(words) == 2:\r\n [command, path] = words\r\n self.close_connection = 1\r\n if command != 'GET':\r\n self.send_error(400,\r\n \"Bad HTTP/0.9 request type (%r)\" % command)\r\n return False\r\n elif not words:\r\n return False\r\n else:\r\n self.send_error(400, \"Bad request syntax (%r)\" % requestline)\r\n return False\r\n self.command, self.path, self.request_version = command, path, version\r\n\r\n # Examine the headers and look for a Connection directive\r\n self.headers = self.MessageClass(self.rfile, 0)\r\n\r\n conntype = self.headers.get('Connection', \"\")\r\n if conntype.lower() == 'close':\r\n self.close_connection = 1\r\n elif (conntype.lower() == 'keep-alive' and\r\n self.protocol_version >= \"HTTP/1.1\"):\r\n self.close_connection = 0\r\n return True",
"def parseHead(self):\n if self.headed:\n return # already parsed the head\n\n self.headers = lodict()\n\n # create generator\n lineParser = httping.parseLine(raw=self.msg, eols=(CRLF, LF), kind=\"status line\")\n while True: # parse until we get a non-100 status\n if self.closed and not self.msg: # connection closed prematurely\n raise httping.PrematureClosure(\"Connection closed unexpectedly\"\n \" while parsing response start line\")\n\n line = next(lineParser)\n if line is None:\n (yield None)\n continue\n lineParser.close() # close generator\n\n version, status, reason = httping.parseStatusLine(line)\n if status != httping.CONTINUE: # 100 continue (with request or ignore)\n break\n\n leaderParser = httping.parseLeader(raw=self.msg,\n eols=(CRLF, LF),\n kind=\"continue header line\")\n while True:\n if self.closed and not self.msg: # connection closed prematurely\n raise httping.PrematureClosure(\"Connection closed unexpectedly\"\n \" while parsing response header\")\n headers = next(leaderParser)\n if headers is not None:\n leaderParser.close()\n break\n (yield None)\n\n self.code = self.status = status\n self.reason = reason.strip()\n if version in (\"HTTP/1.0\", \"HTTP/0.9\"):\n # Some servers might still return \"0.9\", treat it as 1.0 anyway\n self.version = (1, 0)\n elif version.startswith(\"HTTP/1.\"):\n self.version = (1, 1) # use HTTP/1.1 code for HTTP/1.x where x>=1\n else:\n raise httping.UnknownProtocol(version)\n\n leaderParser = httping.parseLeader(raw=self.msg,\n eols=(CRLF, LF),\n kind=\"leader header line\")\n while True:\n if self.closed and not self.msg: # connection closed prematurely\n raise httping.PrematureClosure(\"Connection closed unexpectedly\"\n \" while parsing response header\")\n headers = next(leaderParser)\n if headers is not None:\n leaderParser.close()\n break\n (yield None)\n self.headers.update(headers)\n\n # are we using the chunked-style of transfer encoding?\n transferEncoding = self.headers.get(\"transfer-encoding\")\n if transferEncoding and transferEncoding.lower() == \"chunked\":\n self.chunked = True\n else:\n self.chunked = False\n\n # NOTE: RFC 2616, S4.4, #3 says ignore if transfer-encoding is \"chunked\"\n contentLength = self.headers.get(\"content-length\")\n if contentLength and not self.chunked:\n try:\n self.length = int(contentLength)\n except ValueError:\n self.length = None\n else:\n if self.length < 0: # ignore nonsensical negative lengths\n self.length = None\n else:\n self.length = None\n\n # does the body have a fixed length? 
(of zero)\n if ((self.status == httping.NO_CONTENT or self.status == httping.NOT_MODIFIED) or\n (100 <= self.status < 200) or # 1xx codes\n (self.method == \"HEAD\")):\n self.length = 0\n\n contentType = self.headers.get(\"content-type\")\n if contentType:\n if u';' in contentType: # should also parse out charset for decoding\n contentType, sep, encoding = contentType.rpartition(u';')\n if encoding:\n self.encoding = encoding\n\n if 'text/event-stream' in contentType.lower():\n self.evented = True\n self.eventSource = httping.EventSource(raw=self.body,\n events=self.events,\n dictable=self.dictable)\n else:\n self.evented = False\n\n if 'application/json' in contentType.lower():\n self.jsoned = True\n else:\n self.jsoned = False\n\n # Should connection be kept open until server closes\n self.checkPersisted() # sets .persisted\n\n if self.status in (httping.MULTIPLE_CHOICES,\n httping.MOVED_PERMANENTLY,\n httping.FOUND,\n httping.SEE_OTHER,\n httping.TEMPORARY_REDIRECT):\n self.redirectant = True\n\n self.headed = True\n yield True\n return",
"def read_request(stream):\n request_line, headers, body = yield from read_message(stream)\n method, uri, version = request_line[:-2].decode().split(None, 2)\n return method, uri, headers, body",
"def _parse_headers(self, instr):\n top, rest = hdr_end.split(instr, 1)\n self.input_header_length = len(top)\n header_lines = top.splitlines()\n\n # chop off the top line\n while True: # TODO: limit?\n try:\n top_line = header_lines.pop(0)\n if top_line.strip() != \"\":\n break\n except IndexError: # empty\n return rest\n \n try:\n hdr_tuples, conn_tokens, transfer_codes, content_length \\\n = self._parse_fields(header_lines, True)\n except TypeError: # returned None because there was an error\n if not self.inspecting:\n return \"\" # throw away the rest\n \n # ignore content-length if transfer-encoding is present\n if transfer_codes != [] and content_length != None:\n content_length = None\n\n try:\n allows_body = self.input_start(top_line, hdr_tuples,\n conn_tokens, transfer_codes, content_length)\n except ValueError: # parsing error of some kind; abort.\n if not self.inspecting:\n return \"\" # throw away the rest\n allows_body = True\n\n self._input_state = HEADERS_DONE\n if not allows_body:\n self._input_delimit = NOBODY\n elif len(transfer_codes) > 0:\n if transfer_codes[-1] == 'chunked':\n self._input_delimit = CHUNKED\n self._input_body_left = -1 # flag that we don't know\n else:\n self._input_delimit = CLOSE\n elif content_length != None:\n self._input_delimit = COUNTED\n self._input_body_left = content_length\n else:\n self._input_delimit = CLOSE\n return rest",
"def _parse_request(self) -> None:\n action_and_name_from_request = self._request.split(PROTOCOL)\n self._action_from_request = action_and_name_from_request[0].split()[0]\n self._name_from_request = ' '.join(action_and_name_from_request[0].split()[1:])\n self._phone_from_request = self._request.split('\\r\\n')[1]",
"def read_request(self):\n data = b''\n while data.find(b'\\r\\n\\r\\n') == -1:\n r = self.conn.recv(1024)\n # r is empty if socket is closed\n if not r:\n logging.error(\"socket is closed\")\n break\n data += r\n try:\n self.request_line = data.splitlines()[0]\n except Exception as e:\n logging.error(\"recieved data:{0}\".format(data))\n raise e",
"def parse_request_start_line(line: str) -> RequestStartLine:\n try:\n method, path, version = line.split(\" \")\n except ValueError:\n # https://tools.ietf.org/html/rfc7230#section-3.1.1\n # invalid request-line SHOULD respond with a 400 (Bad Request)\n raise HTTPInputError(\"Malformed HTTP request line\")\n if not _http_version_re.match(version):\n raise HTTPInputError(\n \"Malformed HTTP version in HTTP Request-Line: %r\" % version\n )\n return RequestStartLine(method, path, version)",
"def parse_request(self):\n raw_requestline = self.rfile.readline(65537)\n if len(raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)\n return False\n if not raw_requestline:\n self.close_connection = True\n return False\n self.command = None # set in case of error on the first line\n self.request_version = version = self.default_request_version\n self.close_connection = True\n requestline = str(raw_requestline, 'iso-8859-1')\n requestline = requestline.rstrip('\\r\\n')\n self.requestline = requestline\n words = requestline.split()\n if len(words) == 0:\n return False\n\n if len(words) >= 3: # Enough to determine protocol version\n version = words[-1]\n try:\n if not version.startswith('HTTP/'):\n raise ValueError\n base_version_number = version.split('/', 1)[1]\n version_number = base_version_number.split(\".\")\n # RFC 2145 section 3.1 says there can be only one \".\" and\n # - major and minor numbers MUST be treated as\n # separate integers;\n # - HTTP/2.4 is a lower version than HTTP/2.13, which in\n # turn is lower than HTTP/12.3;\n # - Leading zeros MUST be ignored by recipients.\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError):\n self.send_error(\n HTTPStatus.BAD_REQUEST,\n \"Bad request version (%r)\" % version)\n return False\n if version_number >= (1, 1) and self.protocol_version >= \"HTTP/1.1\":\n self.close_connection = False\n if version_number >= (2, 0):\n self.send_error(\n HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,\n \"Invalid HTTP version (%s)\" % base_version_number)\n return False\n self.request_version = version\n\n if not 2 <= len(words) <= 3:\n self.send_error(\n HTTPStatus.BAD_REQUEST,\n \"Bad request syntax (%r)\" % requestline)\n return False\n command, path = words[:2]\n if len(words) == 2:\n self.close_connection = True\n if command != 'GET':\n self.send_error(\n HTTPStatus.BAD_REQUEST,\n \"Bad HTTP/0.9 request type (%r)\" % command)\n return False\n self.command, self.path = command, path\n\n self.request_path = self._get_request_path(self.path)\n\n self.query_string = self.__get_query_string(self.path)\n\n self.query_parameters = self._decode_query_string(self.query_string)\n\n # Examine the headers and look for a Connection directive.\n try:\n self.headers = http.client.parse_headers(self.rfile,\n _class=self.MessageClass)\n except http.client.LineTooLong as err:\n self.send_error(\n HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,\n \"Line too long\",\n str(err))\n return False\n except http.client.HTTPException as err:\n self.send_error(\n HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,\n \"Too many headers\",\n str(err)\n )\n return False\n\n conntype = self.headers.get('Connection', \"\")\n if conntype.lower() == 'close':\n self.close_connection = True\n elif (conntype.lower() == 'keep-alive' and\n self.protocol_version >= \"HTTP/1.1\"):\n self.close_connection = False\n # Examine the headers and look for an Expect directive\n expect = self.headers.get('Expect', \"\")\n if (expect.lower() == \"100-continue\" and\n self.protocol_version >= \"HTTP/1.1\" and\n self.request_version >= \"HTTP/1.1\"):\n if not self.handle_expect_100():\n return False\n return True",
"def parse_lead_headers(self):\n # parse General Header blocks\n self.header_data[\"General Header\"] = OrderedDict([])\n for header_block in self.schema[\"General Header\"]:\n self.header_data[\"General Header\"][header_block] = self._read_header_block(\n self.schema[\"General Header\"][header_block]\n )\n self.cursor_position += self.schema[\"General Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n # parse Channel Set Descriptor blocks\n self.header_data[\"Channel Set Descriptor\"] = OrderedDict([])\n for n in range(\n self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"channel_sets_per_scan_type\"\n ][\"value\"]\n ):\n self.header_data[\"Channel Set Descriptor\"][\n \"Channel Set Descriptor Block #%d\" % (n + 1)\n ] = self._read_header_block(\n self.schema[\"Channel Set Descriptor\"][\"Main Block\"]\n )\n self.cursor_position += self.schema[\"Channel Set Descriptor\"][\"Main Block\"][\n \"block_length_in_bytes\"\n ]\n # parse the first three Extended Header blocks\n self.header_data[\"Extended Header\"] = OrderedDict([])\n for n in range(3):\n header_block = \"32-byte Extended Header Block #%d\" % (n + 1)\n self.header_data[\"Extended Header\"][header_block] = self._read_header_block(\n self.schema[\"Extended Header\"][header_block]\n )\n self.cursor_position += self.schema[\"Extended Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n self.number_of_trace_blocks = (\n self.header_data[\"Extended Header\"][\"32-byte Extended Header Block #2\"][\n \"number_of_records_in_file\"\n ][\"value\"]\n * self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"channel_sets_per_scan_type\"\n ][\"value\"]\n )\n # parse the next n 32-byte Extended Header blocks as necessary\n for n in range(\n 3,\n self.header_data[\"General Header\"][\"General Header Block #2\"][\n \"extended_header_blocks\"\n ][\"value\"],\n ):\n header_block = \"32-byte Extended Header Auxiliary Block\"\n block_label = \"32-byte Extended Header Block #%d\" % (n + 1)\n self.header_data[\"Extended Header\"][block_label] = self._read_header_block(\n self.schema[\"Extended Header\"][header_block]\n )\n self.cursor_position += self.schema[\"Extended Header\"][header_block][\n \"block_length_in_bytes\"\n ]\n # parse the general External Header Block\n self.header_data[\"External Header\"] = OrderedDict([])\n self.header_data[\"External Header\"][\n \"External Header Block #1\"\n ] = self._read_header_block(\n self.schema[\"External Header\"][\"External Header Block #1\"]\n )\n self.cursor_position += self.schema[\"External Header\"][\n \"External Header Block #1\"\n ][\"block_length_in_bytes\"]\n # parse the next n 32-byte External Header blocks\n if (\n self.header_data[\"General Header\"][\"General Header Block #1\"][\n \"number_of_32_byte_external_header_blocks\"\n ][\"value\"]\n == \"ff\"\n ):\n number_of_32_byte_external_header_blocks = self.header_data[\n \"General Header\"\n ][\"General Header Block #2\"][\"external_header_blocks\"][\"value\"]\n else:\n number_of_32_byte_external_header_blocks = self.header_data[\n \"General Header\"\n ][\"General Header Block #1\"][\"number_of_32_byte_external_header_blocks\"][\n \"value\"\n ]\n for n in range(number_of_32_byte_external_header_blocks - 1):\n self.header_data[\"External Header\"][\n \"32-byte External Header Block #%d\" % (n + 1)\n ] = self._read_header_block(\n self.schema[\"External Header\"][\n \"32-byte External Header Auxiliary Block\"\n ]\n )\n self.cursor_position += self.schema[\"External Header\"][\n \"32-byte External 
Header Auxiliary Block\"\n ][\"block_length_in_bytes\"]",
"def parse_headers(self, raw_headers):\n header = {}\n self.clear_header(header)\n for line in raw_headers.splitlines():\n if line.startswith(\"HTTP/\"):\n self.parse_http_protocol(line, header)\n elif \": \" in line:\n self.parse_http_header(line, header)\n elif self.env[\"url\"].startswith(\"ftp://\"):\n self.parse_ftp_header(line, header)\n elif line == \"\":\n # we got an empty line; end of headers (or curl exited)\n if header.get(\"http_result_code\") in [\n \"301\",\n \"302\",\n \"303\",\n \"307\",\n \"308\",\n ]:\n # redirect, so more headers are coming.\n # Throw away the headers we've received so far\n header[\"http_redirected\"] = header.get(\"location\", None)\n self.clear_header(header)\n return header",
"def parse_response_start_line(line: str) -> ResponseStartLine:\n line = native_str(line)\n match = _http_response_line_re.match(line)\n if not match:\n raise HTTPInputError(\"Error parsing response start line\")\n return ResponseStartLine(match.group(1), int(match.group(2)), match.group(3))",
"def parseRequest(self):\n\t\tself.rBuf = self.rBuf.replace('\\x0c','')",
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.fire_handlers(self)",
"def got_line(self, data):\n\n requestor_princ = self.peer()\n username = data.decode()\n _LOGGER.info(\n 'Processing ipakeytab request for \\'%s\\' from \\'%s\\'',\n username,\n requestor_princ)\n\n try:\n self._validate_request(username)\n requestor = self._validate_requestor(requestor_princ)\n req_name = requestor[0]\n req_inst = requestor[1]\n req_realm = requestor[2]\n keytab_entries = self._get_keytab(\n username, req_inst, req_realm)\n except Exception as err: # pylint: disable=W0703\n _LOGGER.error(repr(err))\n response = \"ERROR: \" + str(err)\n self.write(str(response).encode(\"utf-8\"))\n return\n\n self.write(keytab_entries)\n self.transport.loseConnection()",
"def _parse_header(self):\n log.debug('---In dcd.py, parse_header()')\n #process the first header block\n\n header1 = self._fo.read(92)\n header1_format=\\\n \"i---cccci---i---i---i---xxxxxxxxxxxxxxxxxxxxf---i---i---xxxxxxxxxxxxxxxxxxxxxxxxxxxxi---i---\"\n # |1 |5 |10 |15 |20 |25 |30 |35 |40 |45 |50 |55 |60 |65 |70 |75 |80 |85 |90\n #|header size=84 |nframes*tstep |tstep_size |charm_ver\n # |CORD=has coordinates |block_a |header_size=84\n # |nframes |block_b\n # |starting timestep\n # |timestep between coord sets \n header1_format = string.replace(header1_format, \"-\", \"\")\n header1 = struct.unpack(header1_format, header1)\n header1_size1, c1, c2, c3, c4, self._nframes, self._firsttstep, self._dcdfreq, self._ntsteps, self._tstep_size, self._block_a, self._block_b, self._charm_v, header1_size2 = header1 #unpack the tuple header1\n \n \n self._dcdtype = \"\".join((c1,c2,c3,c4)) #get the data-type field. I it should always be cord...\n if header1_size1 != 84 or header1_size2 !=84:\n log.error(\"error-- header size fields not correct (should be 84)\\n\")\n if self._block_a != 0 or self._block_b != 0:\n log.info(\"I've found a signal possibly indicating an extra record block\")\n log.info(\" I'll try to parse it, but it might fail. Also, I won't use\")\n log.info(\" any data from them.\")",
"def _parse_mxp_header(it, current, recipe):\n current = _skip_empty(it, current)\n if _is_mxp_header(current):\n current = next(it)\n return current",
"def parse(self, line):\n\n\t\tbits = line.strip().split(None, 1)\n\t\ttry:\n\t\t\tversion, status = bits\n\t\texcept ValueError:\n\t\t\traise InvalidLine(_(u'Invalid response line: %r'), line.decode('ISO8859-1'))\n\n\t\t# version\n\t\tsuper(Response, self).parse(version)\n\n\t\t# status\n\t\tself.status.parse(status)",
"def _parse_header(head):\n # CALL1>CALL2,CALL3,CALL4,CALL5:\n # |from-|--to-|------path-------|\n #\n try:\n (fromcall, path) = head.split('>', 1)\n except:\n raise ParseError(\"invalid packet header\")\n\n # looking at aprs.fi, the rules for from/src callsign\n # are a lot looser, causing a lot of packets to fail\n # this check.\n #\n # if len(fromcall) == 0:\n # raise ParseError(\"no fromcallsign in header\")\n # _validate_callsign(fromcall, \"fromcallsign\")\n\n if (not 1 <= len(fromcall) <= 9 or\n not re.findall(r\"^[a-z0-9]{0,9}(\\-[a-z0-9]{1,8})?$\", fromcall, re.I)):\n\n raise ParseError(\"fromcallsign is invalid\")\n\n path = path.split(',')\n\n if len(path) < 1 or len(path[0]) == 0:\n raise ParseError(\"no tocallsign in header\")\n\n tocall = path[0]\n path = path[1:]\n\n _validate_callsign(tocall, \"tocallsign\")\n\n for digi in path:\n if not re.findall(r\"^[A-Z0-9\\-]{1,9}\\*?$\", digi, re.I):\n raise ParseError(\"invalid callsign in path\")\n\n parsed = {\n 'from': fromcall,\n 'to': tocall,\n 'path': path,\n }\n\n # viacall is the callsign that gated the packet to the net\n # it's located behind the q-contructed\n #\n # CALL1>CALL2,CALL3,qAR,CALL5:\n # .....................|-via-|\n #\n viacall = \"\"\n if len(path) >= 2 and re.match(r\"^q..$\", path[-2]):\n viacall = path[-1]\n\n parsed.update({'via': viacall})\n\n return parsed",
"def _iterate_over_requests(log):\n if log.extension not in {\"log\", \"gz\"}:\n raise ValueError(\"Invalid extension of the log-file.\")\n\n open_method = open if log.extension == \"log\" else gzip.open\n\n with open_method(log.path, \"rb\") as f:\n for line in f:\n line = line.decode(\"utf-8\")\n search = LOG_REQUEST_PATTERN.search(line)\n\n if search is None:\n yield None\n continue\n\n try:\n method, url, protocol = search.group(1).split()\n except ValueError:\n # Invalid $request format\n yield None\n continue\n\n time = float(search.group(2))\n\n yield LogRequest(url, time)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
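A brief aside on the entry above: the 414 versus 413 split depends on a byte-counting wrapper around the request stream, which raises once a size cap is exceeded. The following is a minimal sketch of that idea under assumed names; CappedReader and MaxSizeExceeded here are illustrative stand-ins, not the SizeCheckWrapper the document actually uses.

import io

class MaxSizeExceeded(Exception):
    """Raised once more than `limit` bytes have been read from the stream."""

class CappedReader:
    """File-like wrapper enforcing an upper bound on total bytes read."""

    def __init__(self, raw, limit):
        self.raw = raw
        self.limit = limit
        self.bytes_read = 0

    def readline(self, size=-1):
        line = self.raw.readline(size)
        self.bytes_read += len(line)
        if self.bytes_read > self.limit:
            raise MaxSizeExceeded()
        return line

# A request line within the cap is returned normally; an oversized request
# line or header block raises, letting the caller answer 414 (request line)
# or 413 (headers) as in the entry above.
rfile = CappedReader(io.BytesIO(b"GET / HTTP/1.1\r\nHost: example\r\n\r\n"), limit=8192)
request_line = rfile.readline()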
Parse a Request-URI into (scheme, authority, path). | def parse_request_uri(self, uri):
if uri == ASTERISK:
return None, None, uri
i = uri.find('://')
if i > 0 and QUESTION_MARK not in uri[:i]:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
scheme, remainder = uri[:i].lower(), uri[i + 3:]
authority, path = remainder.split(FORWARD_SLASH, 1)
path = FORWARD_SLASH + path
return scheme, authority, path
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None | [
"def _url_parse(uri):\n host = \"\"\n path = \"\"\n\n p_uri = urlparse(uri)\n host = p_uri.netloc\n path = p_uri.path.rstrip('/').strip('/')\n\n return (host,path)",
"def split_path(uri):\n parsed = urlparse(uri)\n return parsed.path, parsed.query, parsed.fragment",
"def parseURI(self, uri):\n match = re.match(self.regex, uri, self.regexFlags)\n if not match:\n raise InvalidURIException(\"Invalid URI: %r\" % uri)\n return match.groupdict()",
"def parse_path(self, path):\n\n parsed = urllib.parse.urlparse(path)\n return parsed.path, urllib.parse.parse_qs(parsed.query)",
"def parse_path(r):\n m = _REQUEST_RE.search(r)\n return m.groupdict().get(\"path\") if m else \"\"",
"def parseCheckoutUri(uri):\n\n\t# Attempt to parse a URI with a + in it. ex: hg+http://blah\n\t# If it doesn't find the 'type' it should extract 'real_uri' and 'rev'\n\tm = re.match(r'^((?P<type>\\w*)\\+)?(?P<realUri>.+?)(#(?P<rev>.+))?$', uri)\n\tif not m or not m.group('realUri'):\n\t\tsysExit(\"Couldn't parse repository URI \" + uri)\n\n\turiType = m.group('type')\n\trealUri = m.group('realUri')\n\trev = m.group('rev')\n\n\t# Attempt to parse a URI without a + in it. ex: svn://blah\n\tif not uriType:\n\t\tm = re.match(r'^(\\w*).*$', realUri)\n\t\tif m:\n\t\t\turiType = m.group(1)\n\n\tif not uriType:\n\t\tsysExit(\"Couldn't parse repository type from URI \" + realUri)\n\n\treturn (uriType, realUri, rev)",
"def pathfromuri(uri):\n\n\t\taddress = urlsplit(uri)\n\t\tbase = address.netloc\n\n\t\tif address.path != '':\n\n\t\t\t# remove first slash\n\t\t\tif base == '' and address.path[0:1] == '/':\n\t\t\t\tpath = address.path[1:]\n\t\t\telse:\n\t\t\t\tpath = address.path\n\n\t\t\t# don't underscore a directory type path\n\t\t\tif path[-1] == '/':\n\t\t\t\tpath = re.sub('/', '_', path[:-1])\n\t\t\telse:\n\t\t\t\tpath = re.sub('/', '_', path)\n\n\t\t\tbase += path\n\n\t\tif address.query != '':\n\t\t\tquery = re.sub('&', '-', address.query)\n\t\t\tbase += '+' + query\n\n\t\treturn base",
"def parseURI(url):\n\thostport = url.split(':')\n\thost = hostport[0] if hostport[0] != 'localhost' else socket.gethostname()\n\treturn host, hostport[1] if len(hostport) > 1 else '80'",
"def SplitUriRef(uriref):\r\n # the pattern will match every possible string, so it's safe to\r\n # assume there's a groupdict method to call.\r\n g = SPLIT_URI_REF_PATTERN.match(uriref).groupdict()\r\n scheme = g['scheme']\r\n authority = g['authority']\r\n path = g['path']\r\n query = g['query']\r\n fragment = g['fragment']\r\n return (scheme, authority, path, query, fragment)",
"def parse_backend_uri(backend_uri):\n if backend_uri.find(':') == -1:\n raise InvalidCacheBackendError(\"Backend URI must start with scheme://\")\n scheme, rest = backend_uri.split(':', 1)\n if not rest.startswith('//'):\n raise InvalidCacheBackendError(\"Backend URI must start with scheme://\")\n\n host = rest[2:]\n qpos = rest.find('?')\n\n if qpos != -1:\n params = dict(parse_qsl(rest[qpos + 1:]))\n host = rest[2:qpos]\n else:\n params = {}\n\n if host.endswith('/'):\n host = host[:-1]\n\n return scheme, host, params",
"def urlparse(self, url, scheme='', allow_fragments=True):\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url, scheme, allow_fragments)\n self._scheme=scheme\n self._netloc=netloc\n self._path=path\n self._params=params\n self._query=query\n self._fragment=fragment\n return scheme, netloc, path, params, query, fragment",
"def SplitUriRef(uriref):\n # the pattern will match every possible string, so it's safe to\n # assume there's a groupdict method to call.\n g = SPLIT_URI_REF_PATTERN.match(uriref).groupdict()\n scheme = g['scheme']\n authority = g['authority']\n path = g['path']\n query = g['query']\n fragment = g['fragment']\n return (scheme, authority, path, query, fragment)",
"def urlparse(url):\n result = {} \n status = 0\n mark = 0\n remain = None \n for i, c in enumerate(url): \n #not enough\n if i < mark:\n continue\n\n #optimization for letters\n if c in letters:\n continue\n \n #handle delimiters\n if c == \":\": \n if url[i: i+3] == \"://\":\n status = 1\n result[\"scheme\"] = url[:i]\n mark = i + 2 \n remain = \"host\" \n else: \n #host:port\n if url[i+1].isdigit():\n #next port\n result[\"host\"] = url[mark:i] \n status = 4 \n remain = \"port\"\n #user\n else: \n result[\"user\"] = url[mark:i] \n #next password\n status = 2 \n remain = \"password\"\n\n elif c == \"/\": \n if status >= 5: \n continue\n #host:port, for port\n if status in (0, 1, 3):\n result[\"host\"] = url[mark:i] \n if status == 4:\n result[\"port\"] = url[mark:i] \n #next possible \"path\"\n remain = \"path\" \n status = 5 \n elif c == \"@\": \n if status != 2:\n #user@host\n result[\"user\"] = url[mark:i] \n #user:password@host\n else:\n result[\"password\"] = url[mark:i] \n #next possible \"host\"\n remain = \"host\"\n status = 3 \n\n elif c in \";?#\":\n #path\n if status == 5:\n result[\"path\"] = url[mark:i] \n status = 6 \n #params\n elif status == 6:\n result[\"params\"] = url[mark:i] \n status = 7\n #query\n elif status == 7:\n result[\"query\"] = url[mark:i] \n status = 8\n #frag\n elif status == 8: \n result[\"fragment\"] = url[mark:i] \n status = 9 \n #skip normal char\n else: \n continue\n\n if c == \";\":\n #next params \n remain = \"params\"\n status = 6\n\n elif c == \"?\":\n #next query\n remain = \"query\"\n status = 7\n\n elif c == \"#\":\n remain = \"fragment\"\n status = 8 \n\n if mark < i:\n mark = i + 1\n else:\n mark += 1\n #host.com \n if not status:\n result[\"host\"] = url\n else:\n if mark < len(url):\n result[remain] = url[mark:]\n result.setdefault(\"path\", \"/\")\n return result",
"def parse(url_str):\r\n url_str = to_unicode(url_str)\r\n\r\n result = urlparse(url_str)\r\n netloc_parts = result.netloc.split('@')\r\n if len(netloc_parts) == 1:\r\n username = password = None\r\n host = netloc_parts[0]\r\n else:\r\n username, password = netloc_parts[0].split(':')\r\n host = netloc_parts[1]\r\n\r\n if host and ':' in host:\r\n host = host.split(':')[0]\r\n\r\n return {'host': host,\r\n 'username': username,\r\n 'password': password,\r\n 'scheme': result.scheme,\r\n 'port': result.port,\r\n 'path': result.path,\r\n 'query': result.query,\r\n 'fragment': result.fragment}",
"def urlunparse(parts):\n scheme, netloc, path, params, query, fragment = parts\n\n # Avoid encoding the windows drive letter colon\n if RE_DRIVE_LETTER_PATH.match(path):\n quoted_path = path[:3] + parse.quote(path[3:])\n else:\n quoted_path = parse.quote(path)\n\n return parse.urlunparse((\n parse.quote(scheme),\n parse.quote(netloc),\n quoted_path,\n parse.quote(params),\n parse.quote(query),\n parse.quote(fragment)\n ))",
"def parse_uri(self, uri):\n return self.parse(HTTPCache(uri).content())",
"def parseURI(self,url):\n addr = \"\"\n parts = []\n ip = False\n parts = url.split('/')\n #extract ip address with port\n if(len(parts)>2):\n addr = parts[2] #this contains X.X.X.X:PORT\n else:\n addr = parts[0] #it is possible the mtURL is \"X.X.X.X:PORT/\" (no http), then parts[0] will still be X.X.X.X:PORT\n # extract the ip address \n addr = addr.split(':')\n if(len(addr)>1):\n ip = addr[0]\n port = addr[1]\n else:\n ip = False\n port = False\n return ip, port",
"def parse_for_base_url(url: str) -> str:\n parsed = urlparse(url)\n parsed = (parsed.netloc + parsed.path).rstrip(\"/\")\n return parsed",
"def url_parser(url):\r\n if url.startswith(URL_SCHEMES):\r\n return url\r\n else:\r\n return 'https://' + url",
"def parse_address(addr, strict=False):\n if not isinstance(addr, six.string_types):\n raise TypeError(\"expected str, got %r\" % addr.__class__.__name__)\n scheme, sep, loc = addr.rpartition(\"://\")\n if strict and not sep:\n msg = (\n \"Invalid url scheme. \"\n \"Must include protocol like tcp://localhost:8000. \"\n \"Got %s\" % addr\n )\n raise ValueError(msg)\n if not sep:\n scheme = DEFAULT_SCHEME\n return scheme, loc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
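To make the three Request-URI forms handled by the entry above concrete, here is a small standalone sketch of the same split with a few checked examples; the function name and the use of partition() instead of split() are assumptions for the sketch, not the document's code.

def split_request_uri(uri):
    # '*' is used by server-wide OPTIONS requests.
    if uri == '*':
        return None, None, uri
    i = uri.find('://')
    if i > 0 and '?' not in uri[:i]:
        # absoluteURI: scheme://authority/path
        scheme, remainder = uri[:i].lower(), uri[i + 3:]
        authority, _, path = remainder.partition('/')
        return scheme, authority, '/' + path
    if uri.startswith('/'):
        # abs_path, the common case for origin servers.
        return None, None, uri
    # Bare authority, as used by CONNECT.
    return None, uri, None

assert split_request_uri('*') == (None, None, '*')
assert split_request_uri('http://example.com/a?b=1') == ('http', 'example.com', '/a?b=1')
assert split_request_uri('/index.html') == (None, None, '/index.html')
assert split_request_uri('example.com:443') == (None, 'example.com:443', None)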
Assert, process, and send the HTTP response message-headers. You must set self.status and self.outheaders before calling this. | def send_headers(self):
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", rfc822.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall(EMPTY.join(buf)) | [
"def _sendResponseHeaders(self):\n code, message = self.status.split(None, 1)\n code = int(code)\n self.request.setResponseCode(code, _wsgiStringToBytes(message))\n\n for name, value in self.headers:\n # Don't allow the application to control these required headers.\n if name.lower() not in ('server', 'date'):\n self.request.responseHeaders.addRawHeader(\n _wsgiStringToBytes(name), _wsgiStringToBytes(value))",
"def set_status_and_headers_in_response(response, status, headers):\n ...",
"def write_headers(self):\r\n self._write(\"HTTP/1.1 %s \\r\\n\" % (self.status,))\r\n has_content_length = False\r\n has_date = False\r\n for (k,v) in self.headers:\r\n self._write(\"%s: %s\\r\\n\" % (k,v,))\r\n if k.lower() == \"content-length\":\r\n has_content_length = True\r\n elif k.lower() == \"date\":\r\n has_date = True\r\n if not has_date:\r\n self._write(\"Date: %s\\r\\n\" % (rfc822_format_date(),))\r\n if not has_content_length:\r\n if self.request.headers[\"VERSION\"] == \"HTTP/1.1\":\r\n if self.request.headers[\"METHOD\"] != \"HEAD\":\r\n self._write(\"Transfer-Encoding: chunked\\r\\n\")\r\n self.is_chunked = True\r\n else:\r\n self.should_close = True\r\n self._write(\"\\r\\n\")",
"def end_headers(self):\n if self.request_version != 'HTTP/0.9':\n self._headers_buffer.append(b\"\\r\\n\")\n self.flush_headers()",
"def start_response_impl(self, state, res, hdr, exc_info):\n\t\tstate.result = res.split(\" \")[0]\n\n\t\t# Work out from the request environment what output format we\n\t\t# want to use, and select it\n\t\tstate.transformer = self.get_transformer(state.env)\n\n\t\t# Modify the existing headers: drop any content-type or\n\t\t# content-length headers\n\t\tnew_hdr = []\n\t\tfor name, value in hdr:\n\t\t\tlname = name.lower()\n\t\t\tif lname == \"content-type\":\n\t\t\t\tcontinue\n\t\t\tif lname == \"content-length\":\n\t\t\t\tcontinue\n\t\t\tnew_hdr.append((name, value))\n\n\t\t# Add in suitable headers for the transformed output\n\t\tstate.transformer.http_headers(new_hdr)\n\n\t\t# Continue with the original function call as if nothing has\n\t\t# happened\n\t\twrite = state.start_response(res, new_hdr)\n\t\tdef new_write(data):\n\t\t\tlog.error(\"Deprecated write function called! Data not written.\")\n\t\t\twrite(state.transformer.write(data))\n\n\t\treturn new_write",
"def start_response_wrapper(self,status,response_headers,exc_info=None):\n\t\tresponse_headers=response_headers+self.responseHeaders\n\t\treturn self.start_response(status,response_headers,exc_info)",
"def send_response(self, code, message=None):\n self.log_request(code)\n self.send_response_only(code, message)\n self.send_header('Server', self.version_string())\n self.send_header('Date', self.date_time_string())",
"def write(cgi_response):\n cgi_response = str(cgi_response)\n cgi_response.replace('\\r\\n', '\\n')\n head, body = cgi_response.split('\\n\\n', 1)\n lines = head.split('\\n')\n\n for line in lines:\n if line.isspace(): \n continue\n hdr, value = line.split(\":\", 1)\n value = value.strip()\n if hdr.lower() == \"status\": \n web.ctx.status = value\n else: \n web.header(hdr, value)\n\n web.output(body)",
"def passed(self):\n self.response['status'] = 'success'\n self.status_code = 200",
"def setHTTPHeaders(self, response, filename):\n response.setHeader('Content-Type',\n self.getDestinationFormat())",
"def test_httpresponse_pass_through(self):\n response = twilio_view(self.response_view)(self.request_post)\n self.assertTrue(isinstance(response, HttpResponse))",
"def test_response_without_notifications(self):\n request = http.HttpRequest()\n response = http.HttpResponse()\n self.middleware.process_response(request, response)",
"def ddHeaders(self):\n self.dumpRequestHeaders()\n self.dumpResponseHeaders()\n self.testcase.stop()",
"def _header_poster(self, url, rpath, fheaders):\r\n\r\n # perform Object POST request for header update.\r\n resp = http.post_request(url=url, rpath=rpath, headers=fheaders)\r\n self.resp_exception(resp=resp)\r\n\r\n report.reporter(\r\n msg='STATUS: %s MESSAGE: %s REASON: %s' % (resp.status_code,\r\n resp.request,\r\n resp.reason),\r\n prt=False,\r\n lvl='debug'\r\n )\r\n\r\n return resp.headers",
"def compose(self):\n header = 'HTTP/1.1 {code} {name}\\r\\n'.format(\n code=self.code, name=client.responses[self.code]\n )\n self.headers.update(self._default_headers)\n self.headers.update(\n Date=formatdate(timeval=None, localtime=False, usegmt=True)\n )\n if self.additional_headers:\n self.headers.update(self.additional_headers)\n for head, value in self.headers.items():\n header += '{}: {}\\r\\n'.format(head, value)\n return '{}\\r\\n{}'.format(header, self.body)",
"def run(self, url, environ):\n self._reset(url)\n try:\n self._process(url,environ)\n #environ.__iter__(4) #hack: test function to fall into the execpt below (for testing purposes)\n \n# if(isinstance(self.body, basestring)):\n# self.body = [self.body];\n# else: \n# try:\n# iterator = iter(self.body)\n# except TypeError:\n# # not iterable\n# return [\"\"]\n# #else:\n# # iterable: do nothing\n except:\n #NOTE: content-length does not seem to be mandatory, see\n #http://www.techques.com/question/1-6919182/Is-Content-length-the-only-way-to-know-when-the-HTTP-message-is-completely-received\n #As it involves more calculation, we omit if it is not retriavable without the risk of performance loss\n if CARAVAN_DEBUG:\n traceback.print_exc()\n self.headers = {} #re-init the dict\n self.headers['Content-Type'] = 'text/plain'\n strlen=0\n if environ[\"REQUEST_METHOD\"] == \"HEAD\":\n self.body = [\"\"]\n else:\n \n import StringIO\n output = StringIO.StringIO()\n output.write(\"A server error occurred.\") #message copied from what I got in in the browser in case of unexpected error\n if CARAVAN_DEBUG:\n output.write(\"\\n\")\n traceback.print_exc(file=output)\n #get string value (this is the part which has the best benefits over performances compared to strings):\n output_str = output.getvalue()\n #wrap the error message, set content length, go on...:\n self.body = [output_str]\n strlen = len(output_str)\n \n self.headers['Content-Length'] = str(strlen)\n self.status = ResponseHandler._status500;\n \n \n self.headers = list(self.headers.items()) #update headers into a list of tuples. Note that there exists the wsgiref.Headers class but it doesn't seem to be great...\n #Note on line above: Python3 converts to list the dict items(), which the new view of the dictionary's items ((key, value) pairs))\n #In python <3, copies the list the dict items(), which is already a list of (key, value) pairs.\n #The method above, although not entirely efficient in Python <3 (the list part could be removed) assures compatibility between Python versions.",
"def set_response(self):\n\t\tresponse = cherrypy.response\n\t\t\n\t\t#removes headers from original request\n\t\trespheaders = response.headers\n\t\tfor key in [\"Accept-Ranges\", \"Age\", \"ETag\", \"Location\", \"Retry-After\",\n\t\t\t\t\t\"Vary\", \"Content-Encoding\", \"Content-Length\",\"Content-Range\" , \"Expires\",\n\t\t\t\t\t\"Content-Location\", \"Content-MD5\", \"Last-Modified\"]:\n\t\t\tif respheaders.has_key(key):\n\t\t\t\tdel respheaders[key]\n\t\t\n\t\t#defines response json\n\t\tresponse.status = self.status\n\t\terror_body = {\"error\": {\"status\": self.status,\"message\": self.message}}\n\t\tif cherrypy.request.show_tracebacks and not self.status == 401:\n\t\t\terror_body[\"traceback\"] = format_exc()\n\t\t\n\t\tif self.status == 500 or self.status == 404:\n\t\t\terror_body = simplejson.dumps(error_body, indent=1)\n\t\t\trespheaders['Content-Length'] = len(error_body)\n\t\t\trespheaders['Content-Type'] = \"application/json\"\n\t\t\n\t\tresponse.body = error_body",
"def test_process_response_does_not_add_header_when_turned_off(self):\n response, _, _ = self.get_process_response()\n header = 'X-REQUEST-ID'\n\n self.assertNotIn(header, response)",
"def response_header(self, names: List[str]):\n self.__response_headers = names.copy()\n return self"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
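As a usage note for the entry above, the final step is purely mechanical: join the status line, each header as "Name: value", and a blank line with CRLF, then push the result with one write. A minimal sketch with placeholder values follows; formatdate from email.utils stands in for the rfc822.formatdate call in the document.

from email.utils import formatdate

status = "200 OK"
outheaders = [
    ("Content-Type", "text/plain"),
    ("Date", formatdate(usegmt=True)),
    ("Connection", "close"),
]
buf = ["HTTP/1.1 " + status + "\r\n"]
for k, v in outheaders:
    buf.append(k + ": " + v + "\r\n")
buf.append("\r\n")
wire_bytes = "".join(buf).encode("latin-1")  # ready for a single sendall()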
Sendall for non-blocking sockets. | def sendall(self, data):
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise | [
"def _flush(self):\n\t\t\n\t\tfor element in self._writequeue:\n\t\t\tsize = len(element)\n\t\t\twhile size > 0:\n\t\t\t\ttry:\n\t\t\t\t\tsent = self._socket.send(element)\n\t\t\t\t\telement = element[sent:]\n\t\t\t\t\tsize -= sent\n\t\t\t\texcept socket.error, e:\n\t\t\t\t\tif e.errno == errno.EAGAIN:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\traise\n\t\t\n\t\tself._writequeue = []",
"def socket_send(self):\n if not self.send_ready():\n warnings.warn('socket_send() called on empty buffer',\n RuntimeWarning, 2)\n return 0\n ready_bytes = bytes(''.join(self.send_buffer))\n self.send_buffer = array.array('c')\n\n def send(send_bytes):\n \"\"\"\n throws x84.bbs.exception.Disconnected on sock.send err\n \"\"\"\n try:\n return self.sock.send(send_bytes)\n except socket.error as err:\n if err[0] == 11:\n warnings.warn('%s: %s (bandwidth exceed)' % (\n self.addrport(), err[1],), RuntimeWarning, 2)\n else:\n raise Disconnected(\n 'socket send %d: %s' % (err[0], err[1],))\n\n sent = send(ready_bytes)\n if sent < len(ready_bytes):\n # re-buffer data that could not be pushed to socket;\n self.send_buffer.fromstring(ready_bytes[sent:])\n else:\n # When a process has completed sending data to an NVT printer\n # and has no queued input from the NVT keyboard for further\n # processing (i.e., when a process at one end of a TELNET\n # connection cannot proceed without input from the other end),\n # the process must transmit the TELNET Go Ahead (GA) command.\n if (not self.input_ready()\n and self.check_local_option(SGA) is False\n and not self._check_reply_pending(SGA)):\n sent += send(bytes(''.join((IAC, GA))))\n return sent",
"def to_send_all(self):\n while self.has_to_send():\n yield self.to_send()",
"def _send_to_all(self,func,args=None):\n results = []\n with self.conLock:\n # Loop over a copy of self.connections (slice notation) to\n # encompass changes to itself, during the loop.\n if len(self.connections) == 0:\n raise BeanstalkcException('Pool is empty. Nothing sent.')\n for conn in self.connections[:]:\n try:\n results.append( (conn,self._call_wrap(conn,func,args)) )\n except SocketError as e:\n logging.error('beanstalkc-pool connection error in _send_to_all(). Skipping connection.')\n return results",
"def runTCP(self, sock):\n # connect to receiever, tls handshake\n sock.connect((self.recv_ip, self.recv_port))\n # continue to send massage until...\n\n for block in self.blocks:\n self.packetsSent += 1\n if (self.noise < random.random()):\n # send message to receiver at IP, PORT\n print((block))\n # print(pickle.loads(pickle.dumps(block)))\n sock.sendall(pickle.dumps(block))\n for _ in range(10): # send constant number of sentinals\n sock.sendto(pickle.dumps(None), (self.recv_ip, self.recv_port))",
"def __send_bytes(self, data):\n self.socket.sendall(data)",
"def send(self, data):\n self._socket.sendall(data)",
"def send_to_all(sockets: List[LengthSocket], packets):\n for packet in packets:\n for s in sockets:\n try:\n s.send(packet)\n except (Exception, socket.error) as e:\n print(f\"Could not send packets to {s}: {e}\")\n print(f\"Stopping transmit to {s}\")\n sockets.remove(s)",
"def flush(self):\n if not self.requests:\n return\n data = []\n while self.requests:\n request = self.requests.popleft()\n log.info(\"C: %s\", request.description)\n for offset in range(0, len(request.data), MAX_CHUNK_SIZE):\n end = offset + MAX_CHUNK_SIZE\n chunk = request.data[offset:end]\n data.append(raw_pack(UINT_16, len(chunk)))\n data.append(chunk)\n data.append(raw_pack(UINT_16, 0))\n self.socket.sendall(b\"\".join(data))",
"def send(self, buf):",
"async def start_sending(self):\n\n while self.is_connected:\n await asyncio.sleep(Peer.REQUEST_DELAY_NO_BLOCK)\n # print(\"Sending?\")\n if not self.peer_choking:\n # make block requests\n request_message = self.client.piece_manager.get_next_request(self)\n if request_message is not None:\n await self.send(request_message)",
"def send(self, message):\n self.socket.sendall(message.encode())",
"def try_send(s, packet):\n try:\n s.sendall(packet)\n\n except:\n print('Problem occurred while sending')\n sys.exit(1)",
"def runUDP(self, sock):\n # just send entire message without check for completeness\n for block in self.blocks:\n self.packetsSent += 1\n if (self.noise < random.random()):\n # send message to receiver at IP, PORT\n sock.sendto(pickle.dumps(block), (self.recv_ip, self.recv_port))\n sock.sendto(pickle.dumps(None), (self.recv_ip, self.recv_port))",
"def write_and_send(self, data):\r\n self.__my_socket.send_(data)\r\n self.recev()",
"def sendall(origin, msg):\n try:\n for cl in clientes:\n if cl != origin:\n clientes.get(cl)[2].send(('{\"event\": \"' + origin + msg + '\"}').encode())\n except Exception as e:\n print(\"SendAll error: \" + str(e))",
"def sendmany(self, messages):\n with self.lock_many:\n for i in messages:\n self.send(i)",
"def sock_send(self, data):\n\n self.sock.send(data)",
"def _send(self, data, newline=\"\\r\\n\", sock=None):\n self.outbuff.append(data+newline)\n for msg in self.outbuff:\n if self.verbose:\n print(\"<<< \"+msg)\n self.sock.send((msg+newline).encode(\"utf-8\"))",
"def send_packet():"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
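For context on the entry above, the essential pattern is: retry the send, treat EAGAIN/EWOULDBLOCK as "buffer full", and re-raise anything else. A minimal modern sketch follows; the select() wait and the errno names are assumptions, while the document instead consults its socket_errors_nonblocking set and simply retries.

import errno
import select

def sendall_nonblocking(sock, data):
    # Assumes the caller created `sock` and called sock.setblocking(False).
    view = memoryview(data)
    while view:
        try:
            sent = sock.send(view)
            view = view[sent:]
        except OSError as e:
            if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
                raise
            # Send buffer is full; wait until the socket is writable again.
            select.select([], [sock], [])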
Mark the given socket fd as non-inheritable (Windows). | def prevent_socket_inheritance(sock):
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError() | [
"def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def make_nonblocking(fp):\n if isinstance(fp, socket.socket):\n fp.setblocking(0)\n elif fcntl:\n flags = fcntl.fcntl(fp, fcntl.F_GETFL)\n flags |= os.O_NONBLOCK\n fcntl.fcntl(fp, fcntl.F_SETFL, flags)\n else:\n raise RuntimeError()",
"def setBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def _set_nonblock(self, stream):\n fdesc = stream.fileno()\n flags = fcntl.fcntl(fdesc, fcntl.F_GETFL)\n fcntl.fcntl(fdesc, fcntl.F_SETFL, flags | os.O_NONBLOCK)",
"def set_wakeup_fd(space, fd):\n if not space.threadlocals.signals_enabled():\n raise oefmt(space.w_ValueError,\n \"set_wakeup_fd only works in main thread or with \"\n \"__pypy__.thread.enable_signals()\")\n if fd != -1:\n try:\n os.fstat(fd)\n except OSError as e:\n if e.errno == errno.EBADF:\n raise oefmt(space.w_ValueError, \"invalid fd\")\n old_fd = pypysig_set_wakeup_fd(fd, True)\n return space.newint(intmask(old_fd))",
"def unregister(self, fd):\r\n self.read.discard(fd)\r\n self.write.discard(fd)\r\n self.error.discard(fd)",
"def skip_unless_bind_unix_socket(test):\n if not hasattr(socket, 'AF_UNIX'):\n return unittest.skip('No UNIX Sockets')(test)\n global _bind_nix_socket_error\n if _bind_nix_socket_error is None:\n from .os_helper import TESTFN, unlink\n path = TESTFN + \"can_bind_unix_socket\"\n with socket.socket(socket.AF_UNIX) as sock:\n try:\n sock.bind(path)\n _bind_nix_socket_error = False\n except OSError as e:\n _bind_nix_socket_error = e\n finally:\n unlink(path)\n if _bind_nix_socket_error:\n msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error\n return unittest.skip(msg)(test)\n else:\n return test",
"def patch_socket(autononous=True):\r\n \r\n if stacklessio:\r\n from stacklessio import _socket\r\n sys.modules[\"_socket\"] = _socket\r\n else:\r\n # Fallback on the generic 'stacklesssocket' module.\r\n if pyuv:\r\n from stacklesslib.replacements import socket_pyuv as socket\r\n else:\r\n from stacklesslib.replacements import socket_asyncore as socket\r\n\r\n socket._sleep_func = main.sleep\r\n socket._schedule_func = lambda: main.sleep(0)\r\n # If the user plans to pump themselves, disable auto-pumping.\r\n if not pyuv and not autononous:\r\n socket._manage_sockets_func = lambda: None\r\n socket.install()",
"def make_nonblocking ( f, nonblocking=True ):\n\n\timport os, fcntl\n\t\n\tflags = fcntl.fcntl ( f, fcntl.F_GETFL )\n\tif nonblocking:\n\t\tflags |= os.O_NONBLOCK\n\telse:\n\t\tflags &= ~os.O_NONBLOCK\n\tfcntl.fcntl( f, fcntl.F_SETFL, flags )",
"def testReadInterrupted(self):\n for version in [4, 5, 6]:\n family = {4: AF_INET, 5: AF_INET6, 6: AF_INET6}[version]\n s = net_test.UDPSocket(family)\n self.SelectInterface(s, random.choice(self.NETIDS), \"mark\")\n addr = self.GetRemoteAddress(version)\n\n # Check that reads on connected sockets are interrupted.\n s.connect((addr, 53))\n self.assertEquals(3, s.send(\"foo\"))\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)\n\n # A destroyed socket is no longer connected, but still usable.\n self.assertRaisesErrno(EDESTADDRREQ, s.send, \"foo\")\n self.assertEquals(3, s.sendto(\"foo\", (addr, 53)))\n\n # Check that reads on unconnected sockets are also interrupted.\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)",
"def rem_fd(self, fd):\n raise NotImplementedError('ReactorInterface.rem_fd method not'\\\n ' implemented.')",
"def is_socket(fd):\n file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)\n\n try:\n file_socket.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)\n except socket.error as ex:\n return ex.args[0] != errno.ENOTSOCK\n else:\n return True",
"def clear_fd(self):\n\n fcntl.ioctl(self.fd, self.LOOP_CLR_FD)",
"def patch_socket(dns=True, aggressive=True):\n from gevent import socket\n _socket = __import__('socket')\n _socket.socket = socket.socket\n _socket.SocketType = socket.SocketType\n _socket.create_connection = socket.create_connection\n if hasattr(socket, 'socketpair'):\n _socket.socketpair = socket.socketpair\n if hasattr(socket, 'fromfd'):\n _socket.fromfd = socket.fromfd\n try:\n from gevent.socket import ssl, sslerror\n _socket.ssl = ssl\n _socket.sslerror = sslerror\n except ImportError:\n if aggressive:\n try:\n del _socket.ssl\n except AttributeError:\n pass\n if dns:\n patch_dns()",
"def set_direct_io(self, dio=True):\n\n fcntl.ioctl(self.fd, self.LOOP_SET_DIRECT_IO, dio)",
"def lift_descriptor(self, descriptor):\n return UnboundAttribute(descriptor, self.owner)",
"def _collectSocketDetails(self):\n del self.socket, self.fileno",
"def bind_unix_socket(sock, addr):\n assert sock.family == socket.AF_UNIX\n try:\n sock.bind(addr)\n except PermissionError:\n sock.close()\n raise unittest.SkipTest('cannot bind AF_UNIX sockets')",
"def set_non_toggle_mask(self, non_toggle_mask):\n set_cmd = self._create_set_property_msg(\n \"_non_toggle_mask\", 0x08, non_toggle_mask\n )\n self._send_method(set_cmd, self._property_set)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mark the given socket fd as non-inheritable (POSIX). | def prevent_socket_inheritance(sock):
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC) | [
"def prevent_socket_inheritance(sock):\r\n if not _SetHandleInformation(sock.fileno(), 1, 0):\r\n raise WinError()",
"def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def make_nonblocking(fp):\n if isinstance(fp, socket.socket):\n fp.setblocking(0)\n elif fcntl:\n flags = fcntl.fcntl(fp, fcntl.F_GETFL)\n flags |= os.O_NONBLOCK\n fcntl.fcntl(fp, fcntl.F_SETFL, flags)\n else:\n raise RuntimeError()",
"def setBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def set_wakeup_fd(space, fd):\n if not space.threadlocals.signals_enabled():\n raise oefmt(space.w_ValueError,\n \"set_wakeup_fd only works in main thread or with \"\n \"__pypy__.thread.enable_signals()\")\n if fd != -1:\n try:\n os.fstat(fd)\n except OSError as e:\n if e.errno == errno.EBADF:\n raise oefmt(space.w_ValueError, \"invalid fd\")\n old_fd = pypysig_set_wakeup_fd(fd, True)\n return space.newint(intmask(old_fd))",
"def skip_unless_bind_unix_socket(test):\n if not hasattr(socket, 'AF_UNIX'):\n return unittest.skip('No UNIX Sockets')(test)\n global _bind_nix_socket_error\n if _bind_nix_socket_error is None:\n from .os_helper import TESTFN, unlink\n path = TESTFN + \"can_bind_unix_socket\"\n with socket.socket(socket.AF_UNIX) as sock:\n try:\n sock.bind(path)\n _bind_nix_socket_error = False\n except OSError as e:\n _bind_nix_socket_error = e\n finally:\n unlink(path)\n if _bind_nix_socket_error:\n msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error\n return unittest.skip(msg)(test)\n else:\n return test",
"def _set_nonblock(self, stream):\n fdesc = stream.fileno()\n flags = fcntl.fcntl(fdesc, fcntl.F_GETFL)\n fcntl.fcntl(fdesc, fcntl.F_SETFL, flags | os.O_NONBLOCK)",
"def make_nonblocking ( f, nonblocking=True ):\n\n\timport os, fcntl\n\t\n\tflags = fcntl.fcntl ( f, fcntl.F_GETFL )\n\tif nonblocking:\n\t\tflags |= os.O_NONBLOCK\n\telse:\n\t\tflags &= ~os.O_NONBLOCK\n\tfcntl.fcntl( f, fcntl.F_SETFL, flags )",
"def patch_socket(autononous=True):\r\n \r\n if stacklessio:\r\n from stacklessio import _socket\r\n sys.modules[\"_socket\"] = _socket\r\n else:\r\n # Fallback on the generic 'stacklesssocket' module.\r\n if pyuv:\r\n from stacklesslib.replacements import socket_pyuv as socket\r\n else:\r\n from stacklesslib.replacements import socket_asyncore as socket\r\n\r\n socket._sleep_func = main.sleep\r\n socket._schedule_func = lambda: main.sleep(0)\r\n # If the user plans to pump themselves, disable auto-pumping.\r\n if not pyuv and not autononous:\r\n socket._manage_sockets_func = lambda: None\r\n socket.install()",
"def patch_socket(dns=True, aggressive=True):\n from gevent import socket\n _socket = __import__('socket')\n _socket.socket = socket.socket\n _socket.SocketType = socket.SocketType\n _socket.create_connection = socket.create_connection\n if hasattr(socket, 'socketpair'):\n _socket.socketpair = socket.socketpair\n if hasattr(socket, 'fromfd'):\n _socket.fromfd = socket.fromfd\n try:\n from gevent.socket import ssl, sslerror\n _socket.ssl = ssl\n _socket.sslerror = sslerror\n except ImportError:\n if aggressive:\n try:\n del _socket.ssl\n except AttributeError:\n pass\n if dns:\n patch_dns()",
"def unregister(self, fd):\r\n self.read.discard(fd)\r\n self.write.discard(fd)\r\n self.error.discard(fd)",
"def bind_unix_socket(sock, addr):\n assert sock.family == socket.AF_UNIX\n try:\n sock.bind(addr)\n except PermissionError:\n sock.close()\n raise unittest.SkipTest('cannot bind AF_UNIX sockets')",
"def rem_fd(self, fd):\n raise NotImplementedError('ReactorInterface.rem_fd method not'\\\n ' implemented.')",
"def is_socket(fd):\n file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)\n\n try:\n file_socket.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)\n except socket.error as ex:\n return ex.args[0] != errno.ENOTSOCK\n else:\n return True",
"def bind_unix_socket(\n file: str, mode: int = 0o600, backlog: int = _DEFAULT_BACKLOG\n ) -> socket.socket:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except socket.error as e:\n if errno_from_exception(e) != errno.ENOPROTOOPT:\n # Hurd doesn't support SO_REUSEADDR\n raise\n sock.setblocking(False)\n try:\n st = os.stat(file)\n except FileNotFoundError:\n pass\n else:\n if stat.S_ISSOCK(st.st_mode):\n os.remove(file)\n else:\n raise ValueError(\"File %s exists and is not a socket\", file)\n sock.bind(file)\n os.chmod(file, mode)\n sock.listen(backlog)\n return sock",
"def set_nonmaskable_callback(self, key_name, callback, silent=False):\n self.check_special_callback(key_name)\n if not silent:\n self.sanity_check_cb(key_name, callback)\n self.nonmaskable_keymap[key_name] = callback",
"def testReadInterrupted(self):\n for version in [4, 5, 6]:\n family = {4: AF_INET, 5: AF_INET6, 6: AF_INET6}[version]\n s = net_test.UDPSocket(family)\n self.SelectInterface(s, random.choice(self.NETIDS), \"mark\")\n addr = self.GetRemoteAddress(version)\n\n # Check that reads on connected sockets are interrupted.\n s.connect((addr, 53))\n self.assertEquals(3, s.send(\"foo\"))\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)\n\n # A destroyed socket is no longer connected, but still usable.\n self.assertRaisesErrno(EDESTADDRREQ, s.send, \"foo\")\n self.assertEquals(3, s.sendto(\"foo\", (addr, 53)))\n\n # Check that reads on unconnected sockets are also interrupted.\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)",
"def mark_as_unmanaged(self):\n fname = self.unix_path.split('/')[-1]\n self.unix_path = self.unix_path[: -len(fname)] + 'unmanage-' + fname",
"def _set_server_mode_faulty(server, mode):\n allowed_mode = ()\n _do_set_server_mode(server, mode, allowed_mode)",
"def _setup_unix(self, bind, basedir=None):\n if AF_UNIX is None:\n raise ConfigurationError(\"UNIX domain sockets are not available\")\n\n obind = repr(bind(0))\n if bind(u'perm'):\n try:\n socket_perm = int(bind('perm'), 8)\n except (TypeError, ValueError):\n raise ConfigurationError(\"Invalid permission\")\n umask = 0777 & ~socket_perm\n else:\n umask = None\n basedir = basedir or _os.getcwd()\n if not isinstance(basedir, unicode):\n basedir = basedir.decode(_sys.getfilesystemencoding())\n path = _os.path.normpath(_os.path.join(\n basedir, bind(u'path')\n )).encode(_sys.getfilesystemencoding())\n socket = _socket.socket(AF_UNIX, _socket.SOCK_STREAM)\n self._sockets.append(UnixSocket(socket, obind, path, umask))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an SSL adapter class for the given name. | def get_ssl_adapter_class(name='pyopenssl'):
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter | [
"def get_ssl_adapter_class(name='builtin'):\r\n adapter = ssl_adapters[name.lower()]\r\n if isinstance(adapter, basestring):\r\n last_dot = adapter.rfind(\".\")\r\n attr_name = adapter[last_dot + 1:]\r\n mod_path = adapter[:last_dot]\r\n\r\n try:\r\n mod = sys.modules[mod_path]\r\n if mod is None:\r\n raise KeyError()\r\n except KeyError:\r\n # The last [''] is important.\r\n mod = __import__(mod_path, globals(), locals(), [''])\r\n\r\n # Let an AttributeError propagate outward.\r\n try:\r\n adapter = getattr(mod, attr_name)\r\n except AttributeError:\r\n raise AttributeError(\"'%s' object has no attribute '%s'\"\r\n % (mod_path, attr_name))\r\n\r\n return adapter",
"def get_adapter(self, name):\n if isinstance(name, Adapter):\n if name.name in self._adapters:\n if self._adapters[name.name] == name:\n return name\n if name in self._adapters:\n return self._adapters[name]",
"def get_driver_adapter(driver_name):\n try:\n driver_adapter = _ADAPTERS[driver_name]\n except KeyError:\n raise ValueError(f\"Encountered unregistered driver_name: {driver_name}\")\n\n return driver_adapter()",
"def nameToClass(name: str) -> Any:\n return _registered_classes.get(name)",
"def get_strategy(cls, name, ns=None):\n ns = ns or cls.__strategy_ns__\n if ns is None:\n raise RuntimeError(\n _('No namespace provided and __strategy_ns__ unset'))\n\n LOG.debug('Looking for strategy %s in %s', name, ns)\n\n return utils.import_class(ns + \".\" + name)",
"def get_by_name(name):\n return database.get(Certificate, name, field='name')",
"def get_encoder_by_name(name, board_size):\n if isinstance(board_size, int):\n board_size = (board_size, board_size)\n module = importlib.import_module('dlgo.encoders.' + name)\n constructor = getattr(module, 'create')\n return constructor(board_size)",
"def from_name(name):\n Strategy = possible_writers[name]\n if Strategy.isfunctional():\n strategy = Strategy()\n strategy.name = name\n return strategy\n raise NonFunctionalStrategy(name, Strategy.hint)",
"def get_detector_class(name=\"webcheck4\"):\n return find_class(name, detector_classes())",
"def get_collection_class(cls, name):\n try:\n return cls.collection_classes[name]\n except KeyError:\n raise KeyError(f\"There is no Collection Class of type: '{name}'; currently supported values: [{', '.join(get_collection_classes().keys())}]\")",
"def get_allowed_protocol_by_name(self,\n name,\n headers=None,\n **query_parameters):\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n check_type(name, basestring,\n may_be_none=False)\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n 'name': name,\n }\n\n e_url = ('/ers/config/allowedprotocols/name/{name}')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n _api_response = self._session.get(endpoint_full_url, params=_params,\n headers=_headers)\n else:\n _api_response = self._session.get(endpoint_full_url, params=_params)\n\n return self._object_factory('bpm_ac8c8cb9b5007a1e1a6434a20a881_v3_0_0', _api_response)",
"def get_adapter(self, url):\n for (prefix, adapter) in self.adapters.items():\n\n if url.lower().startswith(prefix):\n return adapter\n\n # Nothing matches :-/\n #raise InvalidSchema(\"No connection adapters were found for '%s'\" % url)",
"def get_layer_by_name(name):\n if name == ConvLayer.__name__:\n return ConvLayer\n elif name == DepthConvLayer.__name__:\n return DepthConvLayer\n elif name == PoolingLayer.__name__:\n return PoolingLayer\n elif name == IdentityLayer.__name__:\n return IdentityLayer\n elif name == LinearLayer.__name__:\n return LinearLayer\n else:\n raise ValueError('unrecognized layer: %s' % name)",
"def get_class_from_config(key, config):\n try:\n return get_class_by_name(config[key])\n except Exception, e:\n raise AttributeError(\n \"Could not get class '%s' for Auth setting '%s' >> %s\" % \n (config[key], key, e))",
"def GetConfigWriterClass(name):\n try:\n return _CONFIG_DATA_TYPES[name]\n except KeyError:\n msg = \"Unknown configuration storage type: %r\" % name\n raise errors.ConfigurationError(msg)",
"def __call__(self, adaptee):\n\n adapter = self._adapter_cache.get(adaptee, None)\n if adapter is None:\n adapter = self.factory(adaptee)\n self._adapter_cache[adaptee] = adapter\n\n return adapter",
"def get_driver_by_name(driver_name: str) -> Driver:\n driver = DriverName[driver_name]\n return get_driver(driver)",
"def importPyDeviceClass(cls,name):\n name = name.upper()\n py_devices = cls.findPyDevices()\n if name in py_devices:\n return py_devices[name]\n raise _exc.DevPYDEVICE_NOT_FOUND",
"def get_default_adapter(info, config):\n target_adapter_name = f'{config.model.name}Adapter'\n for module in ADAPTER_MODULES:\n if hasattr(module, target_adapter_name):\n adapter_cls = getattr(module, target_adapter_name)\n return adapter_cls(info, config)\n\n # Fall back on a common adapter.\n return common_adapters.SequenceAdapter(info, config)",
"def get_component_class(name):\n return _COMPONENT_CLASSES[name]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
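The `get_ssl_adapter_class` row above resolves an adapter lazily from a dotted path. A minimal, self-contained sketch of that lazy-import pattern follows (illustrative only, not part of any dataset row; the `registry` mapping and `resolve` name are invented for the example, and it is written in Python 3 form):

```python
import sys

# Hypothetical registry: a name maps either to a class or to a dotted path.
registry = {
    "json": "json.JSONEncoder",
}

def resolve(name):
    target = registry[name.lower()]
    if isinstance(target, str):
        mod_path, _, attr_name = target.rpartition(".")
        mod = sys.modules.get(mod_path)
        if mod is None:
            # The trailing [""] fromlist makes __import__ return the leaf module.
            mod = __import__(mod_path, globals(), locals(), [""])
        target = getattr(mod, attr_name)  # let AttributeError propagate
    return target

print(resolve("json"))  # <class 'json.encoder.JSONEncoder'>
```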
Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names. | def plat_specific_errors(*errnames):
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys()) | [
"def listErrorCodes(errno=None):\n\n\tif errno is None:\n\t\tfor i in range(MinErrorNo, (MaxErrorNo+1)):\n\t\t\tlistErrorCodes(errno=i)\n\telse:\n\t\tif errno == 1:\n\t\t\tprint \"1: End of file encountered during filehandle read\"\n\t\telif errno == 2:\n\t\t\tprint \"2: End of file encountered during numpy.fromfile call\"\n\t\telif errno == 3:\n\t\t\tprint \"3: Mark 5C sync word differs from expected\"\n\t\telif errno == 4:\n\t\t\tprint \"4: Data appears to be TBW, not TBN as expected\"\n\t\telif errno == 5:\n\t\t\tprint \"5: Data appears to be TBN, not TBW as expected\"\n\t\telse:\n\t\t\tprint \"Unknown error code '%i'\" % errno",
"def test_plat_specific_errors(err_names, err_nums):\n actual_err_nums = errors.plat_specific_errors(*err_names)\n assert len(actual_err_nums) == len(err_nums)\n assert sorted(actual_err_nums) == sorted(err_nums)",
"def __errcode_names(cls, err_vars):\n errcode = None\n errmsg = None\n for evar in err_vars:\n stdname = evar.get_prop_value('standard_name')\n if stdname == 'ccpp_error_code':\n errcode = evar.get_prop_value('local_name')\n elif stdname == 'ccpp_error_message':\n errmsg = evar.get_prop_value('local_name')\n else:\n emsg = \"Bad errcode variable, '{}'\"\n raise ParseInternalError(emsg.format(stdname))\n # end if\n # end for\n if (not errcode) or (not errmsg):\n raise ParseInternalError(\"Unsupported error scheme\")\n # end if\n return errcode, errmsg",
"def get_all_errs(self):\n thiserr = self.get_err()\n errors = []\n while thiserr != '+0,\"No error\"':\n thiserr = self.get_err()\n errors.append(thiserr)\n return errors",
"async def read_errors(self) -> list[int]:\n last_5_errors = await self.create_and_send_command(ERRORS)\n logger.debug(f\"Error reading returns {last_5_errors}\")\n return [int(err_code) for err_code in last_5_errors.split(\",\")]",
"def get_socket_conn_refused_errs():\n errors = [errno.ECONNREFUSED]\n if hasattr(errno, 'ENETUNREACH'):\n # On Solaris, ENETUNREACH is returned sometimes instead of ECONNREFUSED\n errors.append(errno.ENETUNREACH)\n if hasattr(errno, 'EADDRNOTAVAIL'):\n # bpo-31910: socket.create_connection() fails randomly\n # with EADDRNOTAVAIL on Travis CI\n errors.append(errno.EADDRNOTAVAIL)\n if hasattr(errno, 'EHOSTUNREACH'):\n # bpo-37583: The destination host cannot be reached\n errors.append(errno.EHOSTUNREACH)\n if not IPV6_ENABLED:\n errors.append(errno.EAFNOSUPPORT)\n return errors",
"def library_errors():\n ret = quick_library_check()\n return ret[_ERRORS]",
"def get_errno(e):\n try:\n return e.errno\n except AttributeError:\n return e.args[0]",
"def get_errors(self):\n result = []\n for error in self.errors:\n result.append(os.path.basename(error[0]) +\n ':\\n ' + str(error[1]) + '\\n')\n return result",
"def get_err_counter(self, name):\n return sum(self.get_counter(name))",
"def is_errno(error, errnos):\n\n if not isinstance(error, EnvironmentError):\n return False\n\n if isinstance(errnos, collections.Iterable):\n return error.errno in errnos\n else:\n return error.errno == errnos",
"def all_error_type(self):\n all_count_error_type = []\n for i in range(self.size):\n d = dict()\n for et in ErrorType:\n d[et] = 0\n all_count_error_type.append(d)\n for t in self.multi_alignment_tokens:\n error_type_list = t.error_type\n\n for (M, error_type) in enumerate(error_type_list):\n all_count_error_type[M][error_type] += 1\n return all_count_error_type\n\n # print(all_count_error_type)",
"def errors(self):\n return [thread.err for thread in self._threads]",
"def list_error_types():\n return list(sorted(ApiError.subtypes(), key=lambda e: e.error_type))",
"def get_errors(self, value):\n return list(self.errors(value))",
"def GetErrors(self):\n return error_check.GetErrors(self.GetData(), self.edid_version)",
"def errno_from_exception(e):\n\n\tif hasattr(e, 'errno'):\n\t\treturn e.errno\n\telif e.args:\n\t\treturn e.args[0]\n\telse:\n\t\treturn None",
"def calculate_errors(residuals):\n num_residuals = len(residuals)\n mfe = (residuals.sum() / num_residuals).tolist()[0]\n mae = (residuals.abs().sum() / num_residuals).tolist()[0]\n rmse = (residuals.pow(2).sum().pow(0.5)).tolist()[0]\n residuals = residuals.values\n residuals = [value.item() for value in residuals]\n return mfe, mae, rmse",
"def errors(self, cluster: str, namespace: str) -> list[str]:\n return self._errors.setdefault(cluster, {}).setdefault(namespace, [])",
"def getErrorIndices(self):\n\n errors = set()\n\n for const in self.constraints:\n errorRows = const.eval(self.df)\n for col in const.getColumnList(self.df):\n for row in errorRows:\n errors.add((row, col))\n\n return errors"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
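A short usage sketch for the `plat_specific_errors` row above (illustrative only, not part of any dataset row); the function is repeated here so the snippet runs on its own:

```python
import errno

def plat_specific_errors(*errnames):
    errno_names = dir(errno)
    nums = [getattr(errno, k) for k in errnames if k in errno_names]
    # de-dupe while preserving order
    return list(dict.fromkeys(nums))

# EBADF exists on every platform; WSAEBADF is Windows-only and is silently
# skipped elsewhere, which is exactly the point of the name filtering.
print(plat_specific_errors("EBADF", "WSAEBADF"))
```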
Parse a RequestURI into (scheme, authority, path). | def parse_request_uri(self, uri):
if uri == ASTERISK:
return None, None, uri
scheme, sep, remainder = uri.partition(b'://')
if sep and QUESTION_MARK not in scheme:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
authority, path_a, path_b = remainder.partition(FORWARD_SLASH)
return scheme.lower(), authority, path_a+path_b
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None | [
"def _url_parse(uri):\n host = \"\"\n path = \"\"\n\n p_uri = urlparse(uri)\n host = p_uri.netloc\n path = p_uri.path.rstrip('/').strip('/')\n\n return (host,path)",
"def split_path(uri):\n parsed = urlparse(uri)\n return parsed.path, parsed.query, parsed.fragment",
"def parseURI(self, uri):\n match = re.match(self.regex, uri, self.regexFlags)\n if not match:\n raise InvalidURIException(\"Invalid URI: %r\" % uri)\n return match.groupdict()",
"def parse_path(self, path):\n\n parsed = urllib.parse.urlparse(path)\n return parsed.path, urllib.parse.parse_qs(parsed.query)",
"def parse_path(r):\n m = _REQUEST_RE.search(r)\n return m.groupdict().get(\"path\") if m else \"\"",
"def parseCheckoutUri(uri):\n\n\t# Attempt to parse a URI with a + in it. ex: hg+http://blah\n\t# If it doesn't find the 'type' it should extract 'real_uri' and 'rev'\n\tm = re.match(r'^((?P<type>\\w*)\\+)?(?P<realUri>.+?)(#(?P<rev>.+))?$', uri)\n\tif not m or not m.group('realUri'):\n\t\tsysExit(\"Couldn't parse repository URI \" + uri)\n\n\turiType = m.group('type')\n\trealUri = m.group('realUri')\n\trev = m.group('rev')\n\n\t# Attempt to parse a URI without a + in it. ex: svn://blah\n\tif not uriType:\n\t\tm = re.match(r'^(\\w*).*$', realUri)\n\t\tif m:\n\t\t\turiType = m.group(1)\n\n\tif not uriType:\n\t\tsysExit(\"Couldn't parse repository type from URI \" + realUri)\n\n\treturn (uriType, realUri, rev)",
"def pathfromuri(uri):\n\n\t\taddress = urlsplit(uri)\n\t\tbase = address.netloc\n\n\t\tif address.path != '':\n\n\t\t\t# remove first slash\n\t\t\tif base == '' and address.path[0:1] == '/':\n\t\t\t\tpath = address.path[1:]\n\t\t\telse:\n\t\t\t\tpath = address.path\n\n\t\t\t# don't underscore a directory type path\n\t\t\tif path[-1] == '/':\n\t\t\t\tpath = re.sub('/', '_', path[:-1])\n\t\t\telse:\n\t\t\t\tpath = re.sub('/', '_', path)\n\n\t\t\tbase += path\n\n\t\tif address.query != '':\n\t\t\tquery = re.sub('&', '-', address.query)\n\t\t\tbase += '+' + query\n\n\t\treturn base",
"def parseURI(url):\n\thostport = url.split(':')\n\thost = hostport[0] if hostport[0] != 'localhost' else socket.gethostname()\n\treturn host, hostport[1] if len(hostport) > 1 else '80'",
"def SplitUriRef(uriref):\r\n # the pattern will match every possible string, so it's safe to\r\n # assume there's a groupdict method to call.\r\n g = SPLIT_URI_REF_PATTERN.match(uriref).groupdict()\r\n scheme = g['scheme']\r\n authority = g['authority']\r\n path = g['path']\r\n query = g['query']\r\n fragment = g['fragment']\r\n return (scheme, authority, path, query, fragment)",
"def parse_backend_uri(backend_uri):\n if backend_uri.find(':') == -1:\n raise InvalidCacheBackendError(\"Backend URI must start with scheme://\")\n scheme, rest = backend_uri.split(':', 1)\n if not rest.startswith('//'):\n raise InvalidCacheBackendError(\"Backend URI must start with scheme://\")\n\n host = rest[2:]\n qpos = rest.find('?')\n\n if qpos != -1:\n params = dict(parse_qsl(rest[qpos + 1:]))\n host = rest[2:qpos]\n else:\n params = {}\n\n if host.endswith('/'):\n host = host[:-1]\n\n return scheme, host, params",
"def urlparse(self, url, scheme='', allow_fragments=True):\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url, scheme, allow_fragments)\n self._scheme=scheme\n self._netloc=netloc\n self._path=path\n self._params=params\n self._query=query\n self._fragment=fragment\n return scheme, netloc, path, params, query, fragment",
"def SplitUriRef(uriref):\n # the pattern will match every possible string, so it's safe to\n # assume there's a groupdict method to call.\n g = SPLIT_URI_REF_PATTERN.match(uriref).groupdict()\n scheme = g['scheme']\n authority = g['authority']\n path = g['path']\n query = g['query']\n fragment = g['fragment']\n return (scheme, authority, path, query, fragment)",
"def urlparse(url):\n result = {} \n status = 0\n mark = 0\n remain = None \n for i, c in enumerate(url): \n #not enough\n if i < mark:\n continue\n\n #optimization for letters\n if c in letters:\n continue\n \n #handle delimiters\n if c == \":\": \n if url[i: i+3] == \"://\":\n status = 1\n result[\"scheme\"] = url[:i]\n mark = i + 2 \n remain = \"host\" \n else: \n #host:port\n if url[i+1].isdigit():\n #next port\n result[\"host\"] = url[mark:i] \n status = 4 \n remain = \"port\"\n #user\n else: \n result[\"user\"] = url[mark:i] \n #next password\n status = 2 \n remain = \"password\"\n\n elif c == \"/\": \n if status >= 5: \n continue\n #host:port, for port\n if status in (0, 1, 3):\n result[\"host\"] = url[mark:i] \n if status == 4:\n result[\"port\"] = url[mark:i] \n #next possible \"path\"\n remain = \"path\" \n status = 5 \n elif c == \"@\": \n if status != 2:\n #user@host\n result[\"user\"] = url[mark:i] \n #user:password@host\n else:\n result[\"password\"] = url[mark:i] \n #next possible \"host\"\n remain = \"host\"\n status = 3 \n\n elif c in \";?#\":\n #path\n if status == 5:\n result[\"path\"] = url[mark:i] \n status = 6 \n #params\n elif status == 6:\n result[\"params\"] = url[mark:i] \n status = 7\n #query\n elif status == 7:\n result[\"query\"] = url[mark:i] \n status = 8\n #frag\n elif status == 8: \n result[\"fragment\"] = url[mark:i] \n status = 9 \n #skip normal char\n else: \n continue\n\n if c == \";\":\n #next params \n remain = \"params\"\n status = 6\n\n elif c == \"?\":\n #next query\n remain = \"query\"\n status = 7\n\n elif c == \"#\":\n remain = \"fragment\"\n status = 8 \n\n if mark < i:\n mark = i + 1\n else:\n mark += 1\n #host.com \n if not status:\n result[\"host\"] = url\n else:\n if mark < len(url):\n result[remain] = url[mark:]\n result.setdefault(\"path\", \"/\")\n return result",
"def parse(url_str):\r\n url_str = to_unicode(url_str)\r\n\r\n result = urlparse(url_str)\r\n netloc_parts = result.netloc.split('@')\r\n if len(netloc_parts) == 1:\r\n username = password = None\r\n host = netloc_parts[0]\r\n else:\r\n username, password = netloc_parts[0].split(':')\r\n host = netloc_parts[1]\r\n\r\n if host and ':' in host:\r\n host = host.split(':')[0]\r\n\r\n return {'host': host,\r\n 'username': username,\r\n 'password': password,\r\n 'scheme': result.scheme,\r\n 'port': result.port,\r\n 'path': result.path,\r\n 'query': result.query,\r\n 'fragment': result.fragment}",
"def urlunparse(parts):\n scheme, netloc, path, params, query, fragment = parts\n\n # Avoid encoding the windows drive letter colon\n if RE_DRIVE_LETTER_PATH.match(path):\n quoted_path = path[:3] + parse.quote(path[3:])\n else:\n quoted_path = parse.quote(path)\n\n return parse.urlunparse((\n parse.quote(scheme),\n parse.quote(netloc),\n quoted_path,\n parse.quote(params),\n parse.quote(query),\n parse.quote(fragment)\n ))",
"def parse_uri(self, uri):\n return self.parse(HTTPCache(uri).content())",
"def parseURI(self,url):\n addr = \"\"\n parts = []\n ip = False\n parts = url.split('/')\n #extract ip address with port\n if(len(parts)>2):\n addr = parts[2] #this contains X.X.X.X:PORT\n else:\n addr = parts[0] #it is possible the mtURL is \"X.X.X.X:PORT/\" (no http), then parts[0] will still be X.X.X.X:PORT\n # extract the ip address \n addr = addr.split(':')\n if(len(addr)>1):\n ip = addr[0]\n port = addr[1]\n else:\n ip = False\n port = False\n return ip, port",
"def parse_for_base_url(url: str) -> str:\n parsed = urlparse(url)\n parsed = (parsed.netloc + parsed.path).rstrip(\"/\")\n return parsed",
"def url_parser(url):\r\n if url.startswith(URL_SCHEMES):\r\n return url\r\n else:\r\n return 'https://' + url",
"def parse_address(addr, strict=False):\n if not isinstance(addr, six.string_types):\n raise TypeError(\"expected str, got %r\" % addr.__class__.__name__)\n scheme, sep, loc = addr.rpartition(\"://\")\n if strict and not sep:\n msg = (\n \"Invalid url scheme. \"\n \"Must include protocol like tcp://localhost:8000. \"\n \"Got %s\" % addr\n )\n raise ValueError(msg)\n if not sep:\n scheme = DEFAULT_SCHEME\n return scheme, loc"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
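A self-contained sketch of the `parse_request_uri` row above, rewritten as a module-level function so the three Request-URI shapes can be shown directly (illustrative only, not part of any dataset row; the byte constants are assumed to match the ones the method refers to):

```python
ASTERISK = b"*"
FORWARD_SLASH = b"/"
QUESTION_MARK = b"?"

def parse_request_uri(uri):
    if uri == ASTERISK:                      # e.g. "OPTIONS * HTTP/1.1"
        return None, None, uri
    scheme, sep, remainder = uri.partition(b"://")
    if sep and QUESTION_MARK not in scheme:  # absoluteURI
        authority, path_a, path_b = remainder.partition(FORWARD_SLASH)
        return scheme.lower(), authority, path_a + path_b
    if uri.startswith(FORWARD_SLASH):        # abs_path
        return None, None, uri
    return None, uri, None                   # bare authority (e.g. CONNECT)

print(parse_request_uri(b"http://example.com/a?b=1"))  # (b'http', b'example.com', b'/a?b=1')
print(parse_request_uri(b"/a?b=1"))                    # (None, None, b'/a?b=1')
print(parse_request_uri(b"example.com:443"))           # (None, b'example.com:443', None)
```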
Mark the given socket fd as noninheritable (Windows). | def prevent_socket_inheritance(sock):
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError() | [
"def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def make_nonblocking(fp):\n if isinstance(fp, socket.socket):\n fp.setblocking(0)\n elif fcntl:\n flags = fcntl.fcntl(fp, fcntl.F_GETFL)\n flags |= os.O_NONBLOCK\n fcntl.fcntl(fp, fcntl.F_SETFL, flags)\n else:\n raise RuntimeError()",
"def setBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def _set_nonblock(self, stream):\n fdesc = stream.fileno()\n flags = fcntl.fcntl(fdesc, fcntl.F_GETFL)\n fcntl.fcntl(fdesc, fcntl.F_SETFL, flags | os.O_NONBLOCK)",
"def set_wakeup_fd(space, fd):\n if not space.threadlocals.signals_enabled():\n raise oefmt(space.w_ValueError,\n \"set_wakeup_fd only works in main thread or with \"\n \"__pypy__.thread.enable_signals()\")\n if fd != -1:\n try:\n os.fstat(fd)\n except OSError as e:\n if e.errno == errno.EBADF:\n raise oefmt(space.w_ValueError, \"invalid fd\")\n old_fd = pypysig_set_wakeup_fd(fd, True)\n return space.newint(intmask(old_fd))",
"def unregister(self, fd):\r\n self.read.discard(fd)\r\n self.write.discard(fd)\r\n self.error.discard(fd)",
"def skip_unless_bind_unix_socket(test):\n if not hasattr(socket, 'AF_UNIX'):\n return unittest.skip('No UNIX Sockets')(test)\n global _bind_nix_socket_error\n if _bind_nix_socket_error is None:\n from .os_helper import TESTFN, unlink\n path = TESTFN + \"can_bind_unix_socket\"\n with socket.socket(socket.AF_UNIX) as sock:\n try:\n sock.bind(path)\n _bind_nix_socket_error = False\n except OSError as e:\n _bind_nix_socket_error = e\n finally:\n unlink(path)\n if _bind_nix_socket_error:\n msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error\n return unittest.skip(msg)(test)\n else:\n return test",
"def patch_socket(autononous=True):\r\n \r\n if stacklessio:\r\n from stacklessio import _socket\r\n sys.modules[\"_socket\"] = _socket\r\n else:\r\n # Fallback on the generic 'stacklesssocket' module.\r\n if pyuv:\r\n from stacklesslib.replacements import socket_pyuv as socket\r\n else:\r\n from stacklesslib.replacements import socket_asyncore as socket\r\n\r\n socket._sleep_func = main.sleep\r\n socket._schedule_func = lambda: main.sleep(0)\r\n # If the user plans to pump themselves, disable auto-pumping.\r\n if not pyuv and not autononous:\r\n socket._manage_sockets_func = lambda: None\r\n socket.install()",
"def make_nonblocking ( f, nonblocking=True ):\n\n\timport os, fcntl\n\t\n\tflags = fcntl.fcntl ( f, fcntl.F_GETFL )\n\tif nonblocking:\n\t\tflags |= os.O_NONBLOCK\n\telse:\n\t\tflags &= ~os.O_NONBLOCK\n\tfcntl.fcntl( f, fcntl.F_SETFL, flags )",
"def testReadInterrupted(self):\n for version in [4, 5, 6]:\n family = {4: AF_INET, 5: AF_INET6, 6: AF_INET6}[version]\n s = net_test.UDPSocket(family)\n self.SelectInterface(s, random.choice(self.NETIDS), \"mark\")\n addr = self.GetRemoteAddress(version)\n\n # Check that reads on connected sockets are interrupted.\n s.connect((addr, 53))\n self.assertEquals(3, s.send(\"foo\"))\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)\n\n # A destroyed socket is no longer connected, but still usable.\n self.assertRaisesErrno(EDESTADDRREQ, s.send, \"foo\")\n self.assertEquals(3, s.sendto(\"foo\", (addr, 53)))\n\n # Check that reads on unconnected sockets are also interrupted.\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)",
"def rem_fd(self, fd):\n raise NotImplementedError('ReactorInterface.rem_fd method not'\\\n ' implemented.')",
"def is_socket(fd):\n file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)\n\n try:\n file_socket.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)\n except socket.error as ex:\n return ex.args[0] != errno.ENOTSOCK\n else:\n return True",
"def clear_fd(self):\n\n fcntl.ioctl(self.fd, self.LOOP_CLR_FD)",
"def patch_socket(dns=True, aggressive=True):\n from gevent import socket\n _socket = __import__('socket')\n _socket.socket = socket.socket\n _socket.SocketType = socket.SocketType\n _socket.create_connection = socket.create_connection\n if hasattr(socket, 'socketpair'):\n _socket.socketpair = socket.socketpair\n if hasattr(socket, 'fromfd'):\n _socket.fromfd = socket.fromfd\n try:\n from gevent.socket import ssl, sslerror\n _socket.ssl = ssl\n _socket.sslerror = sslerror\n except ImportError:\n if aggressive:\n try:\n del _socket.ssl\n except AttributeError:\n pass\n if dns:\n patch_dns()",
"def set_direct_io(self, dio=True):\n\n fcntl.ioctl(self.fd, self.LOOP_SET_DIRECT_IO, dio)",
"def lift_descriptor(self, descriptor):\n return UnboundAttribute(descriptor, self.owner)",
"def _collectSocketDetails(self):\n del self.socket, self.fileno",
"def bind_unix_socket(sock, addr):\n assert sock.family == socket.AF_UNIX\n try:\n sock.bind(addr)\n except PermissionError:\n sock.close()\n raise unittest.SkipTest('cannot bind AF_UNIX sockets')",
"def set_non_toggle_mask(self, non_toggle_mask):\n set_cmd = self._create_set_property_msg(\n \"_non_toggle_mask\", 0x08, non_toggle_mask\n )\n self._send_method(set_cmd, self._property_set)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Mark the given socket fd as noninheritable (POSIX). | def prevent_socket_inheritance(sock):
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC) | [
"def prevent_socket_inheritance(sock):\r\n if not _SetHandleInformation(sock.fileno(), 1, 0):\r\n raise WinError()",
"def _set_non_blocking(fd):\n import fcntl\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags | os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def make_nonblocking(fp):\n if isinstance(fp, socket.socket):\n fp.setblocking(0)\n elif fcntl:\n flags = fcntl.fcntl(fp, fcntl.F_GETFL)\n flags |= os.O_NONBLOCK\n fcntl.fcntl(fp, fcntl.F_SETFL, flags)\n else:\n raise RuntimeError()",
"def setBlocking(fd):\n flags = fcntl.fcntl(fd, fcntl.F_GETFL)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)",
"def set_wakeup_fd(space, fd):\n if not space.threadlocals.signals_enabled():\n raise oefmt(space.w_ValueError,\n \"set_wakeup_fd only works in main thread or with \"\n \"__pypy__.thread.enable_signals()\")\n if fd != -1:\n try:\n os.fstat(fd)\n except OSError as e:\n if e.errno == errno.EBADF:\n raise oefmt(space.w_ValueError, \"invalid fd\")\n old_fd = pypysig_set_wakeup_fd(fd, True)\n return space.newint(intmask(old_fd))",
"def skip_unless_bind_unix_socket(test):\n if not hasattr(socket, 'AF_UNIX'):\n return unittest.skip('No UNIX Sockets')(test)\n global _bind_nix_socket_error\n if _bind_nix_socket_error is None:\n from .os_helper import TESTFN, unlink\n path = TESTFN + \"can_bind_unix_socket\"\n with socket.socket(socket.AF_UNIX) as sock:\n try:\n sock.bind(path)\n _bind_nix_socket_error = False\n except OSError as e:\n _bind_nix_socket_error = e\n finally:\n unlink(path)\n if _bind_nix_socket_error:\n msg = 'Requires a functional unix bind(): %s' % _bind_nix_socket_error\n return unittest.skip(msg)(test)\n else:\n return test",
"def _set_nonblock(self, stream):\n fdesc = stream.fileno()\n flags = fcntl.fcntl(fdesc, fcntl.F_GETFL)\n fcntl.fcntl(fdesc, fcntl.F_SETFL, flags | os.O_NONBLOCK)",
"def make_nonblocking ( f, nonblocking=True ):\n\n\timport os, fcntl\n\t\n\tflags = fcntl.fcntl ( f, fcntl.F_GETFL )\n\tif nonblocking:\n\t\tflags |= os.O_NONBLOCK\n\telse:\n\t\tflags &= ~os.O_NONBLOCK\n\tfcntl.fcntl( f, fcntl.F_SETFL, flags )",
"def patch_socket(autononous=True):\r\n \r\n if stacklessio:\r\n from stacklessio import _socket\r\n sys.modules[\"_socket\"] = _socket\r\n else:\r\n # Fallback on the generic 'stacklesssocket' module.\r\n if pyuv:\r\n from stacklesslib.replacements import socket_pyuv as socket\r\n else:\r\n from stacklesslib.replacements import socket_asyncore as socket\r\n\r\n socket._sleep_func = main.sleep\r\n socket._schedule_func = lambda: main.sleep(0)\r\n # If the user plans to pump themselves, disable auto-pumping.\r\n if not pyuv and not autononous:\r\n socket._manage_sockets_func = lambda: None\r\n socket.install()",
"def patch_socket(dns=True, aggressive=True):\n from gevent import socket\n _socket = __import__('socket')\n _socket.socket = socket.socket\n _socket.SocketType = socket.SocketType\n _socket.create_connection = socket.create_connection\n if hasattr(socket, 'socketpair'):\n _socket.socketpair = socket.socketpair\n if hasattr(socket, 'fromfd'):\n _socket.fromfd = socket.fromfd\n try:\n from gevent.socket import ssl, sslerror\n _socket.ssl = ssl\n _socket.sslerror = sslerror\n except ImportError:\n if aggressive:\n try:\n del _socket.ssl\n except AttributeError:\n pass\n if dns:\n patch_dns()",
"def unregister(self, fd):\r\n self.read.discard(fd)\r\n self.write.discard(fd)\r\n self.error.discard(fd)",
"def bind_unix_socket(sock, addr):\n assert sock.family == socket.AF_UNIX\n try:\n sock.bind(addr)\n except PermissionError:\n sock.close()\n raise unittest.SkipTest('cannot bind AF_UNIX sockets')",
"def rem_fd(self, fd):\n raise NotImplementedError('ReactorInterface.rem_fd method not'\\\n ' implemented.')",
"def is_socket(fd):\n file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)\n\n try:\n file_socket.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)\n except socket.error as ex:\n return ex.args[0] != errno.ENOTSOCK\n else:\n return True",
"def bind_unix_socket(\n file: str, mode: int = 0o600, backlog: int = _DEFAULT_BACKLOG\n ) -> socket.socket:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n try:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except socket.error as e:\n if errno_from_exception(e) != errno.ENOPROTOOPT:\n # Hurd doesn't support SO_REUSEADDR\n raise\n sock.setblocking(False)\n try:\n st = os.stat(file)\n except FileNotFoundError:\n pass\n else:\n if stat.S_ISSOCK(st.st_mode):\n os.remove(file)\n else:\n raise ValueError(\"File %s exists and is not a socket\", file)\n sock.bind(file)\n os.chmod(file, mode)\n sock.listen(backlog)\n return sock",
"def set_nonmaskable_callback(self, key_name, callback, silent=False):\n self.check_special_callback(key_name)\n if not silent:\n self.sanity_check_cb(key_name, callback)\n self.nonmaskable_keymap[key_name] = callback",
"def testReadInterrupted(self):\n for version in [4, 5, 6]:\n family = {4: AF_INET, 5: AF_INET6, 6: AF_INET6}[version]\n s = net_test.UDPSocket(family)\n self.SelectInterface(s, random.choice(self.NETIDS), \"mark\")\n addr = self.GetRemoteAddress(version)\n\n # Check that reads on connected sockets are interrupted.\n s.connect((addr, 53))\n self.assertEquals(3, s.send(\"foo\"))\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)\n\n # A destroyed socket is no longer connected, but still usable.\n self.assertRaisesErrno(EDESTADDRREQ, s.send, \"foo\")\n self.assertEquals(3, s.sendto(\"foo\", (addr, 53)))\n\n # Check that reads on unconnected sockets are also interrupted.\n self.CloseDuringBlockingCall(s, lambda sock: sock.recv(4096),\n ECONNABORTED)",
"def mark_as_unmanaged(self):\n fname = self.unix_path.split('/')[-1]\n self.unix_path = self.unix_path[: -len(fname)] + 'unmanage-' + fname",
"def _set_server_mode_faulty(server, mode):\n allowed_mode = ()\n _do_set_server_mode(server, mode, allowed_mode)",
"def _setup_unix(self, bind, basedir=None):\n if AF_UNIX is None:\n raise ConfigurationError(\"UNIX domain sockets are not available\")\n\n obind = repr(bind(0))\n if bind(u'perm'):\n try:\n socket_perm = int(bind('perm'), 8)\n except (TypeError, ValueError):\n raise ConfigurationError(\"Invalid permission\")\n umask = 0777 & ~socket_perm\n else:\n umask = None\n basedir = basedir or _os.getcwd()\n if not isinstance(basedir, unicode):\n basedir = basedir.decode(_sys.getfilesystemencoding())\n path = _os.path.normpath(_os.path.join(\n basedir, bind(u'path')\n )).encode(_sys.getfilesystemencoding())\n socket = _socket.socket(AF_UNIX, _socket.SOCK_STREAM)\n self._sockets.append(UnixSocket(socket, obind, path, umask))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
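A quick usage sketch for the POSIX `prevent_socket_inheritance` row above (illustrative only, not part of any dataset row; it requires a platform that provides `fcntl`):

```python
import fcntl
import socket

def prevent_socket_inheritance(sock):
    fd = sock.fileno()
    old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.set_inheritable(True)         # clear FD_CLOEXEC so the effect is visible
prevent_socket_inheritance(sock)
print(sock.get_inheritable())      # False: FD_CLOEXEC is set again
sock.close()
```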
Return an SSL adapter class for the given name. | def get_ssl_adapter_class(name='builtin'):
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter | [
"def get_adapter(self, name):\n if isinstance(name, Adapter):\n if name.name in self._adapters:\n if self._adapters[name.name] == name:\n return name\n if name in self._adapters:\n return self._adapters[name]",
"def get_driver_adapter(driver_name):\n try:\n driver_adapter = _ADAPTERS[driver_name]\n except KeyError:\n raise ValueError(f\"Encountered unregistered driver_name: {driver_name}\")\n\n return driver_adapter()",
"def nameToClass(name: str) -> Any:\n return _registered_classes.get(name)",
"def get_strategy(cls, name, ns=None):\n ns = ns or cls.__strategy_ns__\n if ns is None:\n raise RuntimeError(\n _('No namespace provided and __strategy_ns__ unset'))\n\n LOG.debug('Looking for strategy %s in %s', name, ns)\n\n return utils.import_class(ns + \".\" + name)",
"def get_by_name(name):\n return database.get(Certificate, name, field='name')",
"def get_encoder_by_name(name, board_size):\n if isinstance(board_size, int):\n board_size = (board_size, board_size)\n module = importlib.import_module('dlgo.encoders.' + name)\n constructor = getattr(module, 'create')\n return constructor(board_size)",
"def from_name(name):\n Strategy = possible_writers[name]\n if Strategy.isfunctional():\n strategy = Strategy()\n strategy.name = name\n return strategy\n raise NonFunctionalStrategy(name, Strategy.hint)",
"def get_detector_class(name=\"webcheck4\"):\n return find_class(name, detector_classes())",
"def get_collection_class(cls, name):\n try:\n return cls.collection_classes[name]\n except KeyError:\n raise KeyError(f\"There is no Collection Class of type: '{name}'; currently supported values: [{', '.join(get_collection_classes().keys())}]\")",
"def get_allowed_protocol_by_name(self,\n name,\n headers=None,\n **query_parameters):\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n check_type(name, basestring,\n may_be_none=False)\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n 'name': name,\n }\n\n e_url = ('/ers/config/allowedprotocols/name/{name}')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n _api_response = self._session.get(endpoint_full_url, params=_params,\n headers=_headers)\n else:\n _api_response = self._session.get(endpoint_full_url, params=_params)\n\n return self._object_factory('bpm_ac8c8cb9b5007a1e1a6434a20a881_v3_0_0', _api_response)",
"def get_adapter(self, url):\n for (prefix, adapter) in self.adapters.items():\n\n if url.lower().startswith(prefix):\n return adapter\n\n # Nothing matches :-/\n #raise InvalidSchema(\"No connection adapters were found for '%s'\" % url)",
"def get_layer_by_name(name):\n if name == ConvLayer.__name__:\n return ConvLayer\n elif name == DepthConvLayer.__name__:\n return DepthConvLayer\n elif name == PoolingLayer.__name__:\n return PoolingLayer\n elif name == IdentityLayer.__name__:\n return IdentityLayer\n elif name == LinearLayer.__name__:\n return LinearLayer\n else:\n raise ValueError('unrecognized layer: %s' % name)",
"def get_class_from_config(key, config):\n try:\n return get_class_by_name(config[key])\n except Exception, e:\n raise AttributeError(\n \"Could not get class '%s' for Auth setting '%s' >> %s\" % \n (config[key], key, e))",
"def GetConfigWriterClass(name):\n try:\n return _CONFIG_DATA_TYPES[name]\n except KeyError:\n msg = \"Unknown configuration storage type: %r\" % name\n raise errors.ConfigurationError(msg)",
"def __call__(self, adaptee):\n\n adapter = self._adapter_cache.get(adaptee, None)\n if adapter is None:\n adapter = self.factory(adaptee)\n self._adapter_cache[adaptee] = adapter\n\n return adapter",
"def get_driver_by_name(driver_name: str) -> Driver:\n driver = DriverName[driver_name]\n return get_driver(driver)",
"def importPyDeviceClass(cls,name):\n name = name.upper()\n py_devices = cls.findPyDevices()\n if name in py_devices:\n return py_devices[name]\n raise _exc.DevPYDEVICE_NOT_FOUND",
"def get_default_adapter(info, config):\n target_adapter_name = f'{config.model.name}Adapter'\n for module in ADAPTER_MODULES:\n if hasattr(module, target_adapter_name):\n adapter_cls = getattr(module, target_adapter_name)\n return adapter_cls(info, config)\n\n # Fall back on a common adapter.\n return common_adapters.SequenceAdapter(info, config)",
"def get_component_class(name):\n return _COMPONENT_CLASSES[name]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a new environ dict targeting the given wsgi.version | def get_environ(self):
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = env_10.copy()
env['wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault('wsgi.url_encoding', 'utf-8')
try:
# SCRIPT_NAME is the empty string, who cares what encoding it is?
env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding'])
env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env['wsgi.url_encoding'] = 'ISO-8859-1'
env["PATH_INFO"] = env_10["PATH_INFO"]
env["QUERY_STRING"] = env_10["QUERY_STRING"]
return env | [
"def make_environ(extra=None, **kwds):\n environ = {}\n if extra is not None:\n environ.update(extra)\n environ[\"wsgi.version\"] = (1, 0)\n environ[\"wsgi.url_scheme\"] = \"http\"\n environ[\"SERVER_NAME\"] = \"localhost\"\n environ[\"SERVER_PORT\"] = \"80\"\n environ[\"REQUEST_METHOD\"] = \"GET\"\n environ[\"SCRIPT_NAME\"] = \"\"\n environ[\"PATH_INFO\"] = \"/\"\n environ.update(kwds)\n return environ",
"def expanded_env_dict():\n return generate_expanded_env_dict()",
"def environment_info(self):\n\n return {\n \"application_environment\": {\n \"framework\": \"pylons\",\n \"env\": dict(os.environ),\n \"language\": \"python\",\n \"language_version\": sys.version.replace('\\n', ''),\n \"application_root_directory\": self.project_root()\n },\n \"client\": {\n \"name\": \"pylons-exceptional\",\n \"version\": __version__,\n \"protocol_version\": EXCEPTIONAL_PROTOCOL_VERSION\n }\n }",
"def _base_environ(self, **request):\n environ = {\n 'HTTP_COOKIE': self.cookies.output(header='', sep='; '),\n 'PATH_INFO': '/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '127.0.0.1',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': 'testserver',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'wsgi.version': (1, 0),\n 'wsgi.url_scheme': 'http',\n 'wsgi.errors': self.errors,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': False,\n 'wsgi.run_once': False,\n }\n environ.update(self.defaults)\n environ.update(request)\n return environ",
"def environ(request):\r\n hostport = request.host.split(\":\")\r\n if len(hostport) == 2:\r\n host = hostport[0]\r\n port = int(hostport[1])\r\n else:\r\n host = request.host\r\n port = 443 if request.protocol == \"https\" else 80\r\n environ = {\r\n \"REQUEST_METHOD\": request.method,\r\n \"SCRIPT_NAME\": \"\",\r\n \"PATH_INFO\": to_wsgi_str(escape.url_unescape(\r\n request.path, encoding=None, plus=False)),\r\n \"QUERY_STRING\": request.query,\r\n \"REMOTE_ADDR\": request.remote_ip,\r\n \"SERVER_NAME\": host,\r\n \"SERVER_PORT\": str(port),\r\n \"SERVER_PROTOCOL\": request.version,\r\n \"wsgi.version\": (1, 0),\r\n \"wsgi.url_scheme\": request.protocol,\r\n \"wsgi.input\": BytesIO(escape.utf8(request.body)),\r\n \"wsgi.errors\": sys.stderr,\r\n \"wsgi.multithread\": False,\r\n \"wsgi.multiprocess\": True,\r\n \"wsgi.run_once\": False,\r\n }\r\n if \"Content-Type\" in request.headers:\r\n environ[\"CONTENT_TYPE\"] = request.headers.pop(\"Content-Type\")\r\n if \"Content-Length\" in request.headers:\r\n environ[\"CONTENT_LENGTH\"] = request.headers.pop(\"Content-Length\")\r\n for key, value in request.headers.items():\r\n environ[\"HTTP_\" + key.replace(\"-\", \"_\").upper()] = value\r\n return environ",
"def simple_environ(prefix='', env_value='value'):\n return {\n '{0}key'.format(prefix): env_value,\n 'a': 'b',\n }",
"def set_environ(envval):\n if overwrite:\n return lambda k, v: envval.update({k: str(v)})\n return lambda k, v: envval.setdefault(k, str(v))",
"def new_isv_app_settings_from_env():\n\n app_id, app_secret, verification_token, encrypt_key = Config.__app_settings_from_env()\n return AppSettings(APP_TYPE_ISV, app_id, app_secret, verification_token, encrypt_key)",
"def extend_env(extra_env):\n env = os.environ.copy()\n env.update(extra_env)\n return env",
"def inject_env():\n\n return dict(site.config, current_menu=current_menu)",
"def update_env(*remove, **update):\n orig_env = copy.deepcopy(os.environ)\n try:\n [os.environ.pop(r) for r in remove]\n os.environ.update(update)\n yield\n finally:\n os.environ = copy.deepcopy(orig_env)",
"def _get_version_from_container_config_env(self, attrs: dict) -> str:\n environment = attrs.get(\"Config\").get(\"Env\")\n for var in environment:\n if \"\".join(var).split(\"=\")[0] == self.version_var:\n version = \"\".join(var).split(\"=\")[1]\n return version\n return \"\"",
"def extract_msvc_env(self, vs_inst_path, arch):\n\n if arch not in self.archs:\n log.error(\"invalid architecture provided: %s\" % arch)\n raise XmakeException(\"invalid architecture provided: %s\" % arch)\n log.info(\"looking up env for \"+vs_inst_path)\n vc_vars_all = os.path.normpath(os.path.join(vs_inst_path, \"VC\", \"vcvarsall.bat\"))\n if not os.path.exists(vc_vars_all):\n log.error(\"vcvarsall.bat not found\")\n raise XmakeException(\"vcvarsall.bat not found\")\n\n cmd = subprocess.Popen(args=[\"cmd.exe\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\n cmd.stdin.write('\"%s\" %s\\n' % (vc_vars_all, arch))\n cmd.stdin.write('''\"%s\" -c \"import pickle, os; print '---{1}---\\\\n{0}\\\\n---{1}---'.format(pickle.dumps(dict(os.environ), -1).encode('base64'), 'ENV')\"\\n''' % sys.executable)\n cmd.stdin.close()\n output = cmd.stdout.read()\n rc = cmd.wait()\n\n if rc != 0:\n log.error(\"could not determine msvc environment\")\n raise XmakeException(\"could not determine msvc environment\")\n\n match = re.search(\"---ENV---(.*)---ENV---\", output, re.DOTALL)\n\n if match is None:\n log.error(\"getting environment failed\")\n raise XmakeException(\"getting environment failed\")\n\n environ_data = match.group(1)\n environ = pickle.loads(environ_data.strip().decode(\"base64\"))\n \n if self.printenv:\n log.info(\"environment modifications: \")\n for v in environ.keys():\n n=environ[v]\n if os.environ.has_key(v):\n if os.environ[v]!=n:\n log.info(\" modified: \"+v+\"=\"+os.environ[v]+\" -> \"+n)\n else:\n log.info(\" new : \"+v+\"=\"+n)\n \n return environ",
"def _get_env(self, config):\n for option, value in config.items():\n env_name = 'SCRAPEKIT_%s' % option.upper()\n value = os.environ.get(env_name, value)\n config[option] = value\n return config",
"def _environment_variables() -> Dict[str, str]:\n return {key: value for key, value in os.environ.items() if _is_encodable(value)}",
"def BuildEnv(self, unused_configuration=None):\n return os.environ.copy()",
"def activate():\n\n env_path = '/'.join([deployment_root(), 'env', env.project_fullname])\n\n if not exists(env_path):\n print env.host, \"ERROR: The version\", env.project_version, \\\n \"does not exist at\"\n print env_path\n sys.exit(1)\n\n active = active_version()\n servers = webserver_list()\n\n if env.patch or active != env.project_fullname:\n for s in servers:\n stop_webserver(s)\n\n if not env.patch and active != env.project_fullname:\n\n if env.verbosity:\n print env.host, \"ACTIVATING version\", env_path\n\n if not env.nomigration:\n sync_db()\n\n #south migration\n if ('south' in env.INSTALLED_APPS and\n not env.nomigration and\n not env.manualmigration):\n migration()\n\n if env.manualmigration or env.MANUAL_MIGRATION:\n manual_migration()\n\n #activate sites\n activate_sites = [''.join([\n d.name.replace('.', '_'),\n '-',\n env.project_version,\n '.conf'])\n for d in domain_sites()]\n if 'apache2' in get_packages():\n site_paths = ['/etc/apache2', '/etc/nginx']\n else:\n site_paths = ['/etc/nginx']\n\n #disable existing sites\n for path in site_paths:\n for site in _ls_sites('/'.join([path, 'sites-enabled'])):\n if site not in activate_sites:\n sudo(\"rm %s/sites-enabled/%s\" % (path, site))\n\n #activate new sites\n for path in site_paths:\n for site in activate_sites:\n if not exists('/'.join([path, 'sites-enabled', site])):\n sudo(\"chmod 644 %s\" % '/'.join(\n [path, 'sites-available', site]))\n sudo(\"ln -s %s/sites-available/%s %s/sites-enabled/%s\" % (\n path, site, path, site))\n if env.verbosity:\n print \" * enabled\", \"%s/sites-enabled/%s\" % (\n path, site)\n\n #delete existing symlink\n ln_path = '/'.join([deployment_root(), 'env', env.project_name])\n run('rm -f ' + ln_path)\n #run post deploy hooks\n post_exec_hook('post_deploy')\n #activate\n run('ln -s %s %s' % (env_path, ln_path))\n\n if env.verbosity:\n print env.host, env.project_fullname, \"ACTIVATED\"\n else:\n if env.verbosity and not env.patch:\n print env.project_fullname, \"is the active version\"\n\n if env.patch or active != env.project_fullname:\n for s in servers:\n start_webserver(s)\n print\n return",
"def getAppVersion():\n return os.environ.get('CURRENT_VERSION_ID')",
"def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
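The `get_environ` row above decodes PATH_INFO and QUERY_STRING as UTF-8 and falls back to latin-1 when that fails. That decode-with-fallback step, isolated from the gateway classes as a small sketch (illustrative only, not part of any dataset row; the `decode_path` helper is invented for the example):

```python
def decode_path(raw_path, encoding="utf-8"):
    try:
        return raw_path.decode(encoding), encoding
    except UnicodeDecodeError:
        # Fall back to latin-1 so the application can transcode if needed.
        return raw_path.decode("ISO-8859-1"), "ISO-8859-1"

print(decode_path(b"/caf\xc3\xa9"))  # ('/café', 'utf-8')
print(decode_path(b"/caf\xe9"))      # ('/café', 'ISO-8859-1')
```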
Query a valid view by buffer id | def query_valid_view(buffer_id):
for window in sublime.windows():
for view in window.views():
if view.buffer_id() == buffer_id:
return view
return None | [
"def view(view_name, key=None):\n kwargs = {'key': key} if key is not None else {}\n print(settings.DB.view(view_name,**kwargs)).rows",
"def get_view(window, vid):\r\n\r\n for view in window.views():\r\n if view.id() == vid:\r\n return view",
"def get_by_id(self, view_id):\n return View(self.context,\n ResourcePathServiceOperation(\"GetById\", [view_id], self.resource_path), self._parent)",
"def test_view_column():\n view_clause = ViewClause(\n 'myview',\n MetaData(schema='myschema'),\n table1.select()\n )\n\n assert view_clause.c['id'].name == table1.c['id'].name\n assert view_clause.c['value1'].name == table1.c['value1'].name",
"def test_get_table_view_config_by_view_id(self):\n pass",
"def get_find_results_buffer():\n\n for view in sublime.active_window().views():\n if view.name() == FIND_RESULTS_BUFFER_NAME:\n return view\n\n return None",
"def _filter_view(self, map_body):\n if map_body is True:\n map_body = \"function(doc) { emit(doc._id, null); };\"\n assert isinstance(map_body, str), \"View map must be a string\"\n view_doc = \"%s_viewdoc\" % self.prefix\n view_name = \"%s_viewname\" % self.prefix\n ddoc = {\"_id\": \"_design/%s\" % view_doc, \"views\": {view_name: {\"map\": map_body}}}\n rep_params = {\n \"filter\": \"_view\",\n \"query_params\": {\"view\": \"%s/%s\" % (view_doc, view_name)},\n }\n return (ddoc, rep_params)",
"def get_object(self, query):\n self.d(\"query: %s\", str(query))\n ref = one(self._index.lookup_keys(query))\n if not ref:\n raise IndexError(\"Failed to find: {:s}\".format(str(query)))\n # TODO: should ensure this query has a unique result\n return self._cim.logical_data_store.get_object_buffer(ref)",
"def test_get_by_id(self, screen, query):\n assert query.get_by_id(screen.id) == screen",
"def test_queryView(self):\n v = views.View(self.fc, None, None, 'all_tags?include_docs=true', Tag)\n\n def _checkResults(results):\n results = list(results)\n self.assertEquals(len(results), 3)\n\n # this used to be not executed because it worked on the empty\n # generator; so guard against that\n looped = False\n for tag in results:\n looped = True\n self.assertIn(tag.name, ['foo', 'bar', 'baz'])\n self.failUnless(looped)\n\n d = v.queryView()\n d.addCallback(_checkResults)\n return d",
"def findFragmentById(self, id):\n pass",
"def openView(self, dbName, docId, viewId, **kwargs):\n # Responses:\n # 500 Internal Server Error (illegal database name)\n\n def buildUri(dbName=dbName, docId=docId, viewId=viewId, kwargs=kwargs):\n return \"/%s/_design/%s/_view/%s?%s\" % (\n _namequote(dbName), _namequote(docId.encode('utf-8')),\n viewId, urlencode(kwargs))\n\n # if there is a \"keys\" argument, remove it from the kwargs\n # dictionary now so that it doesn't get double JSON-encoded\n body = None\n if \"keys\" in kwargs:\n body = json.dumps({\"keys\": kwargs.pop(\"keys\")})\n\n # encode the rest of the values with JSON for use as query\n # arguments in the URI\n for k, v in kwargs.items():\n if k == 'keys': # we do this below, for the full body\n pass\n else:\n kwargs[k] = json.dumps(v)\n # we keep the paisley API, but couchdb uses limit now\n if 'count' in kwargs:\n kwargs['limit'] = kwargs.pop('count')\n\n # If there's a list of keys to send, POST the\n # query so that we can upload the keys as the body of\n # the POST request, otherwise use a GET request\n if body:\n return self.post(\n buildUri(), body=body, descr='openView').addCallback(\n self.parseResult)\n else:\n return self.get(\n buildUri(), descr='openView').addCallback(\n self.parseResult)",
"def testG_view_request_id(self):\n self._inject(11) # creates x docs/requests\n viewResult = self._getViewResults(\"request_ids\")\n requestIds = [ x[u\"key\"] for x in viewResult ]\n self.assertEqual(len(requestIds), 11)",
"async def page_view(id):\n # lookup page by its TinyMongo id\n g.brand = BRAND\n page = DB.blog.find_one({'_id':id})\n if page is None:\n # return a 404 error page does not exist\n abort(404)\n \n return await render_template('view.html', page=page)",
"def read(self, id):",
"def test_exists():\n view_clause = ViewClause(\n 'myview',\n MetaData(schema='myschema'),\n table1.select()\n )\n with pytest.raises(UnboundExecutionError):\n view_clause.exists()",
"def click_fuel_buffer(buffer_id, timeout=default_timeout):\n if buffer_id == 'A':\n try:\n dispenser['Edit1'].wait('ready',timeout).click_input()\n return True\n except Exception as e:\n logger.warning(\"select_fuel_buffer: %s\" % e)\n return False\n elif buffer_id == 'B':\n # Control ID for buffer B changes if A has a postpay\n try:\n a_has_postpay = (dispenser['Edit7'].wait('ready', timeout).\n texts()[0] != 'B')\n except Exception as e:\n logger.warning(\"select_fuel_buffer: %s\" % e)\n return False \n if a_has_postpay:\n edit_num = 9\n else:\n edit_num = 5\n try:\n # We already know buffer controls are ready, no need for wait here\n dispenser['Edit%d' % edit_num].click_input()\n return True\n except Exception as e:\n logger.warning(\"select_fuel_buffer: %s\" % e)\n return False",
"def focus_buffer(self, results_buffer, active_buffer):\n\n results_has_focus = results_buffer.id() == active_buffer.id()\n\n if not results_has_focus:\n self.window.focus_view(results_buffer)",
"def view(mod, id):\n Model = mod_lookup.get(mod, None)\n if not Model:\n return f\"No such route: {mod}\", 404\n model = Model.query.get(id)\n if not model:\n flash(\"That record does not exist\")\n model = {'id': 0}\n template = 'view.html'\n data = model_dict(model)\n print(data)\n return render_template(template, mod=mod, data=data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a markdown document into an ElementTree. Given a list of lines, an ElementTree object (not just a parent Element) is created and the root element is passed to the parser as the parent. The ElementTree object is returned. This should only be called on an entire document, not pieces. | def parseDocument(self, lines):
# Create a ElementTree from the lines
self.root = util.etree.Element(self.markdown.doc_tag)
self.parseChunk(self.root, '\n'.join(lines))
return util.etree.ElementTree(self.root) | [
"def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n root = markdown.etree.Element(\"div\")\r\n self.parseChunk(root, '\\n'.join(lines))\r\n return markdown.etree.ElementTree(root)",
"def create_tree(markdown):\n global blocks, pos\n # parse markdown\n blocks = parse_markdown(markdown)\n if config.DEBUG_MODE:\n print('[DEBUG]: Parsed markdown')\n print(blocks)\n\n # create root node\n title = blocks[0].content.get_clean()\n root = Node(title)\n\n # recursively generate children\n pos = 1\n while pos < len(blocks):\n c = recurse()\n if c:\n root.add_child(c)\n \n\n # clean up tree\n root = root.retract()\n return root",
"def md_parse_docs(fn):\n\n md = Markdown(safe_mode='escape')\n with open(fn, 'r') as f_in:\n docs_html = md.convert(f_in.read())\n etr = etree.HTML(docs_html)\n\n docs = {}\n section = ''\n field = ''\n first_h1 = etr.find('body/h1')\n\n if first_h1 is not None:\n section, field = first_h1.text, ''\n for e in first_h1.itersiblings():\n if e.tag == 'h1':\n section, field = e.text, ''\n elif e.tag == 'h2':\n field = e.text\n else:\n docs[(section, field)] = docs.get((section, field), '') + etree.tostring(e)\n return docs",
"def parse(text):\n md_extensions = getattr(settings, \"DOCCOMMENT_MARKDOWN_EXTENSIONS\", DEFAULT_EXTENSIONS)\n md_safemode = getattr(settings, \"DOCCOMMENT_MARKDOWN_SAFEMODE\", DEFAULT_SAFEMODE)\n return markdown(text, md_extensions, safe_mode=md_safemode)",
"def parse(line):\n\n document = Document()\n root = document.createElement('tree')\n current_element = root\n rest = line\n\n while True:\n element, separator, rest = parse_element(rest, document)\n\n if isinstance(current_element.lastChild, Text) and \\\n current_element.lastChild.data == '':\n current_element.removeChild(current_element.lastChild)\n\n current_element.appendChild(element)\n\n if rest is None:\n break\n\n if separator == '<':\n current_element = current_element.parentNode\n elif separator == '+':\n current_element = current_element\n elif separator == '>':\n current_element = element\n\n expand_multipliers(root)\n\n return root",
"def parse(self):\n lines = self.data.splitlines()\n level = 1\n bounds = []\n for i, x in enumerate(lines):\n if re.search(r'^\\*{' + str(level) + '} ', x):\n bounds.append(i)\n bounds.append(len(lines)) # To get the last heading and its content\n\n trees = []\n for i in range(len(bounds) - 1):\n trees.append(lines[bounds[i]:bounds[i+1]])\n\n for tree in trees:\n self.children.append(OrgNode('\\n'.join(tree), **self.properties))",
"def parse_markdown(self, markdown):\n return self.markdown.render(body)",
"def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n \n # Split in parts\n for line in text.splitlines():\n if line.startswith(('# ', '## ', '### ', '#### ', '##### ')):\n # Finish pending lines\n parts.append('\\n'.join(lines))\n lines = []\n # Process header\n level = len(line.split(' ')[0])\n title = line.split(' ', 1)[1]\n title_short = title.split('(')[0].split('<')[0].strip().replace('`', '')\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append('\\n'.join(lines))\n \n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + '\\n\\n'",
"def read_md(filename, new_html):\n\tmarkdown = mistune.Markdown()\n\twith open(filename, 'r') as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\tline = line.strip()\n\t\t\tnew_line = markdown(line)\n\t\t\tnew_html.append(new_line)\n\tf.close()\n\treturn new_html",
"def test_markup_markdown(self):\r\n\r\n a = self.new_article('Demo', '''A First Level Header\r\n====================\r\n\r\nA Second Level Header\r\n---------------------\r\n\r\nNow is the time for all good men to come to\r\nthe aid of their country. This is just a\r\nregular paragraph.''', markup=MARKUP_MARKDOWN)\r\n a.do_render_markup()\r\n\r\n print a.rendered_content",
"def _parse(txt):\n from mwlib.refine.compat import parse_txt\n from mwlib import parser\n \n res = parse_txt(txt)\n \n\n # res is an parser.Article. \n if len(res.children)!=1:\n res.__class__ = parser.Node\n return res\n\n res = res.children[0]\n \n if res.__class__==parser.Paragraph:\n res.__class__ = parser.Node\n\n return res\n\n # if len(res.children)!=1:\n # return res\n # return res.children[0]",
"def parse_lines(*lines):\n return list(parse_lines(lines, []))",
"def build_from(lines:[str], number:int=0) -> object:\n have_chapter = any(REG_CHAPTER.fullmatch(line.strip()) for line in lines)\n lines = iter(lines)\n # get title, and waste the next line, that should be empty\n title = next(lines).strip()\n empty = next(lines).strip()\n assert not empty, f\"an empty line should follow any episode title, not '{empty}' !\"\n if have_chapter:\n chapters = Chapter.build_from(lines)\n else: # make a phony chapter, populate it with all text\n chapters = [Chapter(1, '', tuple(Line.build_from(lines)))]\n return Episode(number, title, tuple(chapters))",
"def markdown_reader():\n with open(MARKDOWN_FILE_PATH, \"r\") as markdown_file_handler:\n return markdown_file_handler.read()",
"def _parse(txt):\n \n from mwlib import scanner, parser\n \n tokens = scanner.tokenize(txt)\n res=parser.Parser(tokens, \"unknown\").parse()\n\n # res is an parser.Article. \n if len(res.children)!=1:\n res.__class__ = parser.Node\n return res\n\n res = res.children[0]\n if res.__class__==parser.Paragraph:\n res.__class__ = parser.Node\n \n if len(res.children)!=1:\n return res\n return res.children[0]",
"def test_build_paragraph_tree(self):\n text = \"This (a) is a good (1) test (2) of (3) some (b) body.\"\n self.assertEqual(\n self.regParser.build_tree(text),\n Node(\"This \", children=[\n Node(\"(a) is a good \", label=['a'], children=[\n Node(\"(1) test \", label=['a', '1']),\n Node(\"(2) of \", label=['a', '2']),\n Node(\"(3) some \", label=['a', '3'])\n ]),\n Node(\"(b) body.\", label=['b'])\n ])\n )",
"def parse_line(cls, line):\n if not line:\n node = nodes.EmptyNode()\n elif line[0] in (cls.HTML_TAG_PREFIX, '.', '#'):\n node = nodes.HtmlNode.from_haml(line)\n elif line[0] in (cls.HTML_COMMENT_PREFIX, ):\n node = nodes.HtmlCommentNode(line[1:])\n elif line[0] in (cls.JINJA_TAG_PREFIX, ):\n node = nodes.JinjaNode.from_haml(line)\n elif line[0] in (cls.CUSTOM_BLOCK_PREFIX, ):\n node = nodes.CustomBlockNode(line[1:])\n elif line[0] in (cls.PREFORMATTED_PREFIX, ):\n node = nodes.PreformattedTextNode(line[1:])\n elif line[0] in (cls.ESCAPE_PREFIX, ):\n node = nodes.TextNode(line[1:])\n else:\n node = nodes.TextNode(line)\n\n return node",
"def tree_from_lines(lines):\n\n tree = []\n current_stack = []\n for line in lines:\n asl_indents = line[0]\n node = ([], line[1])\n if asl_indents == 0:\n tree += [node]\n current_stack = [node]\n else:\n while len(current_stack) > asl_indents:\n current_stack = current_stack[:-1]\n current_stack[-1][0].append(node)\n current_stack += [node]\n return tree",
"def get_contents_from_markdown(markdown_file=None, split_string=\"[//]: # (Begin Content)\", index=0):\n contents = \"\"\n if markdown_file:\n try:\n full_text = open(str(settings.APP_DIR / \"api_docs\" / \"markdown\" / markdown_file)).read()\n split = full_text.split(split_string)\n if index < len(split):\n contents = split[index]\n except Exception:\n pass\n return contents",
"def parse_articles_from_lines(lines):\n if not isinstance(lines, list):\n raise TypeError\n\n state = 0\n name = ''\n author = ''\n data = ''\n text = ''\n result = []\n\n for line in lines:\n if not isinstance(line, str):\n raise TypeError\n line = line.strip()\n if not line:\n continue\n if state == 0:\n name = line\n state = 1\n continue\n if state == 1:\n tmp = line.split('Автор: ')\n author = tmp[-1]\n state = 2\n continue\n if state == 2:\n data = line.split('Дата: ')[-1]\n state = 3\n continue\n if state == 3:\n text = line\n state = 4\n continue\n if state == 4:\n if line == '-----------':\n result.append(Article(name, author, data, text))\n state = 5\n continue\n text += '\\n' + line\n continue\n if state == 5 and line == '________________':\n state = 0\n continue\n\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
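The row above documents `BlockParser.parseDocument`, the whole-document entry point of Python-Markdown's block parser. As an editorial illustration (not part of the dataset), the sketch below drives it through a `markdown.Markdown` instance; it assumes the `markdown` package is installed, and the attribute names `md.parser` and `md.doc_tag` follow the snippet in the row and may differ between Markdown releases.

```python
# Minimal usage sketch for parseDocument (assumptions noted above).
import markdown

md = markdown.Markdown()
lines = ["# Title", "", "A first paragraph.", "", "A second paragraph."]

tree = md.parser.parseDocument(lines)   # an ElementTree, not a bare Element
root = tree.getroot()                   # root element is wrapped in md.doc_tag ("div")
print(root.tag, [child.tag for child in root])   # e.g. div ['h1', 'p', 'p']
```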
Parse a chunk of markdown text and attach to given etree node. While the ``text`` argument is generally assumed to contain multiple blocks which will be split on blank lines, it could contain only one block. Generally, this method would be called by extensions when block parsing is required. The ``parent`` etree Element passed in is altered in place. Nothing is returned. | def parseChunk(self, parent, text):
self.parseBlocks(parent, text.split('\n\n')) | [
"def parse(text):\n md_extensions = getattr(settings, \"DOCCOMMENT_MARKDOWN_EXTENSIONS\", DEFAULT_EXTENSIONS)\n md_safemode = getattr(settings, \"DOCCOMMENT_MARKDOWN_SAFEMODE\", DEFAULT_SAFEMODE)\n return markdown(text, md_extensions, safe_mode=md_safemode)",
"def parseText(self, node):\n self.appendContent(node.data)",
"def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])",
"def parseText(self, text):\n results = []\n for tag in self.iterTags(text):\n results.append(self.tagToMarkdown(tag, \n self.cards))\n return '\\n\\n'.join(results)",
"def _transform_markdown(self):\n # Do nothing if Content-Type is not text/markdown\n if not self._message['Content-Type'].startswith(\"text/markdown\"):\n return\n\n # Remove the markdown Content-Type header, it's non-standard for email\n del self._message['Content-Type']\n\n # Make sure the message is multipart. We need a multipart message so\n # that we can add an HTML part containing rendered Markdown.\n self._make_message_multipart()\n\n # Extract unrendered text and encoding. We assume that the first\n # plaintext payload is formatted with Markdown.\n for mimetext in self._message.get_payload():\n if mimetext['Content-Type'].startswith('text/plain'):\n encoding = str(mimetext.get_charset())\n text = mimetext.get_payload(decode=True).decode(encoding)\n break\n assert encoding\n assert text\n\n # Render Markdown to HTML and add the HTML as the last part of the\n # multipart message as per RFC 2046.\n #\n # Note: We need to use u\"...\" to ensure that unicode string\n # substitution works properly in Python 2.\n #\n # https://docs.python.org/3/library/email.mime.html#email.mime.text.MIMEText\n html = markdown.markdown(text)\n payload = future.backports.email.mime.text.MIMEText(\n u\"<html><body>{}</body></html>\".format(html),\n _subtype=\"html\",\n _charset=encoding,\n )\n self._message.attach(payload)",
"def telegram_chunk_by_line(self, text: str, max_length: int) -> List[str]:\n chunks = []\n\n # Escape Markdown by type\n lines = re.split(r'(`)(.*?)(`)', text)\n for i in range(len(lines)):\n if i % 4 in (1, 3):\n continue\n base_entity = 'code' if i % 4 == 2 else None\n # subtext = re.split(r'(\\[[^\\][]*]\\((?:https?|tel|mailto):[^()]*\\))', text[i])\n subtext = re.split(r'(\\[.*?]\\((?:https?|tel|mailto):[^()]*\\))', lines[i])\n for j in range(len(subtext)):\n if (j + 1) % 2:\n subtext[j] = self.telegram_escape_markdown(subtext[j], entity_type=base_entity)\n else:\n subtext[j] = ''.join(\n [\n '[',\n self.telegram_escape_markdown(subtext[j][1:].split(']')[0]),\n '](',\n self.telegram_escape_markdown(\n subtext[j].split('(')[-1].split(')')[0], entity_type='text_link'\n ),\n ')',\n ]\n )\n lines[i] = ''.join(subtext)\n\n new_text = ''.join(lines).splitlines(keepends=True)\n\n # check if any individual line is too long and chunk it\n if any(len(line) > max_length for line in new_text):\n new_lines: List[str] = []\n for line in new_text:\n if len(line) > max_length:\n new_lines.extend(chunk_string(line, max_length))\n else:\n new_lines.append(line)\n new_text = new_lines\n\n it_lines = iter(new_text)\n chunk_lines: List[str] = []\n pre_status = False # keep track of whether you're in the middle of a PreCode entity to close and reopen\n try:\n while True:\n next_line = next(it_lines)\n if sum(len(line) for line in chunk_lines) + len(next_line) > max_length - pre_status * 3:\n if pre_status:\n chunk_lines[-1] += '```'\n chunks.append(''.join(chunk_lines))\n chunk_lines = [pre_status * '```' + next_line]\n else:\n chunk_lines.append(next_line)\n if next_line.count('```') % 2:\n pre_status = not pre_status\n except StopIteration:\n chunks.append(''.join(chunk_lines))\n\n return chunks",
"def parse_elements(text):\n \n \n # sanitise and split using BeautifulSoup\n soup = BeautifulSoup(parse(text))\n elements = [e for e in soup.contents if type(e) == Tag]\n \n # wrap blocks in <div>\n format = u\"<div class='doccomment-block' id='DE-%d'>\\n%s\\n</div>\"\n for seq,txt in enumerate(elements):\n elements[seq] = format % (seq, txt)\n \n return elements",
"def markdown_html(text):\n return markdown.markdown(text)",
"def _parse_paragraph(self, node, state):\n # Both Paragraphs will share the same parent\n parent = (\n state[\"context\"][node]\n if node in state[\"context\"]\n else state[\"parent\"][node]\n )\n for field in [\"text\", \"tail\"]:\n text = getattr(node, field)\n text = text.strip() if text and self.strip else text\n\n # Skip if \"\" or None\n if not text:\n continue\n\n # Run RegEx replacements\n for (rgx, replace) in self.replacements:\n text = rgx.sub(replace, text)\n\n # Process the Paragraph\n stable_id = \"{}::{}:{}\".format(\n state[\"document\"].name, \"paragraph\", state[\"paragraph\"][\"idx\"]\n )\n parts = {}\n parts[\"stable_id\"] = stable_id\n parts[\"document\"] = state[\"document\"]\n parts[\"position\"] = state[\"paragraph\"][\"idx\"]\n if isinstance(parent, Caption):\n if parent.table:\n parts[\"section\"] = parent.table.section\n elif parent.figure:\n parts[\"section\"] = parent.figure.section\n parts[\"caption\"] = parent\n elif isinstance(parent, Cell):\n parts[\"section\"] = parent.table.section\n parts[\"cell\"] = parent\n elif isinstance(parent, Section):\n parts[\"section\"] = parent\n elif isinstance(parent, Figure): # occurs with text in the tail of an img\n parts[\"section\"] = parent.section\n else:\n raise NotImplementedError(\n 'Paragraph \"{}\" parent must be Section, Caption, or Cell, not {}'.format(\n text, parent\n )\n )\n\n # Create the Figure entry in the DB\n paragraph = Paragraph(**parts)\n\n state[\"paragraph\"][\"idx\"] += 1\n\n state[\"paragraph\"][\"text\"] = text\n state[\"paragraph\"][\"field\"] = field\n\n # Parse the Sentences in the Paragraph\n yield from self._parse_sentence(paragraph, node, state)\n\n return state",
"def _parse(txt):\n from mwlib.refine.compat import parse_txt\n from mwlib import parser\n \n res = parse_txt(txt)\n \n\n # res is an parser.Article. \n if len(res.children)!=1:\n res.__class__ = parser.Node\n return res\n\n res = res.children[0]\n \n if res.__class__==parser.Paragraph:\n res.__class__ = parser.Node\n\n return res\n\n # if len(res.children)!=1:\n # return res\n # return res.children[0]",
"def markdown(text):\n\n def pygments(m):\n return highlight(m.group(2), get_lexer_by_name(m.group(1)), HtmlFormatter())\n\n # add newline after a begin code tag\n text = text.replace(\"<code>\", \"<code>\\n\")\n # Pygments where <code> tags have a class\n text = re.sub(re.compile('<code class=\"([^\"]*)\">((.|\\n)*?)</code>'), pygments, text)\n # apply markdown\n return markup.markdown(text)",
"def test_markup_markdown(self):\r\n\r\n a = self.new_article('Demo', '''A First Level Header\r\n====================\r\n\r\nA Second Level Header\r\n---------------------\r\n\r\nNow is the time for all good men to come to\r\nthe aid of their country. This is just a\r\nregular paragraph.''', markup=MARKUP_MARKDOWN)\r\n a.do_render_markup()\r\n\r\n print a.rendered_content",
"def parse_and_return(self, text):\n return node_definition.parse(\n self.tokenize(\n text\n )\n )",
"def parse_multiline_text(self, field_content_elem):\n if field_content_elem is None:\n return ''\n value = ''\n # iterate over all children elements\n for elem in field_content_elem.getiterator():\n # extract text:\n if elem.tag == self.TAG_TEXT:\n value += elem.text\n # and line breaks:\n elif elem.tag == self.TAG_BREAK:\n value += '\\n'\n return value",
"def test_block_in_inline():\r\n box = parse('''\r\n<style>\r\n p { display: inline-block; }\r\n span, i { display: block; }\r\n</style>\r\n<p>Lorem <em>ipsum <strong>dolor <span>sit</span>\r\n <span>amet,</span></strong><span><em>conse<i></i></em></span></em></p>''')\r\n box = build.inline_in_block(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n # No whitespace processing here.\r\n ('strong', 'Text', '\\n '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])])]),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse'),\r\n ('i', 'Block', [])])])])])])])])])\r\n\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [\r\n # Whitespace processing not done yet.\r\n ('strong', 'Text', '\\n ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])]),\r\n\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [])])])]),\r\n ('span', 'Block', [\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse')])])]),\r\n ('i', 'Block', []),\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [])])])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [])])])])])])",
"def __process_inline_text_block(\n source_text,\n starting_whitespace=\"\",\n whitespace_to_recombine=None,\n is_setext=False,\n ):\n\n inline_blocks = []\n start_index = 0\n if whitespace_to_recombine and \" \" in whitespace_to_recombine:\n source_text = InlineProcessor.__recombine_with_whitespace(\n source_text, whitespace_to_recombine\n )\n else:\n whitespace_to_recombine = None\n\n current_string = \"\"\n current_string_unresolved = \"\"\n end_string = \"\"\n\n inline_response = InlineResponse()\n\n next_index = ParserHelper.index_any_of(\n source_text,\n InlineProcessor.__valid_inline_text_block_sequence_starts,\n start_index,\n )\n LOGGER.debug(\"__process_inline_text_block>>is_setext>>%s\", str(is_setext))\n LOGGER.debug(\n \"__process_inline_text_block>>%s>>%s\",\n source_text.replace(\"\\n\", \"\\\\n\"),\n str(start_index),\n )\n while next_index != -1:\n\n inline_response.clear_fields()\n reset_current_string = False\n whitespace_to_add = None\n\n LOGGER.debug(\n \"__process_inline_text_block>>%s>>%s\", str(start_index), str(next_index)\n )\n remaining_line = source_text[start_index:next_index]\n\n inline_request = InlineRequest(\n source_text,\n next_index,\n inline_blocks,\n remaining_line,\n current_string_unresolved,\n )\n if source_text[next_index] in InlineProcessor.__inline_character_handlers:\n LOGGER.debug(\"handler(before)>>%s<<\", source_text[next_index])\n proc_fn = InlineProcessor.__inline_character_handlers[\n source_text[next_index]\n ]\n inline_response = proc_fn(inline_request)\n LOGGER.debug(\"handler(after)>>%s<<\", source_text[next_index])\n else:\n assert source_text[next_index] == \"\\n\"\n LOGGER.debug(\n \"end_string(before)>>%s<<\",\n str(end_string).replace(\"\\n\", \"\\\\n\").replace(\"\\x02\", \"\\\\x02\"),\n )\n (\n inline_response.new_string,\n whitespace_to_add,\n inline_response.new_index,\n inline_response.new_tokens,\n remaining_line,\n end_string,\n current_string,\n ) = InlineHelper.handle_line_end(\n next_index, remaining_line, end_string, current_string\n )\n LOGGER.debug(\n \"handle_line_end>>new_tokens>>%s<<\",\n str(inline_response.new_tokens)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\x02\", \"\\\\x02\"),\n )\n if not inline_response.new_tokens:\n end_string = InlineProcessor.__add_recombined_whitespace(\n bool(whitespace_to_recombine),\n source_text,\n inline_response,\n end_string,\n is_setext,\n )\n LOGGER.debug(\n \"handle_line_end>>%s<<\",\n source_text[inline_response.new_index :]\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\x02\", \"\\\\x02\"),\n )\n LOGGER.debug(\n \"end_string(after)>>%s<<\",\n str(end_string).replace(\"\\n\", \"\\\\n\").replace(\"\\x02\", \"\\\\x02\"),\n )\n\n LOGGER.debug(\n \"new_string-->%s<--\",\n str(inline_response.new_string).replace(\"\\n\", \"\\\\n\"),\n )\n LOGGER.debug(\"new_index-->%s<--\", str(inline_response.new_index))\n LOGGER.debug(\n \"new_tokens-->%s<--\",\n str(inline_response.new_tokens).replace(\"\\n\", \"\\\\n\"),\n )\n LOGGER.debug(\n \"new_string_unresolved-->%s<--\",\n str(inline_response.new_string_unresolved).replace(\"\\n\", \"\\\\n\"),\n )\n LOGGER.debug(\n \"consume_rest_of_line-->%s<--\",\n str(inline_response.consume_rest_of_line),\n )\n LOGGER.debug(\n \"original_string-->%s<--\",\n str(inline_response.original_string).replace(\"\\n\", \"\\\\n\"),\n )\n\n if inline_response.consume_rest_of_line:\n inline_response.new_string = \"\"\n reset_current_string = True\n inline_response.new_tokens = None\n else:\n current_string = InlineHelper.append_text(\n current_string, 
remaining_line\n )\n current_string_unresolved = InlineHelper.append_text(\n current_string_unresolved, remaining_line\n )\n\n LOGGER.debug(\n \"current_string>>%s<<\",\n str(current_string).replace(\"\\n\", \"\\\\n\").replace(\"\\x02\", \"\\\\x02\"),\n )\n LOGGER.debug(\n \"current_string_unresolved>>%s<<\",\n str(current_string_unresolved)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\x02\", \"\\\\x02\"),\n )\n if inline_response.new_tokens:\n if current_string:\n # assert end_string is None\n inline_blocks.append(\n TextMarkdownToken(\n current_string,\n starting_whitespace,\n end_whitespace=end_string,\n )\n )\n reset_current_string = True\n starting_whitespace = \"\"\n end_string = None\n\n inline_blocks.extend(inline_response.new_tokens)\n\n if reset_current_string:\n current_string = \"\"\n current_string_unresolved = \"\"\n\n (\n start_index,\n next_index,\n end_string,\n current_string,\n current_string_unresolved,\n ) = InlineProcessor.__complete_inline_loop(\n source_text,\n inline_response.new_index,\n end_string,\n whitespace_to_add,\n current_string,\n current_string_unresolved,\n inline_response.new_string_unresolved,\n inline_response.new_string,\n inline_response.original_string,\n )\n LOGGER.debug(\n \"<<current_string<<%s<<%s<<\",\n str(len(current_string)),\n current_string.replace(\"\\b\", \"\\\\b\")\n .replace(\"\\a\", \"\\\\a\")\n .replace(\"\\n\", \"\\\\n\"),\n )\n LOGGER.debug(\n \"<<current_string_unresolved<<%s<<%s<<\",\n str(len(current_string_unresolved)),\n current_string_unresolved.replace(\"\\b\", \"\\\\b\")\n .replace(\"\\a\", \"\\\\a\")\n .replace(\"\\n\", \"\\\\n\"),\n )\n\n LOGGER.debug(\"<<__complete_inline_block_processing<<\")\n return InlineProcessor.__complete_inline_block_processing(\n inline_blocks,\n source_text,\n start_index,\n current_string,\n end_string,\n starting_whitespace,\n is_setext,\n )",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def _parse(self, remaining_text, tree, frontier):\n\n # If the tree covers the text, and there's nothing left to\n # expand, then we've found a complete parse; return it.\n if len(remaining_text) == 0 and len(frontier) == 0:\n if self._trace:\n self._trace_succeed(tree, frontier)\n yield tree\n\n # If there's still text, but nothing left to expand, we failed.\n elif len(frontier) == 0:\n if self._trace:\n self._trace_backtrack(tree, frontier)\n\n # If the next element on the frontier is a tree, expand it.\n elif isinstance(tree[frontier[0]], Tree):\n yield from self._expand(remaining_text, tree, frontier)\n\n # If the next element on the frontier is a token, match it.\n else:\n yield from self._match(remaining_text, tree, frontier)",
"def fragment_fromstring(html, create_parent=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n accept_leading_text = bool(create_parent)\n\n elements = fragments_fromstring(\n html, guess_charset=guess_charset, parser=parser,\n no_leading_text=not accept_leading_text)\n\n if create_parent:\n if not isinstance(create_parent, _strings):\n create_parent = 'div'\n new_root = Element(create_parent)\n if elements:\n if isinstance(elements[0], _strings):\n new_root.text = elements[0]\n del elements[0]\n new_root.extend(elements)\n return new_root\n\n if not elements:\n raise etree.ParserError('No elements found')\n if len(elements) > 1:\n raise etree.ParserError('Multiple elements found')\n result = elements[0]\n if result.tail and result.tail.strip():\n raise etree.ParserError('Element followed by text: %r' % result.tail)\n result.tail = None\n return result",
"def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
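The row above describes `BlockParser.parseChunk`, the hook extensions use to hand nested Markdown text back to the parser and have the result attached to an existing element. Below is a sketch of that pattern; the class name, the `!!!` marker, and the CSS class are invented for the example, and `markdown.blockprocessors.BlockProcessor` plus the stdlib `ElementTree` stand in for the `util.etree` alias used by the dataset's code.

```python
# Illustrative block processor: wraps "!!!"-prefixed blocks in <div class="box">
# and re-parses the remaining text as ordinary Markdown blocks.
import xml.etree.ElementTree as etree

from markdown.blockprocessors import BlockProcessor


class BoxProcessor(BlockProcessor):
    def test(self, parent, block):
        return block.startswith("!!!")

    def run(self, parent, blocks):
        block = blocks.pop(0)
        div = etree.SubElement(parent, "div")
        div.set("class", "box")
        # parseChunk splits on blank lines itself and returns nothing;
        # `div` (and through it `parent`) is altered in place.
        self.parser.parseChunk(div, block[3:].lstrip())
```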
Process blocks of markdown text and attach to given etree node. Given a list of ``blocks``, each blockprocessor is stepped through until there are no blocks left. While an extension could potentially call this method directly, it's generally expected to be used internally. This is a public method as an extension may need to add/alter additional BlockProcessors which call this method to recursively parse a nested block. | def parseBlocks(self, parent, blocks):
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
if processor.run(parent, blocks) is not False:
# run returns True or None
break | [
"def _parse_blocks(self, definition):\n block_matches = re.finditer(self.__class__.block_regex, definition)\n\n for match in block_matches:\n name = match.group('name')\n args = match.group('args')\n defs = match.group('defs')\n\n nodes = re.findall(self.__class__.node_regex, args)\n nodes = [Node(n) for n in nodes]\n pairs = re.findall(self.__class__.pair_regex, args)\n pairs = {p[0].strip(): p[1].strip() for p in\n [pair.split('=') for pair in pairs]}\n defs = defs.split('\\n')\n self.blocks[name] = Block(name, nodes, defs, mux=self.mux, **pairs)\n return re.sub(self.__class__.block_regex, '', definition)",
"def set_blocks(self, blocks):\n\n if self.types_of_block is not None:\n raise Exception(\"Setting blocks twice.\")\n self.types_of_block = tuple(blocks)\n\n return self",
"def parse(self, lines):\n self.reset()\n if type(lines) is str:\n lines = lines.split(\"\\n\")\n\n line_no = 0\n for line in lines:\n line_no += 1\n\n # Block begin?\n m, block_class = self.is_block_begin(line)\n if block_class:\n new_block = block_class(line_no, m.group(1))\n self.push_block(switch=self.add_element(new_block))\n continue\n # Block end?\n m = self.is_block_end(line)\n if m:\n self.pop_block(m.group(1))\n continue\n\n m = self.RE_EXEC.search(line)\n if m:\n element = exec_t(line_no, stmt=m.group(2), indent=m.end(1))\n else:\n element = line_t(line_no, line)\n\n # Regular line\n self.add_element(element)",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def create_block_parser(self) -> BlockParser:\n parser = BlockParser()\n for processor in self.get_block_processors():\n parser.add_processor(processor(parser))\n return parser",
"def print_tree_to_blocks(self, blocks):\n fl = []\n for bl in blocks:\n fl+= self.get_parents_list_ids(bl)\n if isinstance(bl, str):\n fl.append(bl)\n else:\n fl.append(bl.id)\n return self.print_tree(self.root_block, filter_list=fl)",
"def _parse_block(self,idx):\n block_tmp = self._block_list[idx]\n blocktype = self._paragraph_or_table[idx]\n paragraph_count = sum(self._paragraph_or_table[:idx+1])\n table_count = idx + 1 - paragraph_count\n df = DataFrame()\n # paragraph\n if blocktype==1:\n l_runText = [r.text for r in block_tmp.runs]\n l_runID = arange(len(l_runText))\n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['paragraph_ID'] = paragraph_count - 1 # 0-starting index \n # table\n if blocktype==0:\n row_count = 0\n for row in block_tmp.rows:\n cell_count = 0\n for cell in row.cells:\n cell_para_count = 0\n for p in cell.paragraphs:\n l_runText = [r.text for r in p.runs]\n l_runID = arange(len(l_runText)) \n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['table_ID'] = table_count - 1 # 0-starting index\n df['row_ID'] = row_count\n df['cell_ID'] = cell_count\n df['paragraph_ID'] = cell_para_count \n cell_para_count += 1\n cell_count += 1\n row_count += 1\n df['block_ID'] = idx\n self._block_dataframe_list[idx] = df",
"def blocks(content):\n\n for group in content.split(DIV):\n yield group",
"def test_nested_three_block_block_block():\n\n # Arrange\n source_markdown = \"\"\"> > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::]\",\n \"[block-quote(1,3)::]\",\n \"[block-quote(1,5)::> > > \\n> > > ]\",\n \"[para(1,7):\\n]\",\n \"[text(1,7):list\\nitem::\\n]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list\nitem</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def add_data(self, Blocks) :\n\n if not hasattr(Blocks, '__iter__') :\n self._add_single_block(Blocks)\n else :\n for Block in Blocks :\n self._add_single_block(Block)",
"async def parse_block(ctx, block):\n # some questions are inputted from macs and have weird apostrophes. Kill them, and empty newlines\n # also escape underscores so when shown as a question in discord, they do not format, and normalize iOS apostrophes\n rawlines = block.replace('´', '\\'').replace('\\n\\n', '\\n').replace('_', '\\\\_').replace('´', '\\'').split('\\n')\n lines = []\n for line in rawlines:\n if not line.lower().startswith('source:'):\n lines.append(line)\n print(lines)\n # check validity of input\n try:\n if len(lines) % 2:\n raise UserWarning('Ope, I didn\\'t get that. Try not to separate any questions from their answers')\n for i in range(len(lines)):\n if i % 2 and not lines[i].startswith('Answer: '):\n raise UserWarning('Answer did not start with \"Answer: \"\\n```' + lines[i] + '```')\n if (1 + i) % 2 and not lines[i].startswith('Question: '):\n raise UserWarning('Question did not start with \"Question: \"\\n```' + lines[i] + '```')\n except UserWarning as e:\n await ctx.send(e)\n return\n\n out = []\n while lines:\n out.append(parse_next(lines))\n\n await ctx.send(display(out))\n return out",
"def f_blocks(self, f_blocks):\n \n self._f_blocks = f_blocks",
"def _traverse_blocks_in_course(self, course, access_all_block_fields):\n all_blocks = []\n stack = [course]\n while stack:\n curr_block = stack.pop()\n all_blocks.append(curr_block)\n if curr_block.has_children:\n for block in reversed(curr_block.get_children()):\n stack.append(block)\n\n if access_all_block_fields:\n # Read the fields on each block in order to ensure each block and its definition is loaded.\n for xblock in all_blocks:\n for __, field in xblock.fields.items():\n if field.is_set_on(xblock):\n __ = field.read_from(xblock)",
"def insert_or_append_blocks(self, blocks):\n start = 0\n bulk_insert = self.bulk_insert\n blocks_len = len(blocks)\n select = 'SELECT ?,?,?,\"\",0'\n query = 'INSERT OR IGNORE INTO gauged_data (namespace, offset, ' \\\n '`key`, data, flags) '\n execute = self.cursor.execute\n while start < blocks_len:\n rows = blocks[start:start+bulk_insert]\n params = []\n for namespace, offset, key, _, _ in rows:\n params.extend((namespace, offset, key))\n insert = (select + ' UNION ') * (len(rows) - 1) + select\n execute(query + insert, params)\n start += bulk_insert\n for namespace, offset, key, data, flags in blocks:\n execute('UPDATE gauged_data SET data = CAST(data || ? AS BLOB),'\n 'flags = ? WHERE namespace = ? AND offset = ? AND '\n '`key` = ?', (data, flags, namespace, offset, key))",
"def clean_blocks(blocks):\r\n \r\n for block in blocks:\r\n block.empty()",
"def replace_transformer_layers(model,\n NewLayer,\n blocks=range(12),\n block_parts=['attention.self.query',\n 'attention.self.key',\n 'attention.self.value',\n 'attention.output.dense',\n 'intermediate.dense',\n 'output.dense'],\n *args, **kwargs\n *args, **kwargs):\n\n for transformer_layer_ind in tqdm(blocks):\n block = model.bert.encoder.layer[transformer_layer_ind]\n for layer in block_parts:\n recursive_setattr(block,\n layer,\n NewLayer(recursive_getattr(block, layer),\n *args, **kwargs))",
"def create_tree(markdown):\n global blocks, pos\n # parse markdown\n blocks = parse_markdown(markdown)\n if config.DEBUG_MODE:\n print('[DEBUG]: Parsed markdown')\n print(blocks)\n\n # create root node\n title = blocks[0].content.get_clean()\n root = Node(title)\n\n # recursively generate children\n pos = 1\n while pos < len(blocks):\n c = recurse()\n if c:\n root.add_child(c)\n \n\n # clean up tree\n root = root.retract()\n return root",
"def convert_lines_to_block(lines, block_map, link_stack, source_path, block_name=ALL_BLOCK_NAME):\n found_closing_block = False\n segments = []\n while lines:\n line = lines.pop(0)\n\n # Check if the line is a closing tag. This should only occur if we encounter the closing\n # block tag that matches the block_name parameter.\n close_tag_match = BLOCK_CLOSE_REGEX.match(line)\n if close_tag_match:\n if close_tag_match.group(1) != block_name:\n raise InvalidBlockName('Expected closing block ' + block_name + \\\n 'but found block named \"' + block_tag_match.group(1) + '\" in ' + source_path)\n # If the block name is valid, we are done processing this block.\n found_closing_block = True\n break\n\n # Otherwise, check if the line is a nested block.\n open_tag_match = BLOCK_OPEN_REGEX.match(line)\n if open_tag_match:\n # Make sure the block name is not the reserved ALL_BLOCK_NAME.\n inner_block_name = open_tag_match.group(1)\n if inner_block_name == ALL_BLOCK_NAME:\n raise InvalidBlockName(\n '\"{0}\" is a reserved block name, but found block named \"{0}\" in {1}'.format(\n ALL_BLOCK_NAME, source_path))\n\n # Recursively convert nested block contents to a Block.\n inner_block = convert_lines_to_block(\n lines,\n block_map,\n link_stack,\n source_path,\n inner_block_name)\n segments.append(inner_block)\n continue\n\n # Otherwise, check if the line is a variable. The line should be omitted.\n variable_match = VARIABLE_REGEX.match(line)\n if variable_match:\n process_variable(variable_match, block_map)\n continue\n\n # Otherwise, check if the line is an include tag.\n include_match = INCLUDE_REGEX.match(line)\n if include_match:\n included_content = process_links(include_match, block_map, link_stack, source_path)\n # Omit empty content.\n if included_content != '':\n append_text_to_segments(segments, included_content)\n else:\n append_text_to_segments(segments, line)\n\n if block_name != ALL_BLOCK_NAME and not found_closing_block:\n raise InvalidBlockName(\n 'Expected closing block called \"{0}\" in {1}'.format(block_name, source_path))\n\n block = Block(source_path, block_name, segments)\n block_map.add_block(block)\n return block",
"def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])",
"def replace_blocks(self, blocks: Iterable[QuantumCircuit]) -> \"ControlFlowOp\":\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
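The row above covers `BlockParser.parseBlocks`, the dispatch loop that offers each pending block to the registered processors in order until the block list is exhausted. For a custom processor to take part in that loop it has to be added to `parser.blockprocessors`; the self-contained sketch below uses the Markdown 2.x ordered-dict `.add()` style that matches the era of code in this dataset (Markdown 3.x replaced it with `.register(item, name, priority)` and dropped the `md_globals` argument). The `%%` marker, the class names, and the `'<paragraph'` insertion point are example choices, not taken from the dataset.

```python
# Illustrative wiring of a custom processor into the parseBlocks() loop
# (Markdown 2.x style APIs, as assumed above).
import markdown
from markdown.blockprocessors import BlockProcessor
from markdown.extensions import Extension
from markdown.util import etree


class ShoutProcessor(BlockProcessor):
    def test(self, parent, block):
        return block.startswith("%%")

    def run(self, parent, blocks):
        p = etree.SubElement(parent, "p")
        p.text = blocks.pop(0)[2:].strip().upper()


class ShoutExtension(Extension):
    def extendMarkdown(self, md, md_globals):
        # Inserted before the paragraph processor so its test() is tried first.
        md.parser.blockprocessors.add("shout", ShoutProcessor(md.parser),
                                      "<paragraph")


print(markdown.markdown("%% hello\n\nplain text",
                        extensions=[ShoutExtension()]))
```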
Build the default block parser used by Markdown. | def build_block_parser(md_instance, **kwargs):
parser = BlockParser(md_instance)
parser.blockprocessors['empty'] = EmptyBlockProcessor(parser)
parser.blockprocessors['indent'] = ListIndentProcessor(parser)
parser.blockprocessors['code'] = CodeBlockProcessor(parser)
parser.blockprocessors['hashheader'] = HashHeaderProcessor(parser)
parser.blockprocessors['setextheader'] = SetextHeaderProcessor(parser)
parser.blockprocessors['hr'] = HRProcessor(parser)
parser.blockprocessors['olist'] = OListProcessor(parser)
parser.blockprocessors['ulist'] = UListProcessor(parser)
parser.blockprocessors['quote'] = BlockQuoteProcessor(parser)
parser.blockprocessors['paragraph'] = ParagraphProcessor(parser)
return parser | [
"def create_block_parser(self) -> BlockParser:\n parser = BlockParser()\n for processor in self.get_block_processors():\n parser.add_processor(processor(parser))\n return parser",
"def get_parser():\n parser = (\n MarkdownIt(\"commonmark\")\n .enable(\"table\")\n .use(front_matter_plugin)\n .use(myst_block_plugin)\n .use(myst_role_plugin)\n # we only need to parse block level components (for efficiency)\n .disable(\"inline\", True)\n )\n return parser",
"def _parse_blocks(self, definition):\n block_matches = re.finditer(self.__class__.block_regex, definition)\n\n for match in block_matches:\n name = match.group('name')\n args = match.group('args')\n defs = match.group('defs')\n\n nodes = re.findall(self.__class__.node_regex, args)\n nodes = [Node(n) for n in nodes]\n pairs = re.findall(self.__class__.pair_regex, args)\n pairs = {p[0].strip(): p[1].strip() for p in\n [pair.split('=') for pair in pairs]}\n defs = defs.split('\\n')\n self.blocks[name] = Block(name, nodes, defs, mux=self.mux, **pairs)\n return re.sub(self.__class__.block_regex, '', definition)",
"def default_parser(self, desc, node=None, parent=None, attr_index=None,\n rawdata=None, root_offset=0, offset=0, **kwargs):\n if parent is not None and attr_index is not None:\n if not self.is_block:\n # non-Block node\n parent[attr_index] = desc.get(DEFAULT, self.default())\n elif isinstance(None, self.data_cls):\n # Block node_cls without a 'data_cls'\n parent[attr_index] = desc.get(NODE_CLS, self.node_cls)(desc)\n else:\n # Block node_cls with a 'data_cls'\n # the node is likely either an EnumBlock or BoolBlock\n parent[attr_index] = self.node_cls(desc, init_attrs=True)\n\n return offset",
"def test_basic_codeblock_default_theme(app: Sphinx) -> None:\n app.build()\n tree = parse_html(Path(app.outdir) / \"index.html\")\n\n # blocks without caption\n default_blocks = tree(\"div\", class_=\"highlight-default\")\n assert len(default_blocks) == 2\n assert str(default_blocks[0]).replace(\"\\n\", \"\") == (\n '<div class=\"highlight-default notranslate\">'\n '<div class=\"highlight\"><pre><span></span>'\n '<span class=\"nb\">print</span><span class=\"p\">'\n '(</span><span class=\"s2\">\"Hello\"</span>'\n '<span class=\"p\">)</span></pre></div></div>'\n )\n # code block with explicit label\n assert str(default_blocks[1]).replace(\"\\n\", \"\") == (\n '<div class=\"highlight-default notranslate\" id=\"foo\">'\n '<div class=\"highlight\"><pre><span></span>'\n '<span class=\"nb\">print</span><span class=\"p\">'\n '(</span><span class=\"s2\">\"Hello\"</span>'\n '<span class=\"p\">)</span></pre></div></div>'\n )",
"def create_tree(markdown):\n global blocks, pos\n # parse markdown\n blocks = parse_markdown(markdown)\n if config.DEBUG_MODE:\n print('[DEBUG]: Parsed markdown')\n print(blocks)\n\n # create root node\n title = blocks[0].content.get_clean()\n root = Node(title)\n\n # recursively generate children\n pos = 1\n while pos < len(blocks):\n c = recurse()\n if c:\n root.add_child(c)\n \n\n # clean up tree\n root = root.retract()\n return root",
"def __parser(self, text):\n buffer = self.view.buffer\n \n # Snippet components\n fields = Tree()\n mirrors = []\n stop = None\n \n root_init = fields.add(None, None) #empty root\n root = root_init\n \n # Cursor\n insert = buffer.get_insert()\n insert_iter = buffer.get_iter_at_mark(insert)\n \n # Indentation stuff\n pref_manager = tf.app.preferences_manager\n spaces = pref_manager.get_value(\"indentation/use_spaces\")\n tab_width = self.view.get_tab_width()\n indent = self.document.get_indentation(insert_iter)\n \n # Create a mark at start of snippet\n begin_bound_mark = buffer.create_mark(None, insert_iter, True)\n \n # Parsing text\n i = 0\n stack = []\n while (i<len(text)):\n char = text[i]\n \n # Verifying escape char \"\\\"\n if char == \"\\\\\":\n self.view.buffer.insert_at_cursor(text[i+1])\n i += 2\n continue \n \n # Look for a snippet special component \"${}\"\n if char == '$' and (i+1) < len(text) and text[i+1] == '{':\n \n if text[i+2] == '0':\n # STOP\n stop_iter = buffer.get_iter_at_mark(buffer.get_insert())\n stop = buffer.create_mark(None, stop_iter, True)\n\n i += 3\n elif text[i+2] == \"%\":\n # MIRROR\n mirror_iter = buffer.get_iter_at_mark(buffer.get_insert())\n begin_mark = buffer.create_mark(None, mirror_iter, True)\n end_mark = buffer.create_mark(None, mirror_iter, True)\n \n #begin_mark.set_visible(True)\n \n # Get mirror number\n j = i+3\n num = []\n\n while char != '}' and char != '/':\n char = text[j]\n num.append(char)\n j += 1\n\n mirror_num = int(\"\".join(num[:-1]))\n i = j-1\n \n if char == '/':\n k = i\n brace_count = 1\n \n while True:\n \n if text[k] == '{':\n brace_count += 1\n elif text[k] == '}':\n brace_count -= 1\n \n if brace_count == 0:\n break\n \n k += 1\n \n regexp = text[i+1:k].split('/')\n i = k\n \n m = SnippetMirror(self.view, mirror_num, \n (begin_mark, end_mark))\n \n m.regexp = (regexp[0], regexp[1])\n \n else:\n m = SnippetMirror(self.view, mirror_num, \n (begin_mark, end_mark))\n mirrors.append(m)\n else:\n # FIELD\n j = i+2\n num = []\n \n char = text[j]\n while char != ':':\n num.append(char)\n j+=1\n char = text[j]\n\n num = int(\"\".join(num))\n \n field_iter = buffer.get_iter_at_mark(buffer.get_insert())\n begin_mark = buffer.create_mark(None, field_iter, True)\n #begin_mark.set_visible(True)\n \n f = SnippetField(self.view, num, (begin_mark,))\n \n root = fields.add(f, root)\n stack.append(root)\n\n i = j\n \n elif char == '}':\n if len(stack) > 0:\n node = stack.pop()\n \n if len(stack) == 0:\n root = root_init\n \n bm = node.elem.marks[0]\n end_iter = buffer.get_iter_at_mark(buffer.get_insert())\n em = buffer.create_mark(None, end_iter, True)\n #em.set_visible(True)\n node.elem.marks = (bm, em)\n\n elif len(stack) == 0:\n root = root_init\n self.view.buffer.insert_at_cursor(char)\n else:\n root = stack[-1]\n\n elif char == '\\t':\n if spaces:\n self.view.buffer.insert_at_cursor(\" \" * tab_width)\n else:\n self.view.buffer.insert_at_cursor(char)\n elif char == '\\n':\n # LINE BREAK\n buffer.insert_at_cursor(\"\\n\")\n buffer.insert_at_cursor(indent)\n else:\n self.view.buffer.insert_at_cursor(char)\n \n i+=1\n \n #Not well-formed snippet\n if len(stack) > 0:\n fields.pre_order(self.__disconnect_field_signal)\n return\n \n # Change stop gravity\n if stop != None:\n stop_iter = buffer.get_iter_at_mark(stop)\n buffer.delete_mark(stop)\n stop = buffer.create_mark(None, stop_iter, False)\n #stop.set_visible(True)\n \n # Change mirrors gravity\n for i in range(len(mirrors)):\n m = mirrors[i].marks[1]\n n = mirrors[i].marks[0]\n m_iter 
= buffer.get_iter_at_mark(m)\n buffer.delete_mark(m)\n new_m = buffer.create_mark(None, m_iter, False)\n #new_m.set_visible(True)\n mirrors[i].marks = (n, new_m)\n \n # Change fields gravity\n fields.pre_order(self.__fields_change_gravity)\n \n # Change begin bound gravity\n m = begin_bound_mark\n m_iter = buffer.get_iter_at_mark(m)\n buffer.delete_mark(m)\n begin_bound_mark = buffer.create_mark(None, m_iter, False)\n #begin_bound_mark.set_visible(True)\n \n # Create end bound mark\n insert_iter = buffer.get_iter_at_mark(insert)\n end_bound_mark = buffer.create_mark(None, insert_iter, False)\n #end_bound_mark.set_visible(True)\n \n# print \"root: \", fields.root\n# print \"root's children: \", fields.root.children\n \n bounds = (begin_bound_mark, end_bound_mark)\n snippet = Snippet(self.document, fields, mirrors, stop, bounds)\n self.push_snippet(snippet)\n \n if len(snippet.fields.root.children) > 0:\n buffer.place_cursor(buffer.get_iter_at_mark(begin_bound_mark))\n self.next_field()\n else:\n self.pop_snippet()",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def create_docstructure_parser():\n # NOTE. This is just the default parser, no options needed\n # TODO. It is also a stub, awaiting the full docstructure parser redesign,\n # where the parser creates tags similar to other components and where the\n # elements variable is gone.\n return DocumentStructureParser()",
"def blockdiagram(self, name='main'):\n \n # instantiate a new blockdiagram\n bd = BlockDiagram(name=name)\n\n def new_method(cls, bd):\n\n # return a wrapper for the block constructor that automatically\n # add the block to the diagram's blocklist\n def block_init_wrapper(self, *args, **kwargs):\n\n block = cls(*args, bd=bd, **kwargs) # call __init__ on the block\n return block\n \n # return a function that invokes the class constructor\n f = block_init_wrapper\n\n # move the __init__ docstring to the class to allow BLOCK.__doc__\n f.__doc__ = cls.__init__.__doc__ \n\n return f\n \n # bind the block constructors as new methods on this instance\n self.blockdict = {}\n for block in self.blocklibrary:\n # create a function to invoke the block's constructor\n f = new_method(block.cls, bd)\n \n # set a bound version of this function as an attribute of the instance\n # method = types.MethodType(new_method, bd)\n # setattr(bd, block.name, method)\n setattr(bd, block.name, f.__get__(self))\n \n # broken, should be by folder\n # blocktype = block.cls.__module__.split('.')[1]\n # if blocktype in self.blockdict:\n # self.blockdict[blocktype].append(block.name)\n # else:\n # self.blockdict[blocktype] = [block.name]\n\n # add a clone of the options\n bd.options = copy.copy(self.options)\n\n return bd",
"def chunk_parse(self, grammar, no_blanks=True, incomplete='record', **kwargs):\n from nltk import chunk\n from nltk.parse import Tree\n\n cp = chunk.RegexpParser(grammar)\n db = self.parse(**kwargs)\n tb_etree = Element('toolbox_data')\n header = db.find('header')\n tb_etree.append(header)\n for record in db.findall('record'):\n parsed = cp.parse([(elem.text, elem.tag) for elem in record])\n top = parsed[0]\n if not isinstance(top, Tree) or len(parsed) != 1:\n # didn't get a full parse\n parsed.node = incomplete\n top = parsed\n tb_etree.append(self._tree2etree(top, no_blanks))\n return tb_etree",
"def test_captioned_codeblocks_with_default_theme(app: Sphinx) -> None:\n app.build()\n tree = parse_html(Path(app.outdir) / \"index.html\")\n default_blocks = tree(\"div\", class_=\"literal-block-wrapper\")\n\n assert len(default_blocks) == 2\n assert default_blocks[0][\"id\"] == \"id1\"\n assert default_blocks[1][\"id\"] == \"id2\"\n\n children = [c for c in default_blocks[0].children if c.strip is None]\n\n # the first block does not have an explicit label\n assert len(children) == 2\n # first the caption div ...\n assert str(children[0]) == (\n '<div class=\"code-block-caption\">'\n '<span class=\"caption-text\">test</span>'\n '<a class=\"headerlink\" href=\"#id1\" title=\"Permalink to this code\">¶</a></div>'\n )\n # ... then the actual code block\n assert str(children[1]).replace(\"\\n\", \"\") == (\n '<div class=\"highlight-python notranslate\">'\n '<div class=\"highlight\"><pre><span></span>'\n '<span class=\"nb\">print</span><span class=\"p\">'\n '(</span><span class=\"s2\">\"Hello\"</span><span class=\"p\">)'\n \"</span></pre></div></div>\"\n )\n\n children = [c for c in default_blocks[1].children if c.strip is None]\n\n # the second block has an explicit label\n assert len(children) == 3\n assert str(children[0]) == '<span id=\"bar\"></span>'\n assert str(children[1]) == (\n '<div class=\"code-block-caption\">'\n '<span class=\"caption-text\">test</span>'\n '<a class=\"headerlink\" href=\"#id2\" title=\"Permalink to this code\">¶</a></div>'\n )\n assert str(children[2]).replace(\"\\n\", \"\") == (\n '<div class=\"highlight-python notranslate\">'\n '<div class=\"highlight\"><pre><span></span>'\n '<span class=\"nb\">print</span><span class=\"p\">'\n '(</span><span class=\"s2\">\"Hello\"</span><span class=\"p\">)'\n \"</span></pre></div></div>\"\n )",
"def test_nested_three_block_max_block_max_block_max_empty():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >\\n > > > ]\",\n \"[BLANK(1,15):]\",\n \"[para(2,16):]\",\n \"[text(2,16):list:]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def test_nested_three_block_max_block_max_block_max_empty_no_bq2():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > \\n > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >]\",\n \"[BLANK(1,15):]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[icode-block(2,10): :]\",\n \"[text(2,10):\\a>\\a>\\a list: ]\",\n \"[end-icode-block:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n</blockquote>\n</blockquote>\n<pre><code> > list\n</code></pre>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def generate_basic_blocks(self, invoke_ends=False):\n enders = ['return', 'goto', 'throw', 'if-', 'packed-switch', 'sparse-switch']\n if invoke_ends:\n enders.append('invoke-')\n starters = [':', '.catch']\n entry = 0\n splits = self.instructions.splitlines()\n prev = None\n for key, instr in enumerate(splits):\n i = instr.strip()\n for e in enders:\n if i.startswith(e):\n bb = BasicBlock.new_block(self, splits[entry:key + 1], prev)\n if bb:\n self.basic_blocks.append(bb)\n entry = key + 1\n if prev:\n prev.next = bb\n prev = bb\n for s in starters:\n if i.startswith(s):\n if key != entry:\n if entry < key:\n bb = BasicBlock.new_block(self, splits[entry:key], prev)\n if bb:\n self.basic_blocks.append(bb)\n entry = key\n if prev:\n prev.next = bb\n prev = bb\n if entry != len(splits):\n bb = BasicBlock(self, splits[entry:], prev)\n if prev:\n prev.next = bb\n self.basic_blocks.append(bb)\n\n self.basic_blocks[-1].last = True\n self.build_cfg()",
"def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree",
"def code_block(name, arguments, options, content, lineno,\r\n content_offset, block_text, state, state_machine):\r\n language = arguments[0]\r\n highlighter = get_highlighter(language)\r\n if highlighter is None:\r\n error = state_machine.reporter.error(\r\n 'The \"%s\" directive does not support language \"%s\".' % (name, language),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n\r\n if not content:\r\n error = state_machine.reporter.error(\r\n 'The \"%s\" block is empty; content required.' % (name),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n return [error]\r\n\r\n include_text = highlighter(\"\\n\".join(content))\r\n html = '<div class=\"syntax %s\">\\n%s\\n</div>\\n' % (language, include_text)\r\n raw = nodes.raw('',html, format='html')\r\n return [raw]",
"async def parse_block(ctx, block):\n # some questions are inputted from macs and have weird apostrophes. Kill them, and empty newlines\n # also escape underscores so when shown as a question in discord, they do not format, and normalize iOS apostrophes\n rawlines = block.replace('´', '\\'').replace('\\n\\n', '\\n').replace('_', '\\\\_').replace('´', '\\'').split('\\n')\n lines = []\n for line in rawlines:\n if not line.lower().startswith('source:'):\n lines.append(line)\n print(lines)\n # check validity of input\n try:\n if len(lines) % 2:\n raise UserWarning('Ope, I didn\\'t get that. Try not to separate any questions from their answers')\n for i in range(len(lines)):\n if i % 2 and not lines[i].startswith('Answer: '):\n raise UserWarning('Answer did not start with \"Answer: \"\\n```' + lines[i] + '```')\n if (1 + i) % 2 and not lines[i].startswith('Question: '):\n raise UserWarning('Question did not start with \"Question: \"\\n```' + lines[i] + '```')\n except UserWarning as e:\n await ctx.send(e)\n return\n\n out = []\n while lines:\n out.append(parse_next(lines))\n\n await ctx.send(display(out))\n return out",
"def build_parser(self, parser: ArgumentParser):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a tab from the front of each line of the given text. | def detab(self, text):
newtext = []
lines = text.split('\n')
for line in lines:
if line.startswith(' '*self.tab_length):
newtext.append(line[self.tab_length:])
elif not line.strip():
newtext.append('')
else:
break
return '\n'.join(newtext), '\n'.join(lines[len(newtext):]) | [
"def filter_spaces_tabs(text):\n\n return re.sub(\" |\\t\", \"\", text)",
"def remove_next_tab_chars(docs):\n return list(map(lambda s: s.replace('\\n', ' ').replace('\\t', ' '), docs))",
"def remove_indent(self) -> None:\n w = abs(self.tab_width)\n if self.result:\n s = self.result[-1]\n if s.isspace():\n self.result.pop()\n s = s.replace('\\t', ' ' * w)\n if s.startswith('\\n'):\n s2 = s[1:]\n self.result.append('\\n' + s2[: -w])\n else:\n self.result.append(s[: -w])",
"def unindent_text(text, pad):\r\n\tlines = text.splitlines()\r\n\t\r\n\tfor i,line in enumerate(lines):\r\n\t\tif line.startswith(pad):\r\n\t\t\tlines[i] = line[len(pad):]\r\n\t\r\n\treturn '\\n'.join(lines)",
"def remove_extra_lines_and_tabs():\n\n\tfor filename in os.listdir(DATA_DIR):\n\n\t\tclean_lines = []\n\t\twith open(os.path.join(DATA_DIR, filename), \"r\") as f:\n\t\t lines = f.readlines()\n\t\t clean_lines = [l.strip() for l in lines if l.strip()]\n\n\t\twith open(os.path.join(DATA_DIR, filename), \"w\") as f:\n\t\t f.writelines('\\n'.join(clean_lines))",
"def rm_first_line(text):\n return '\\n'.join(text.split('\\n')[1:])",
"def remove_multiple_line_spaces(text):\n return \"\\n\".join([line.strip() for line in text.splitlines() if line.strip() != \"\"])",
"def delete_spaces(text):\n modified_text = ''\n for i in text:\n if i != ' ' and i != '/n':\n modified_text += i\n return modified_text",
"def remove_formatting(self, event):\n self.current_tab.text.delete('1.0', 'end-1c')\n self.current_tab.text.insert(tk.END, self.current_tab.raw)\n self.current_tab.highlighted_text_list = {}\n # Erase saved text list.",
"def delete_till_beginning_of_line(text):\n if text.rfind(\"\\n\") == -1:\n return ''\n return text[0:text.rfind(\"\\n\") + 1]",
"def dedent(text, tabsize=8, skip_first_line=False):\n lines = text.splitlines(1)\n _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)\n return ''.join(lines)",
"def test_preserve_trailing_spaces_on_lines(self):\n tabfile = TabFile('test',self.fp)\n self.assertEqual(tabfile[0][4],\"A comment\")\n self.assertEqual(tabfile[1][4],\"Comment with a trailing space \")\n self.assertEqual(tabfile[2][4],\".\")",
"def remove(f, l, text):\n raw_lines = text.split(\"\\n\")\n lines = []\n # remove all unwanted empty lines.\n for line in raw_lines:\n if line == \"\" or line.isspace():\n if len(lines) == 0 or lines[-1][-1] == \"\\n\": # if there is already one linebreak, do nothing\n continue\n else:\n lines[-1] += \"\\n\"\n else:\n lines.append(line.strip() + \" \")\n # check for validity of parameters\n if len(lines) < (f+l+1):\n raise ValueError(f\"Text in file {file} shorter than f+l\")\n if f < 0 or l < 0:\n raise ValueError(\"Line parameters must be positive\")\n\n # create the new text bits\n first_lines = (\"\".join(lines[:f])).strip()\n new_text = (\"\".join(lines[f:len(lines)-l])).strip()\n last_lines = (\"\".join(lines[len(lines)-l:])).strip()\n return html.unescape(new_text), first_lines, last_lines",
"def fix_empty_line(source, tokens):\n nb = 0\n for char in reversed(source):\n if char in (\" \", \"\\t\"):\n nb += 1\n else:\n break\n tokens[-1].string = source[-nb:]",
"def text_cleanup(text: str) -> str:\n text.replace('\\n', '')\n return re.sub(r'\\s{2,}', ' ', text)",
"def remove_lines(text, line_count):\n\n text_lines = text.split(\"\\n\")\n\n if len(text_lines) > 0:\n if line_count > 0:\n for index in range(0, line_count):\n text_lines.remove(text_lines[index])\n elif line_count < 0:\n text_lines_r = text_lines\n text_lines_r.reverse()\n\n # print \"baa... \" + str(text_lines) + \"...aab\"\n # print \"baaR... \" + str(text_lines_r) + \"...Raab\"\n\n for index in range(0, line_count):\n text_lines_r.remove(text_lines_r[index])\n\n text_lines = text_lines_r\n text_lines.reverse()\n else:\n pass\n\n updated_text = StringUtils.join_list_elements(text_lines, \"\\n\")\n\n return updated_text",
"def clean_text(txt):\n txt = re.sub(r' +', ' ', txt)\n txt = re.sub(r'\\n\\s*\\n', '\\n\\n', txt)\n return txt",
"def __convert_tabs_to_spaces(self, spaces, tabs, line):\n\n line = line.replace('\\t', ' ')\n spaces += tabs * 4\n return spaces, line",
"def remove_new_line(self, text):\n return text.replace('\\n', ' ')",
"def tab_space(code_edit):\n code_edit.insertPlainText(' ')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new li and parse the block with it as the parent. | def create_item(self, parent, block):
li = util.etree.SubElement(parent, 'li')
self.parser.parseBlocks(li, [block]) | [
"def create_item(parent, block):\r\n li = markdown.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])",
"def ul(self, elem, theme, width):\n block, indent = [], ansi.length(theme.margin.head)\n\n for child in self.children(elem, {u('li')}):\n block += [u('')] + add_margin(self.call(child, width - indent),\n theme.margin.head,\n theme.margin.tail(indent))\n\n return block",
"def ol(self, elem, theme, width):\n block, children = [], self.children(elem, {u('li')})\n\n margins = [theme.margin.head(t + 1) for t in range(len(children))]\n indent = maximum_of(margins, key=ansi.length) # for long lists\n\n for t, child in enumerate(children):\n block += [''] + add_margin(self.call(child, width - indent),\n theme.margin.head(t + 1),\n theme.margin.tail(indent))\n\n return block",
"def _add_list(self, parent_tag, descriptor):\n try: # Try numbers descriptor\n list_items = descriptor['numbers']\n list_tag = self._data.new_tag('ol')\n except KeyError:\n try: # Try bullets descriptor\n list_items = descriptor['bullets']\n list_tag = self._data.new_tag('ul')\n except KeyError:\n raise exceptions.UnknownTransform(descriptor, ['numbers', 'bullets'])\n\n for item in list_items:\n item_tag = self._data.new_tag('li')\n self._set_content(item_tag, item)\n list_tag.append(item_tag)\n parent_tag.append(list_tag)",
"def __handle_new_list_item_token(cls, output_html, next_token, transform_state):\n _ = next_token\n\n transform_state.add_trailing_text, transform_state.add_leading_text = (\n \"</li>\",\n \"<li>\",\n )\n token_parts = [output_html]\n if output_html and output_html[-1] == \">\":\n token_parts.append(ParserHelper.newline_character)\n return \"\".join(token_parts)",
"def as_li(self, ar):\n if bleach is None:\n chunks = [self.short_text]\n else:\n chunks = [bleach.clean(\n self.short_text, tags=self.ALLOWED_TAGS, strip=True)]\n\n by = _(\"{0} by {1}\").format(\n naturaltime(self.created), unicode(self.user)),\n chunks += [\n \" (\", E.tostring(ar.obj2html(self, by)), \")\"\n ]\n if self.more_text:\n chunks.append(\" (...)\")\n\n html = ''.join(chunks)\n return \"<li>\" + html + \"</li>\"",
"def create_simple_list_node( values, parent_name, child_name, attribute_name ):\n\n parent_node = etree.Element( parent_name )\n\n for value in values:\n node = etree.Element( child_name )\n node.attrib[attribute_name] = str( value )\n\n parent_node.append( node )\n\n return parent_node",
"def add_list(self, style=None):\n return ListParagraph(\n self._parent,\n numId=self._parent.generate_numId(),\n style=style if style is not None else self.style,\n level=self.level+1,\n )",
"def handle_starttag(self, tag, attrs):\n\n # flush any data accumulated\n if tag in ('ol', 'ul'):\n self.push_data(False)\n else:\n self.push_data(True)\n\n # retain the hierarchy of nested command, which may be needed\n self.curr_tag.append(tag)\n self.curr_attrs.append(attrs)\n\n if tag == \"div\":\n if self.prev_tag in (\"ol\", \"ul\"):\n self.data_pre.append(MarkdownParser.MARKDOWN_NEWSECTION)\n\n if tag == \"strong\":\n self.data_pre.append(self.bold)\n self.data_post.insert(0, self.bold)\n\n elif tag == \"em\":\n self.data_pre.append(self.italic)\n self.data_post.insert(0, self.italic)\n\n elif tag == \"s\":\n self.data_pre.append(self.strikeout)\n self.data_post.insert(0, self.strikeout)\n\n elif tag == \"u\":\n self.data_pre.append(self.underscore)\n self.data_post.insert(0, self.underscore)\n\n elif tag == \"ol\":\n self.curr_list.append(self.list_number) # number to be incremented with every <li>\n\n elif tag == \"ul\":\n if self.list_bullets:\n self.curr_list.append(self.list_bullets.pop(0)) # get the symbol to use\n else:\n # default list marker\n self.curr_list.append(MarkdownParser.DEFAULT_LIST)\n\n elif tag == \"li\":\n # add proper # of spaces\n self.data_pre.append(MarkdownParser.MARKDOWN_NEWLINE)\n self.data_pre.append((\" \" * self.indent) * len(self.curr_list))\n\n if not isinstance(self.curr_list[-1], int):\n self.data_pre.append('{} '.format(self.curr_list[-1]))\n else:\n num = self.curr_list.pop()\n num = num+1\n self.curr_list.append(num)\n self.data_pre.append(\"{}. \".format(num))\n\n elif tag == \"a\":\n href = self.get_attr(attrs, 'href')\n self.data_pre.extend([\"[{}]\".format(href), '('])\n self.data_post.insert(0, \")\")\n\n elif tag in ('h', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6'):\n if tag == 'h':\n tag = 'h1'\n # get the number of header and use as index into the header array\n idx = int(tag[-1:])-1\n if idx < len(self.headers):\n self.data_pre.append(\"{} \".format(self.headers[idx]))\n\n self.data_post.append(MarkdownParser.MARKDOWN_NEWLINE)\n\n elif tag == \"blockquote\":\n self.data_pre.append(self.blockquote)\n self.data_post.insert(0, self.blockquote)\n\n elif tag not in MarkdownParser.SUPPORTED_TAGS:\n self.log.warning(\"Unknown html tag: {}\".format(tag))\n self.data_post.insert(0, MarkdownParser.MARKDOWN_NEWLINE)\n\n # determine if styling is needed\n style = self.get_attr(attrs, 'style')\n if style:\n rgb = self.get_style_attr(style, 'color')\n if rgb:\n rgb_hex = self.convert_rgb(self.get_rgb(rgb))\n self.data_pre.append(\"{{color:{0}}}\".format(rgb_hex))\n self.data_post.insert(0, \"{color}\")\n\n # format monospace data blocks\n font_family = self.get_style_attr(style, 'font-family')\n if font_family and font_family == \"monospace\":\n if isinstance(self.monospace, list):\n self.data_pre.append(self.monospace[0])\n self.data_post.insert(0, self.monospace[1])\n else:\n self.data_pre.append(self.monospace)\n self.data_post.insert(0, self.monospace)",
"def __handle_start_html_block_token(cls, output_html, next_token, transform_state):\n _ = next_token\n\n transform_state.is_in_html_block = True\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n else:\n previous_token = transform_state.actual_tokens[\n transform_state.actual_token_index - 1\n ]\n POGGER.debug(\">previous_token>$>\", previous_token)\n token_parts.append(output_html)\n if previous_token.is_list_end:\n token_parts.append(ParserHelper.newline_character)\n elif previous_token.is_paragraph_end:\n if not transform_state.is_in_loose_list:\n token_parts.append(ParserHelper.newline_character)\n return \"\".join(token_parts)",
"def __init__(self, parent=None):\r\n super(SequenceListingTree, self).__init__(parent)",
"def createListNodeFromListInput(self, input):\n if input ==None:\n return None\n l1 = cur = ListNode(input[0])\n for i in range(1, len(input)):\n newListNode = ListNode(input[i])\n cur.next = newListNode\n cur = cur.next\n return l1",
"def __init__(self, menu_list, attr, pos, body):\n \n content = [urwid.AttrWrap(SelText(\" \" + w), None, attr[1])\n for w in menu_list]\n\n #Calculate width and height of the menu widget:\n height = len(menu_list)\n width = 0\n for entry in menu_list:\n if len(entry) > width:\n width = len(entry)\n\n #Create the ListBox widget and put it on top of body:\n self._listbox = urwid.AttrWrap(urwid.ListBox(content), attr[0])\n overlay = urwid.Overlay(self._listbox, body, ('fixed left', pos[0]),\n width + 2, ('fixed top', pos[1]), height)\n\n urwid.WidgetWrap.__init__(self, overlay)",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def create_fake_parent(el: bs4.Tag) -> _FakeParent:\n\n return _FakeParent(el)",
"def visit_list_item(self, node):\n if len(node.children)==0 or node.children[0].tagname not in \\\n ['paragraph', 'compact_paragraph']:\n self.extend_node_attrs(node, bias=0)\n\n self.visit_list_item_original(self, node)\n\n # For compound list items (e.g. bullet point with two paragraphs):\n # the second paragraph should be recorded as a paragraph, not as\n # a `list_item`\n node._ucomment_num_nodes = 0",
"def create_block_parser(self) -> BlockParser:\n parser = BlockParser()\n for processor in self.get_block_processors():\n parser.add_processor(processor(parser))\n return parser",
"def render_list_item(item: DatedNode) -> str:\n logging.debug(item.node[\"content\"])\n html: str = \"\"\n html += \"<li>\"\n html += item.node[\"content\"]\n html += \" <a href='\" + item.link + \"'>Link</a>\"\n if item.node[\"note\"]:\n html += \"<br/>\"\n html += \"<small>\" + item.node[\"note\"] + \"</small>\"\n html += \"</li>\"\n return html",
"def create_ll_with_loop(ll):\n index = 4\n for i in range(0, 10):\n insert_node_at(ll.head, i, 0)\n\n cur_node = ll.head\n while cur_node.next is not None:\n cur_node = cur_node.next\n\n loop_node = ll.get_node_at_index(index)\n cur_node.next = loop_node",
"def parse_list_chunk(chunk):\n\n indent_amount = 4 # number of spaces to indicate a level of indentation\n outlines = []\n last_indent = 0\n for line in chunk:\n sline = line.strip()\n if sline.startswith('- '):\n list_type = 'ul'\n elif re.match(r'^[\\d]\\. ', sline): # match digit, dot, space\n list_type = 'ol'\n else:\n list_type = None\n if list_type:\n indent_spaces = len(line) - len(line.lstrip())\n indent = indent_spaces / indent_amount\n assert not indent_spaces % indent_amount, \"Bad indent amount\"\n assert indent <= last_indent + 1, \"Indented by more than one level\"\n last_indent = indent\n if list_type == 'ul':\n line_without_markup = sline[2:]\n else:\n line_without_markup = sline[3:]\n outlines.append((list_type, indent, line_without_markup))\n else:\n # if not an li, is just part of existing li on new line\n last_tuple = outlines[-1]\n outlines[-1] = (last_tuple[0], last_tuple[1], last_tuple[2] + line)\n return outlines"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get level of indent based on list level. | def get_level(self, parent, block):
# Get indent level
m = self.INDENT_RE.match(block)
if m:
indent_level = len(m.group(1))/self.tab_length
else:
indent_level = 0
if self.parser.state.isstate('list'):
# We're in a tightlist - so we already are at correct parent.
level = 1
else:
# We're in a looselist - so we need to find parent.
level = 0
# Step through children of tree to find matching indent level.
while indent_level > level:
child = self.lastChild(parent)
if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
if child.tag in self.LIST_TYPES:
level += 1
parent = child
else:
# No more child levels. If we're short of indent_level,
# we have a code block. So we stop here.
break
return level, parent | [
"def get_level(list_, level):\n level = deepcopy(level)\n if len(level) > 0:\n i = level.pop(0)\n return get_level(list_[i], level)\n else:\n return list_",
"def get_indent(node, level=0):\n if node.parent:\n level += 1\n return get_indent(node.parent, level)\n return level",
"def indent_level(self):\n return self.container['indent_level']",
"def get_level():\n return LEVEL",
"def get_level(self):\r\n return self.__level",
"def levelsOflist(lst):\n level = 1\n for n in lst:\n if n and isinstance(n, (list, tuple)):\n level += levelsOflist(n)\n return level\n return 0",
"def _depth(self, l):\n if isinstance(l, list) and len(l) > 0:\n return 1 + max(self._depth(item) for item in l)\n else:\n return 0",
"def get_level(self, node: Node):\n if node == self.root_node:\n return 1\n\n return 1 + self.get_level(node.parent)",
"def get_level(self, level_num=None):\n if level_num is None:\n level_num = self.current_level\n return self.levels_config['Level %s' % level_num]",
"def findLevel(self, level: 'SoNode') -> \"int\":\n return _coin.SoVRMLLOD_findLevel(self, level)",
"def get_depth(self):\n return self.level_type[1]",
"def get_increment_level(count, default='warning'):\n idx = next((idx for idx, sublist in enumerate(_levels) if\n default in sublist), None)\n return _levels[min(idx + count, len(_levels) - 1)][0].upper()",
"def _indent(self, level: int) -> Text:\n\n return self.indent * level",
"def incIndent():\n if _rootLogger != None:\n curLevel = _rootLogger._logIndentLevel\n _rootLogger.incIndent()\n return curLevel\n else:\n return 0",
"def mario_number(level):\n llist = list(str(level))",
"def get_level(levelName):\n return get_user_levels()[levelName]",
"def get_next_lvl(self):\n next_lvl = self.actual_level + 1\n if next_lvl == (self.max_level + 1):\n next_lvl = 1\n else:\n pass\n return next_lvl",
"def level(self, item, min_level=0, max_level=None, return_index=True):\n n = len(self.dimensions)\n if max_level is not None:\n n = min([max_level + 1, n])\n for lev in range(min_level, n):\n if item in self._labs[lev]:\n if return_index:\n return lev, self._index[self._keys[lev]][item]\n else:\n return lev\n else:\n print(f'\"{item}\" not found')\n return None",
"def getLevelName(lvl):\n return _levelNames.get(lvl, \"Level {}\".format(lvl))",
"def get_level(code):\n course = json_to_dictionary(code)[0]\n return course[\"course\"][\"studyLevelName\"]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove ``>`` from beginning of a line. | def clean(self, line):
m = self.RE.match(line)
if line.strip() == ">":
return ""
elif m:
return m.group(2)
else:
return line | [
"def _remove_leading_chars(self, line):\n return line[len(self.leading_chars):]",
"def rm_first_line(text):\n return '\\n'.join(text.split('\\n')[1:])",
"def delete_till_beginning_of_line(text):\n if text.rfind(\"\\n\") == -1:\n return ''\n return text[0:text.rfind(\"\\n\") + 1]",
"def clean_line(line):\n return line.replace(\"\\0\", \"\").strip()",
"def first_trimmed_raw(seg: BaseSegment) -> str:\n s = seg.raw_upper.split(maxsplit=1)\n return s[0] if s else \"\"",
"def _prepare_line(self, line):\r\n return line.rstrip('\\r\\n').strip()",
"def _delete_till_beginning_of_line(self):\n text = self.get_edit_text()\n f_text = delete_till_beginning_of_line(text[:self.edit_pos])\n self.set_edit_text(f_text + text[self.edit_pos:])\n self.set_edit_pos(len(f_text))",
"def _strip_doctest_line(line: str) -> str:\n stripped = re.sub(\"(>>>|\\.\\.\\.)\\s?\", \"\", line)\n\n if re.match(\"\\s*$\", stripped):\n stripped = \"\"\n\n return stripped",
"def prepare(self,line):\n if len(line) > 0 and line[0]==\"`\":\n return self.prepare(line[1:])\n return str(line)",
"def strip_repl_characters(code):\n stripped_lines = []\n for line in code.splitlines():\n if line.startswith('>>> ') or line.startswith('... '):\n stripped_lines.append(line[4:])\n else:\n stripped_lines.append(line)\n return '\\n'.join(stripped_lines)",
"def _remove_prompt(self, line):\n if line.startswith(self.prompt_first):\n return line[len(self.prompt_first):]\n elif line.startswith(self.prompt_next):\n return line[len(self.prompt_next):]\n else:\n return line",
"def delete_to_start_of_line(code_edit):\n textCursor = code_edit.textCursor()\n pos = textCursor.position()\n textCursor.movePosition(QtGui.QTextCursor.StartOfLine)\n textCursor.setPosition(pos, QtGui.QTextCursor.KeepAnchor)\n textCursor.insertText('')",
"def __unlabel_line(self, line):\n\n if line[0:3] == '!*!':\n line = line[3:]\n return line",
"def _trim_at_first_substring(self,sub,s):\n idx = s.find(sub)\n if idx > -1:\n s = s[:idx]\n return s",
"def strip_blank_lines(line):\n while line and not line[0].strip():\n del line[0]\n while line and not line[-1].strip():\n del line[-1]\n return line",
"def remove_new_line(self, text):\n return text.replace('\\n', ' ')",
"def clean(seq):\n return seq.strip().replace(' ', '').replace('\\n', '').replace('\\r', '')",
"def StripLeadingWhitespace(lines):\n return '\\n'.join([s.lstrip() for s in lines.split('\\n')])",
"def ltrim(self, key, start, end):\r\n return self.execute_command(\"LTRIM\", key, start, end)",
"def cleanText(self, line):\n l2 = line.rstrip('\\n') # Remove new lines\n l2 = l2.rstrip(' ') # Remove end spaces\n l2 = l2.split(\"#\", 1)[0] # Remove stuff post '#'\n return l2"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Break a block into list items. | def get_items(self, block):
items = []
for line in block.split('\n'):
m = self.CHILD_RE.match(line)
if m:
# This is a new list item
# Check first item for the start index
if not items and self.TAG=='ol':
# Detect the integer value of first list item
INTEGER_RE = re.compile('(\d+)')
self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
# Append to the list
items.append(m.group(3))
elif self.INDENT_RE.match(line):
# This is an indented (possibly nested) item.
if items[-1].startswith(' '*self.tab_length):
# Previous item was indented. Append to that item.
items[-1] = '%s\n%s' % (items[-1], line)
else:
items.append(line)
else:
# This is another line of previous item. Append to that item.
items[-1] = '%s\n%s' % (items[-1], line)
return items | [
"def blocks(content):\n\n for group in content.split(DIV):\n yield group",
"def cut_to_block_n(items,block=3):\n num_show = int(len(items) / block)\n if num_show > 0:\n results = items[:block * num_show]\n else:\n results = items\n return results",
"def split_list_into_sublists(items, offset):\n chuncks = []\n chunk = []\n chunk_len = 0\n\n for item in items:\n chunk_len += len(item) + 2\n if chunk_len > 80:\n chuncks.append(chunk)\n chunk = []\n chunk_len = len(item) + 2\n chunk.append(item)\n\n if len(chunk) != 0:\n chuncks.append(chunk)\n\n return chuncks",
"def create_item(parent, block):\r\n li = markdown.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])",
"def iter_blocks(self):\n blocks = re.split(r'\\n{2,}', self.args.input.read())\n return (b for b in blocks if b)",
"def collect(cls, block_structure):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def split_list(input_list, result_length):\n# Convert input to int type\n result_length = int(result_length)\n\n# Use yield to hold the lists and compile them until the loop is done iterating\n for i in range(0, len(input_list), result_length):\n yield input_list[i:i+result_length]",
"def handleBlock(block):\n mlines = filter(lambda line : line.startswith('-'), block)\n plines = filter(lambda line : line.startswith('+'), block)\n mcount = len(mlines)\n pcount = len(plines)\n if mcount > pcount:\n plines.extend([''] * (mcount - pcount))\n elif pcount > mcount:\n mlines.extend([''] * (pcount - mcount))\n count = max(mcount, pcount)\n return [(mlines[i],plines[i]) for i in range(count)]",
"def block_splitter(data, block_size):\n if block_size <= 0:\n raise ValueError(\n 'Invalid block size: %s. Value must be greater than 0.'\n % block_size)\n\n for i in xrange(0, len(data), block_size):\n yield data[i:i + block_size]",
"def ul(self, elem, theme, width):\n block, indent = [], ansi.length(theme.margin.head)\n\n for child in self.children(elem, {u('li')}):\n block += [u('')] + add_margin(self.call(child, width - indent),\n theme.margin.head,\n theme.margin.tail(indent))\n\n return block",
"def parse_list_chunk(chunk):\n\n indent_amount = 4 # number of spaces to indicate a level of indentation\n outlines = []\n last_indent = 0\n for line in chunk:\n sline = line.strip()\n if sline.startswith('- '):\n list_type = 'ul'\n elif re.match(r'^[\\d]\\. ', sline): # match digit, dot, space\n list_type = 'ol'\n else:\n list_type = None\n if list_type:\n indent_spaces = len(line) - len(line.lstrip())\n indent = indent_spaces / indent_amount\n assert not indent_spaces % indent_amount, \"Bad indent amount\"\n assert indent <= last_indent + 1, \"Indented by more than one level\"\n last_indent = indent\n if list_type == 'ul':\n line_without_markup = sline[2:]\n else:\n line_without_markup = sline[3:]\n outlines.append((list_type, indent, line_without_markup))\n else:\n # if not an li, is just part of existing li on new line\n last_tuple = outlines[-1]\n outlines[-1] = (last_tuple[0], last_tuple[1], last_tuple[2] + line)\n return outlines",
"def split_paragraph(paragraph):\n\n new_paragraph = []\n\n for paragraph_element in paragraph:\n if paragraph_element[\"kind\"] in [\"code\", \"bold\", \"italic\"]:\n new_paragraph.append(paragraph_element)\n continue\n\n if paragraph_element[\"kind\"] == \"list\":\n items = []\n for item_paragraphs in paragraph_element[\"items\"]:\n new_item_paragraphs = []\n for item_paragraph in item_paragraphs:\n new_item_paragraphs.append(\n split_paragraph(paragraph=item_paragraph)\n )\n items.append(new_item_paragraphs)\n\n paragraph_element[\"items\"] = items\n\n new_paragraph.append(paragraph_element)\n continue\n\n if \"link\" in paragraph_element:\n new_paragraph.append(paragraph_element)\n continue\n\n new_paragraph += split_text(paragraph_element)\n\n return new_paragraph",
"def CreateListFromText(textBlock):\n for charToReplace in SEPARATOR_CHARS:\n textBlock = textBlock.replace(charToReplace, ' ')\n return textBlock.split()",
"def wrap_nested(self):\n for i in range(self.n_blocks):\n block = self.GetBlock(i)\n if not is_pyvista_dataset(block):\n self.SetBlock(i, wrap(block))",
"def split_blocks(space_shape, block_shape, block_halo=None):\n\n warnings.warn(\n \"Please use `kenjutsu.core.split_blocks` instead.\",\n DeprecationWarning\n )\n\n from kenjutsu import core\n\n return core.split_blocks(space_shape, block_shape, block_halo)",
"def Block2List(url, id, item):\r\n hrefs = []\r\n imgsrcs = []\r\n for child in item.iterdescendants():\r\n if child.tag=='a' and child.attrib.has_key('href'):\r\n hrefs.append(child.attrib['href'])\r\n if child.tag=='img' and child.attrib.has_key('src'):\r\n imgsrcs.append(child.attrib['src'])\r\n text = item.text_content().strip()\r\n hrefs = uniqList(hrefs)\r\n imgsrcs = uniqList(imgsrcs)\r\n out_list = [url, id, hrefs, imgsrcs, text]\r\n return out_list",
"def loader_page_chunker_bbm(ull):\n chunks = chunker(ull.find_all(\"li\"), 3)\n for chunk in chunks:\n loader_page_printer(chunk)",
"def group_list_items(self, list_, positions,start=0):\n while start <= len(list_) - positions:\n yield list_[start:start + positions]\n start += positions",
"def ol(self, elem, theme, width):\n block, children = [], self.children(elem, {u('li')})\n\n margins = [theme.margin.head(t + 1) for t in range(len(children))]\n indent = maximum_of(margins, key=ansi.length) # for long lists\n\n for t, child in enumerate(children):\n block += [''] + add_margin(self.call(child, width - indent),\n theme.margin.head(t + 1),\n theme.margin.tail(indent))\n\n return block",
"def blocks():\n i = 0\n while i < len(data):\n try:\n block_data, next_i = rlp.codec.consume_item(data, i)\n except rlp.DecodingError:\n log.fatal('invalid RLP encoding', byte_index=i)\n sys.exit(1) # have to abort as we don't know where to continue\n try:\n if not isinstance(block_data, list) or len(block_data) != 3:\n raise rlp.DeserializationError('', block_data)\n yield eth_protocol.TransientBlock.init_from_rlp(block_data)\n except (IndexError, rlp.DeserializationError):\n log.warning('not a valid block', byte_index=i) # we can still continue\n yield None\n i = next_i"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert AbbrPreprocessor before ReferencePreprocessor. | def extendMarkdown(self, md, md_globals):
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference') | [
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def AddPreprocesorSymbol(self, symbol):\n \n assert(isinstance(symbol, str))\n \n self.preprocessor_symbols.append(symbol)",
"def preprocessing():",
"def preprocessor(*args, **kwargs):\n logger.debug(\"Adding preprocessor from %s\", args)\n return _unwrap(_preprocessors, *args, **kwargs,\n is_list=False, cache_name=\"preprocessor\")",
"def _get_preprocessor(self, ext):\n preprocessor = dj_settings.STATICLINK_PREPROCESSORS.get(ext, False)\n if preprocessor:\n return preprocessor\n raise exceptions.ImproperlyConfigured('Cannot render `%s` in debug mode, set preprocessor (eg `less`) in STATICLINK_PREPROCESSORS config' % ext)",
"def pre_module_decls(self, stream, visitor):\n pass",
"def build_base_preprocessor(self, inplace: bool = False):\n # Categorical Features\n cat_preprocessor, cat_feature_name, cat_tuning_dict = generate_cat_preprocessor(\n **self.preprocessor_params['cat']\n )\n\n # Numerical Features\n num_preprocessor, num_feature_name, num_tuning_dict = generate_num_preprocessor(\n **self.preprocessor_params['num']\n )\n\n # Datetime Features\n date_preprocessor, date_feature_name = generate_date_preprocessor(**self.preprocessor_params['date'])\n\n # Make total FeatureUnion\n transformer_dict_list = [\n {'prefix': 'CAT', 'transformer': cat_preprocessor, 'tuning_params': cat_tuning_dict},\n {'prefix': 'NUM', 'transformer': num_preprocessor, 'tuning_params': num_tuning_dict},\n {'prefix': 'DATE', 'transformer': date_preprocessor},\n ]\n base_preprocessor, self.preprocessor_tuning_params = generate_feature_union(transformer_dict_list)\n\n # Unify self.feature_name\n self.feature_name = cat_feature_name + num_feature_name + date_feature_name\n self.feature_name = [name.lower().replace(' ', '_') for name in self.feature_name]\n\n if inplace:\n self.preprocessor = base_preprocessor\n else:\n return base_preprocessor",
"def _pre_compile(self, content=None):\r\n pass",
"def addnewincludes(inclist1,inclist2):\n #come up with better names!!\n inclist1[0] = inclist1[0] | inclist2[0]\n inclist1[1] = inclist1[1] | inclist2[1]\n inclist1[2] = addnewifdefs(inclist1[2],inclist2[2])\n return(inclist1)",
"def preprocess (self,\r\n source,\r\n output_file=None,\r\n macros=None,\r\n include_dirs=None,\r\n extra_preargs=None,\r\n extra_postargs=None):\r\n pass",
"def prepend(self, bs: BitsType) -> None:\n bs = Bits._create_from_bitstype(bs)\n super().prepend(bs)\n self._pos = 0",
"def addReference(self,addr,referenced):\n return HopperLowLevel.addReference(self.__internal_segment_addr__,addr,referenced)",
"def prepend(self, other):\n self.detach_before()\n self.before = other\n if other != None:\n other.detach_after()\n other.after = self",
"def preprocessing_name(self) -> str:\n return \"preprocessing\"",
"def _add_macro(self, complex_instance):\n macro = Macro(complex_instance.name, complex_instance.primitives)\n if macro not in self.macros:\n self.macros.append(macro)",
"def classpath_prepend(self):",
"def _patch_for_win_loadlibrary(orig_file, patch_file):\n\n for line in orig_file:\n minimal = line.strip().replace(' ', '')\n if minimal == '#include<windows.h>':\n patch_file.write('#include <Python.h>\\n\\n')\n\n patch_file.write(line)",
"def removePreprocessors(self):\n for node in self.xmlRoot.findall(\"preprocessor\"):\n self.xmlRoot.remove(node)",
"def precmd(self, line):\n for handler in self._precmd_handlers:\n line = handler(line)\n\n # After doing preprocessing, pass it off to the super class(es) for\n # whatever they want to do with it.\n line = pdb.Pdb.precmd(self, line)\n\n return line",
"def precompile_process():\r\n SystemParam.MODEL = \"Heisenberg\"\r\n #SystemParam.MODEL= \"Ising\"\r\n SystemParam.SYMMETRY = \"Z2\"\r\n SystemParam.USE_CUSTOM_RANDOM = False\r\n SystemParam.USE_REFLECTION = False\r\n SystemParam.NUM_OF_THREADS = None\r\n SystemParam.only_NN = True\r\n SystemParam.only_NNN = False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add Admonition to Markdown instance. | def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
md.parser.blockprocessors.add('admonition',
AdmonitionProcessor(md.parser),
'_begin') | [
"def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)",
"def visit_title(self, node):\n # The titles of various admonitions are not commentable as titles.\n # These titles will represented as paragraphs, so there is code\n # in the ``visit_paragraph()`` method to prevent them from commenting.\n if node.parent.tagname in ADMONITION_LABELS:\n self.in_admonition_title = True\n else:\n self.extend_node_attrs(node, bias=0)\n self.visit_title_original(self, node)",
"def depart_title(self, node):\n self.in_admonition_title = False\n SmartyPantsHTMLTranslator.depart_title(self, node)",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)",
"def add_content(self, more_content: Any, no_docstring: bool = False):\n\n\t\tname = format_annotation(self.object)\n\t\tcontent = StringList([_(\"Alias of %s\") % name], source='')\n\t\tDataDocumenter.add_content(self, content)",
"def add_comment(self, content, concatenate=True):\n return self.dataset.add_comment(content, concatenate)",
"def set_comment(self, cmt):\n if cmt and cmt[:8] == 'Ansible:':\n self.marker = 'Ansible'\n self.pre_comment = True\n self.comment = cmt[8:].lstrip()\n else:\n self.comment = cmt",
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n\n md.parser.blockprocessors.add('details', DetailsProcessor(md.parser), '_begin')",
"def Example(self, line):\n self._fill = self._indent[self._level].indent + self.INDENT\n self._AddToken(' ' * self._fill + line, Token.Markdown.Normal)\n self._NewLine()\n self.Content()\n self._fill = 0",
"def create_warning_node(self):\n\n item = nodes.paragraph()\n item.append(nodes.warning('', nodes.inline(text=\"This code example can be not actual for this version\")))\n\n return item",
"def markdown_link(title, url):\n return \"[{}]({})\".format(title, url)",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def add_note_to_dataset(self, text_to_add):\n try:\n note_id = __datasets__.current.get_note_id()\n except AttributeError:\n # The dataset may be already deleted?\n return False\n if note_id:\n __notes__.add_auto_text_to_note(note_id, text_to_add)\n else:\n # There was no note yet. Create it and add the text.\n note_id = __notes__.new_note()\n __datasets__.current.set_note_id(note_id)\n __notes__.add_auto_text_to_note(note_id, text_to_add)",
"def add_heading(self, text):\r\n self.html += '<div class=\"heading\">%s</div>\\n' % (text)",
"async def animeadd(self, ctx, anime):\n anime = anime.lower()\n config = SafeConfigParser()\n config.read('anime.ini')\n if config.has_option('anime', anime):\n await self.bot.say('This anime is already in the list!')\n else:\n config.set('anime', '{}'.format(anime), 'No descripton set. Use `animedescset \"{}\" \"description\"` to set a description.'.format(anime))\n with open('anime.ini', 'w') as f:\n config.write(f)\n embed = discord.Embed(title='Added anime!', description='Your suggestion has been successfully recorded. Remember to add a description!', color=0x00FF99)\n await self.bot.say(embed=embed)",
"def add_annotation(self, dg_ann, **more_attrs):\n # First, get the embedding element for all annotations.\n anns_el = self.find_or_create_annotations()\n # Second, create an appropriate element for the new annotation.\n if dg_ann.user is not None:\n username = dg_ann.user.username\n else:\n username = ''\n ann_el = etree.SubElement(\n anns_el,\n self.ANNOTATION_ELEM,\n id=str(dg_ann.pk),\n program_version=dg_ann.program_version,\n date_saved=self.format_datetime(dg_ann.date_saved),\n user=username)\n for name, val in more_attrs.iteritems():\n ann_el.set(name, val)\n if 'offensive' in settings.EXTRA_QUESTIONS:\n ann_el.set('offensive', str(dg_ann.offensive))\n if 'accent' in settings.EXTRA_QUESTIONS:\n ann_el.set('accent', dg_ann.accent or \"native\")\n if 'quality' in settings.EXTRA_QUESTIONS:\n ann_el.set('quality',\n ('clear' if dg_ann.quality == 1 else 'noisy'))\n if dg_ann.notes:\n ann_el.text = dg_ann.notes"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assign attrs to element. | def assign_attrs(self, elem, attrs):
for k, v in get_attrs(attrs):
if k == '.':
# add to class
cls = elem.get('class')
if cls:
elem.set('class', '%s %s' % (cls, v))
else:
elem.set('class', v)
else:
# assign attr k with v
elem.set(self.sanitize_name(k), v) | [
"def setattrs(self, attrs):\n for k, v in attrs:\n self.setattr(k, v)",
"def put_elem_attr(self, elem_blk_id, elem_attrs):\n self.__ex_put_elem_attr(elem_blk_id, elem_attrs)",
"def set_attribute(self,att,val):\r\n self.attributes[att] = val",
"def attributes(self, attributes: \"dict\"):\n self._attrs[\"attributes\"] = attributes",
"def onSetAttr(self, attr, vals, opts):\n pass",
"def setAttributes (\n\n self,\n owner = None, \n filePath = None,\n bibtex = None,\n key = None\n ) :\n\n\n # in case\n \n if utilities.isEmpty( self.attributeList ) : self.attributeList = [ ]\n\n if utilities.isEmpty( self.valueList ) : self.valueList = [ ]\n\n if not utilities.isEmpty( filePath ) :\n\n self.filePath = self.normalizePath( filePath )\n\n self.setAttribute( \"file\", self.filePath )\n\n if not utilities.isEmpty( owner ) :\n\n self.owner = utilities.string( owner, format = \"title\", default = \"\" )\n\n self.setAttribute( \"owner\", self.owner )\n\n if not utilities.isEmpty( bibtex ) :\n\n self.bibtex = str( bibtex )\n\n self.setAttribute( \"bibtex\", self.bibtex )\n\n if not utilities.isEmpty( key ) :\n\n self.key = str( key )",
"def _set_netcdf_attributes(root, attrs):\n for key, val in attrs.items():\n setattr(root, key, val)",
"def set_elem_attrs(attr_parts, elem):\n for attr_part in attr_parts:\n attr_part = attr_part[1:] # chop off '@'\n nv_parts = attr_part.split('=')\n attr_name = nv_parts[0]\n if len(nv_parts) == 1:\n attr_val = ''\n else:\n attr_val = nv_parts[1]\n if attr_val[0] in (\"'\", '\"'):\n attr_val = attr_val[1:-1] # remove quotes\n\n elem.set(attr_name, attr_val)",
"def possibly_init_attrs(self, attrs):\n for key, value in attrs.items():\n if not self.__dict__.has_key(key):\n setattr(self, key, value)",
"def update_attr(field, attr, value):\n\n field.widget.attrs.update({\n attr: value\n })",
"def onAddAttr(self, opts):\n pass",
"def SetAttr(self, attr, value):\n self.__article[attr] = value",
"def set_attributes_randomly(self) -> None:\n for f in self.attributes:\n self.data[f.name] = f.random_value()",
"def set_feature_attributes(self,n,feat):\n feat.initAttributes(len(self.attrs))\n for idx,attr in enumerate(self.attrs):\n name=attr.name()\n typecode=attr.type()\n caster=self.casters[idx]\n item=self.grid.nodes[n]\n\n if name=='node_id':\n feat.setAttribute(idx,caster(n))\n else:\n feat.setAttribute(idx,caster(item[name]))",
"def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm",
"def setCellDefattrs( self, indict ):\n\n for key in indict.keys():\n val = indict[key]\n newattr = \"\"\"%s=\"%s\" \"\"\" % ( key, val )\n\n self.celldefattrs = self.celldefattrs + newattr",
"def handleAttribute(self, attrib, attribObj, parent):\n # Attribute\n element = XmlAttribute(None, parent)\n ns, attrib = split_ns(attrib)\n if ns is not None:\n element.xmlNamespace = ns\n element.attributeName = attrib\n # Attribute value\n string = String(None, element)\n string.defaultValue = attribObj\n element.append(string)\n return element",
"def set_attributes_from_kwargs(self, kwargs):\n for val in self.valid_kwargs:\n if val in kwargs:\n setattr(self, val, kwargs[val])",
"def set_row_attrs(self, row, attrs):\n row_str = idkey_as_str(row)\n attrs_str = _create_attributes_str(attrs)\n fmt = u\"SetRowAttrs(%s,%s,%s)\"\n return PQLQuery(fmt % (self.name, row_str, attrs_str), self.index)",
"def add_additional_attributes(self, attribs: dict):\n for k, v in attribs.items():\n if k not in self.__dict__:\n setattr(self, k, v)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add HilitePostprocessor to Markdown instance. | def extendMarkdown(self, md, md_globals):
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.add("hilite", hiliter, "<inline")
md.registerExtension(self) | [
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n\n md.parser.blockprocessors.add('details', DetailsProcessor(md.parser), '_begin')",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def apply_markdown( request ):\r\n markup = markdown( request.POST['data'], extensions=['codehilite'] )\r\n return render_to_response( 'utils/markup/markdown/preview.html',\r\n {'preview':markup},\r\n context_instance=RequestContext(request))",
"def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)",
"def includeme(config):\n md_renderer = HighlightRenderer()\n render_markdown = mistune.Markdown(renderer=md_renderer)\n\n renderer_dict = {\".md\": render_markdown, \".txt\": render_text}\n\n def get_markup_renderer(filename):\n name, dot, ext = filename.rpartition(\".\")\n complete_extension = dot + ext\n return renderer_dict.get(complete_extension, None)\n\n config.add_request_method(\n lambda request, filename: get_markup_renderer(filename),\n \"get_markup_renderer\",\n )",
"def register_extension(extension):\n if not extension in markdown_extensions:\n markdown_extensions.append(extension)",
"def math_for_markdown(pelicanobj):\n\n try:\n pelicanobj.settings[\"MARKDOWN\"].setdefault(\"extensions\", []).append(\n MathExtension()\n )\n except Exception:\n sys.excepthook(*sys.exc_info())\n sys.stderr.write(\n \"\\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\\n\"\n )\n sys.stderr.flush()",
"def addPostProcessingStatement(self, command):\n self._postProcessing_.append(command)\n return",
"def codehilite(content):\n return _clean(markdown(content, ['codehilite',]), output_format=settings.PAGEDOWN_MARKDOWN_FORMAT)",
"def render(self):\n renderer = PostRenderer(escape=True, hard_wrap=True)\n markdown = mistune.Markdown(renderer=renderer)\n return markdown(self.text)",
"def set_markdown_extensions(site_settings):\n # Base markdown extensions support \"fenced_code\".\n markdown_extensions = [\"fenced_code\"]\n if site_settings[\"pygments\"]:\n markdown_extensions.extend([\n \"extra\",\n \"codehilite(css_class=hlcode)\",\n \"toc(title=Table of Contents)\"\n ])\n\n return markdown_extensions",
"def register_for_html_transform(\n register_handlers: RegisterHtmlTransformHandlersProtocol,\n ) -> None:\n register_handlers(\n SetextHeadingMarkdownToken,\n SetextHeadingMarkdownToken.__handle_start_setext_heading_token,\n SetextHeadingMarkdownToken.__handle_end_setext_heading_token,\n )",
"def initialize():\n InlineProcessor.__inline_character_handlers = {}\n InlineProcessor.__valid_inline_text_block_sequence_starts = \"\\n\"\n InlineProcessor.register_handlers(\n InlineHelper.code_span_bounds, InlineHelper.handle_inline_backtick\n )\n InlineProcessor.register_handlers(\n InlineHelper.backslash_character, InlineHelper.handle_inline_backslash\n )\n InlineProcessor.register_handlers(\n InlineHelper.character_reference_start_character,\n InlineHelper.handle_character_reference,\n )\n InlineProcessor.register_handlers(\n InlineHelper.angle_bracket_start, InlineHelper.handle_angle_brackets\n )\n for i in InlineProcessor.__inline_processing_needed:\n InlineProcessor.register_handlers(\n i, InlineProcessor.__handle_inline_special_single_character\n )\n InlineProcessor.register_handlers(\n LinkHelper.image_start_sequence[0],\n InlineProcessor.__handle_inline_image_link_start_character,\n )",
"def preprocessor(*args, **kwargs):\n logger.debug(\"Adding preprocessor from %s\", args)\n return _unwrap(_preprocessors, *args, **kwargs,\n is_list=False, cache_name=\"preprocessor\")",
"def format_markdown(self, hwslug):\n # To expose static resources in homework desc directory, we need to\n # convert all \"hw://<path>\" urls to hwstatic view urls.\n def translate_url(u):\n # Get rid of 'hw://' and leading '/'\n u = reform_path(u[5:])\n if u.startswith('/'):\n u = u[1:]\n # translate to hwstatic file\n filename = '%s/%s' % (hwslug, u)\n # NOTE: here I hardcoded the url for hwstatic!\n ret = '/hwstatic/%s' % filename\n # we also need to prepend website base url\n return config.WEBSITE_BASEURL + ret\n\n def format(text):\n return markdown(\n text=text,\n output_format='xhtml1',\n extensions=[\n 'extra',\n 'tables',\n 'smart_strong',\n 'codehilite',\n 'nl2br',\n 'toc',\n 'fenced_code',\n ]\n )\n\n # the description\n desc = UrlMatcher(['hw']).replace(self.desc, translate_url)\n self.formatted_desc = format(desc)\n\n # the solution\n if self.solve:\n solve = UrlMatcher(['hw']).replace(self.solve, translate_url)\n self.formatted_solve = format(solve)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new dd and parse the block with it as the parent. | def create_item(self, parent, block):
dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block]) | [
"def create_item(parent, block):\r\n li = markdown.etree.SubElement(parent, 'li')\r\n self.parser.parseBlocks(li, [block])",
"def _make_record(self, parent, gline):\n\n if parent and gline.tag in (\"CONT\", \"CONC\"):\n # concatenate, only for non-BLOBs\n if parent.tag != \"BLOB\":\n # have to be careful concatenating empty/None values\n value = gline.value\n if gline.tag == \"CONT\":\n value = b\"\\n\" + (value or b\"\")\n if value is not None:\n parent.value = (parent.value or b\"\") + value\n return None\n\n # avoid infinite cycle\n dialect = model.DIALECT_DEFAULT\n if not (gline.level == 0 and gline.tag == \"HEAD\") and self._header:\n dialect = self.dialect\n rec = model.make_record(level=gline.level, xref_id=gline.xref_id,\n tag=gline.tag, value=gline.value,\n sub_records=[], offset=gline.offset,\n dialect=dialect, parser=self)\n\n # add to parent's sub-records list\n if parent:\n parent.sub_records.append(rec)\n\n return rec",
"def _create_loop_from_dict(self, d):\n d_copy = d.copy()\n children = d_copy.pop(u\"children\", [])\n columns = d_copy.pop(u\"columns\", [])\n result = GroupedSection(**d_copy)\n\n # columns is a left over from when this was\n # create_table_from_dict, I will need to clean this up\n for column_dict in columns:\n # If this is a none option for a select all that apply\n # question then we should skip adding it to the result\n if column_dict[u\"name\"] == \"none\":\n continue\n\n column = GroupedSection(**column_dict)\n for child in children:\n question_dict = self._name_and_label_substitutions(\n child, column_dict)\n question = self.create_survey_element_from_dict(question_dict)\n column.add_child(question)\n result.add_child(column)\n if result.name != u\"\":\n return result\n\n # TODO: Verify that nothing breaks if this returns a list\n return result.children",
"def _parse_block(self,idx):\n block_tmp = self._block_list[idx]\n blocktype = self._paragraph_or_table[idx]\n paragraph_count = sum(self._paragraph_or_table[:idx+1])\n table_count = idx + 1 - paragraph_count\n df = DataFrame()\n # paragraph\n if blocktype==1:\n l_runText = [r.text for r in block_tmp.runs]\n l_runID = arange(len(l_runText))\n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['paragraph_ID'] = paragraph_count - 1 # 0-starting index \n # table\n if blocktype==0:\n row_count = 0\n for row in block_tmp.rows:\n cell_count = 0\n for cell in row.cells:\n cell_para_count = 0\n for p in cell.paragraphs:\n l_runText = [r.text for r in p.runs]\n l_runID = arange(len(l_runText)) \n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['table_ID'] = table_count - 1 # 0-starting index\n df['row_ID'] = row_count\n df['cell_ID'] = cell_count\n df['paragraph_ID'] = cell_para_count \n cell_para_count += 1\n cell_count += 1\n row_count += 1\n df['block_ID'] = idx\n self._block_dataframe_list[idx] = df",
"async def _format_device_with_single_partition(\n self, new_disk: Disk\n ) -> UDisks2Block:\n block_device: UDisks2Block = self.sys_dbus.udisks2.get_block_device(\n new_disk.device_object_path\n )\n\n try:\n await block_device.format(FormatType.GPT)\n except DBusError as err:\n capture_exception(err)\n raise HassOSDataDiskError(\n f\"Could not format {new_disk.id}: {err!s}\", _LOGGER.error\n ) from err\n\n await block_device.check_type()\n if not block_device.partition_table:\n raise HassOSDataDiskError(\n \"Block device does not contain a partition table after format, cannot create data partition\",\n _LOGGER.error,\n )\n\n try:\n partition = await block_device.partition_table.create_partition(\n 0, 0, LINUX_DATA_PARTITION_GUID, PARTITION_NAME_EXTERNAL_DATA_DISK\n )\n except DBusError as err:\n capture_exception(err)\n raise HassOSDataDiskError(\n f\"Could not create new data partition: {err!s}\", _LOGGER.error\n ) from err\n\n try:\n partition_block = await UDisks2Block.new(\n partition, self.sys_dbus.bus, sync_properties=False\n )\n except DBusError as err:\n raise HassOSDataDiskError(\n f\"New data partition at {partition} is missing or unusable\",\n _LOGGER.error,\n ) from err\n\n _LOGGER.debug(\n \"New data partition prepared on device %s\", partition_block.device\n )\n return partition_block",
"def parse_dd(fp):\n expr = re.compile(r'[\\x0c]{0,1}(\\w+)\\*?[\\s\\t]*(\\d{1,2})[\\s\\t]*(.*?)'\n r'[\\s\\t]*\\(*(\\d+)\\s*[\\-–]\\s*(\\d+)\\)*\\s*$')\n with open(fp) as f:\n lines = (expr.match(x) for x in f)\n matches = filter(None, lines)\n groups = (x.groups() for x in matches)\n\n df = (pd.DataFrame(list(groups),\n columns=['field', 'width', 'desc', 'start', 'end'])\n .convert_objects(convert_numeric=True))\n return df",
"def parse_dts(text, root_dir=''):\n ver = get_version_info(text)\n text = strip_comments(text)\n dts_lines = split_to_lines(text)\n fdt_obj = FDT()\n if 'version' in ver:\n fdt_obj.header.version = ver['version']\n if 'last_comp_version' in ver:\n fdt_obj.header.last_comp_version = ver['last_comp_version']\n if 'boot_cpuid_phys' in ver:\n fdt_obj.header.boot_cpuid_phys = ver['boot_cpuid_phys']\n # parse entries\n fdt_obj.entries = []\n for line in dts_lines:\n if line.endswith('{'):\n break\n if line.startswith('/memreserve/'):\n line = line.strip(';')\n line = line.split()\n if len(line) != 3 :\n raise Exception()\n fdt_obj.entries.append({'address': int(line[1], 0), 'size': int(line[2], 0)})\n # parse nodes\n curnode = None\n fdt_obj.rootnode = None\n for line in dts_lines:\n if line.endswith('{'):\n # start node\n node_name = line.split()[0]\n new_node = Node(node_name)\n if fdt_obj.rootnode is None:\n fdt_obj.rootnode = new_node\n if curnode is not None:\n curnode.append(new_node)\n new_node.parent = curnode\n curnode = new_node\n elif line.endswith('}'):\n # end node\n if curnode is not None:\n curnode = curnode.parent\n else:\n # properties\n if line.find('=') == -1:\n prop_name = line\n prop_obj = Property(prop_name)\n else:\n line = line.split('=', maxsplit=1)\n prop_name = line[0].rstrip(' ')\n prop_value = line[1].lstrip(' ')\n if prop_value.startswith('<'):\n prop_obj = PropWords(prop_name)\n prop_value = prop_value.replace('<', '').replace('>', '')\n for prop in prop_value.split():\n prop_obj.append(int(prop, 0))\n elif prop_value.startswith('['):\n prop_obj = PropBytes(prop_name)\n prop_value = prop_value.replace('[', '').replace(']', '')\n for prop in prop_value.split():\n prop_obj.append(int(prop, 16))\n elif prop_value.startswith('/incbin/'):\n prop_value = prop_value.replace('/incbin/(\"', '').replace('\")', '')\n prop_value = prop_value.split(',')\n file_path = os.path.join(root_dir, prop_value[0].strip())\n file_offset = int(prop_value.strip(), 0) if len(prop_value) > 1 else 0\n file_size = int(prop_value.strip(), 0) if len(prop_value) > 2 else 0\n if file_path is None or not os.path.exists(file_path):\n raise Exception(\"File path doesn't exist: {}\".format(file_path))\n with open(file_path, \"rb\") as f:\n f.seek(file_offset)\n data = f.read(file_size) if file_size > 0 else f.read()\n prop_obj = PropBytes(prop_name, data)\n elif prop_value.startswith('/plugin/'):\n raise NotImplementedError(\"Not implemented property value: /plugin/\")\n elif prop_value.startswith('/bits/'):\n raise NotImplementedError(\"Not implemented property value: /bits/\")\n else:\n prop_obj = PropStrings(prop_name)\n for prop in prop_value.split('\",'):\n prop = prop.replace('\"', \"\")\n prop = prop.strip()\n prop_obj.append(prop)\n if curnode is not None:\n curnode.append(prop_obj)\n\n return fdt_obj",
"def add_child(self):\n # The new child block\n new_block = ConvBlockGene('decode block',\n parent=self)\n # Add the new block to self.children\n self.children.append(new_block)\n\n pass",
"def load(self, record, zip=None, nameList=None, sortedFlag=True): # reads sorted data\n temps = MDFBlock()\n # block header\n temps.loadHeader(self.fid, self.pointerTodata)\n if temps['id'] in ('##DL', b'##DL'): # data list block\n # link section\n temps['dl_dl_next'] = temps.mdfblockread(self.fid, LINK, 1)\n temps['dl_data'] = {}\n temps['dl_data'][0] = [temps.mdfblockread(self.fid, LINK, 1) for Link in range(temps['link_count'] - 1)]\n # data section\n temps['dl_flags'] = temps.mdfblockread(self.fid, UINT8, 1)\n temps['dl_reserved'] = temps.mdfblockreadBYTE(self.fid, 3)\n temps['dl_count'] = temps.mdfblockread(self.fid, UINT32, 1)\n if temps['dl_flags']: # equal length datalist\n temps['dl_equal_length'] = temps.mdfblockread(self.fid, UINT64, 1)\n else: # datalist defined by byte offset\n temps['dl_offset'] = temps.mdfblockread(self.fid, UINT64, temps['dl_count'])\n if temps['dl_dl_next']:\n index = 1\n while temps['dl_dl_next']: # reads pointers to all data blocks (DT, RD, SD, DZ)\n temp = MDFBlock()\n temp.loadHeader(self.fid, temps['dl_dl_next'])\n temps['dl_dl_next'] = temp.mdfblockread(self.fid, LINK, 1)\n temps['dl_data'][index] = [temp.mdfblockread(self.fid, LINK, 1) for Link in range(temp['link_count'] - 1)]\n index += 1\n if temps['dl_count']:\n # read and concatenate raw blocks\n buf = bytearray()\n for DL in temps['dl_data']:\n for pointer in temps['dl_data'][DL]:\n # read fist data blocks linked by DLBlock to identify data block type\n data_block = MDFBlock()\n data_block.loadHeader(self.fid, pointer)\n if data_block['id'] in ('##DT', '##RD', b'##DT', b'##RD', '##SD', b'##SD'):\n buf.extend(self.fid.read(data_block['length'] - 24))\n elif data_block['id'] in ('##DZ', b'##DZ'):\n data_block['dz_org_block_type'] = data_block.mdfblockreadCHAR(self.fid, 2)\n data_block['dz_zip_type'] = data_block.mdfblockread(self.fid, UINT8, 1)\n data_block['dz_reserved'] = data_block.mdfblockreadBYTE(self.fid, 1)\n data_block['dz_zip_parameter'] = data_block.mdfblockread(self.fid, UINT32, 1)\n data_block['dz_org_data_length'] = data_block.mdfblockread(self.fid, UINT64, 1)\n data_block['dz_data_length'] = data_block.mdfblockread(self.fid, UINT64, 1)\n data_block['data'] = decompress_datablock(self.fid.read(data_block['dz_data_length']),\n data_block['dz_zip_type'],\n data_block['dz_zip_parameter'], data_block['dz_org_data_length'])\n buf.extend(data_block['data'])\n data_block['id'] = '##DT' # do not uncompress in DATABlock function\n data_block['data'] = buf\n temps['data'] = DATABlock(record, parent_block=data_block, channelList=nameList, sortedFlag=sortedFlag)\n else: # empty datalist\n temps['data'] = None\n elif temps['id'] in ('##HL', b'##HL'): # header list block for DZBlock\n # link section\n temps['hl_dl_first'] = temps.mdfblockread(self.fid, LINK, 1)\n # data section\n temps['hl_flags'] = temps.mdfblockread(self.fid, UINT16, 1)\n temps['hl_zip_type'] = temps.mdfblockread(self.fid, UINT8, 1)\n temps['hl_reserved'] = temps.mdfblockreadBYTE(self.fid, 5)\n self.pointerTodata = temps['hl_dl_first']\n temps['data'] = self.load(record, zip=temps['hl_zip_type'], nameList=nameList, sortedFlag=sortedFlag)\n elif temps['id'] in ('##DT', '##RD', b'##DT', b'##RD'): # normal sorted data block, direct read\n temps['data'] = record.readSortedRecord(self.fid, self.pointerTodata, channelList=nameList)\n elif temps['id'] in ('##SD', b'##SD'): # VLSD\n temps['data'] = self.fid.read(temps['length'] - 24)\n temps['data'] = DATABlock(record, parent_block=temps, channelList=nameList, sortedFlag=sortedFlag)\n 
elif temps['id'] in ('##DZ', b'##DZ'): # zipped data block\n temps['dz_org_block_type'] = temps.mdfblockreadCHAR(self.fid, 2)\n temps['dz_zip_type'] = temps.mdfblockread(self.fid, UINT8, 1)\n temps['dz_reserved'] = temps.mdfblockreadBYTE(self.fid, 1)\n temps['dz_zip_parameter'] = temps.mdfblockread(self.fid, UINT32, 1)\n temps['dz_org_data_length'] = temps.mdfblockread(self.fid, UINT64, 1)\n temps['dz_data_length'] = temps.mdfblockread(self.fid, UINT64, 1)\n temps['data'] = self.fid.read(temps['dz_data_length'])\n temps['data'] = DATABlock(record, parent_block=temps, channelList=nameList, sortedFlag=sortedFlag)\n else:\n raise Exception('unknown data block')\n return temps['data']",
"def copy(self, parent):\n out = Block(self.type)\n out.pins = dict((k, v.copy(out)) for k, v in self.pins.items())\n out.mirrored = self.mirrored\n out.rotation = self.rotation\n\n out.name = self.name\n out.groups = self.groups\n\n out.size = self.size\n out.field = parent or self\n\n return out",
"def _make_datablock(self):\n section_ids = sorted(self.sections)\n\n # create all insertion id's, this needs to be done ahead of time\n # as some of the children may have a lower id than their parents\n id_to_insert_id = {}\n row_count = 0\n for section_id in section_ids:\n row_count += len(self.sections[section_id].points)\n id_to_insert_id[section_id] = row_count - 1\n\n datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)\n datablock[:, COLS.ID] = np.arange(len(datablock))\n datablock[:, COLS.P] = datablock[:, COLS.ID] - 1\n\n sections = []\n insert_index = 0\n for id_ in section_ids:\n sec = self.sections[id_]\n points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id\n\n idx = slice(insert_index, insert_index + len(points))\n datablock[idx, COLS.XYZR] = points\n datablock[idx, COLS.TYPE] = section_type\n datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)\n sections.append(DataBlockSection(idx, section_type, parent_id))\n insert_index = idx.stop\n\n return datablock, sections",
"def setTree(self, code, child_code, position, factor, yield_, total,\n list_lines, label, type_):\n if code is None: # No-estructured measures\n code = self.getRoot()\n if code == None: # No root\n print \"No-estructured measures. Adding root record\", \n self.setRecord(\"root\", [], 0, \"\", \"\", [0.0,], [(1,1,1970)],\n 0, \"\") \n code = self.getRoot()\n\n if not utils.is_valid_code(code)[0]:\n raise ValueError, utils.mapping(_(\"Invalid parent code: $1\"),\n (code,))\n if not utils.is_valid_code(child_code)[0]:\n raise ValueError, utils.mapping(_(\"Invalid child code: $1 $2\"),\n (code,child_code))\n if not isinstance(position, int):\n raise ValueError, utils.mapping(_(\"Invalid position in measure \"\\\n \"$1, in code $2\"), (parent_code, position))\n # Test circular references\n _all_parent_list = self.getAllParents(code) + [ code ]\n _all_child_list = self.getAllchildren(child_code) + [ child_code ]\n for _parent_code in _all_parent_list:\n if _parent_code in _all_child_list:\n # TODO: change return to except\n print utils.mapping(_(\"Circular Decomposition, parent code: \"\\\n \"$1, child code: $2, repeated code: $3\"),\n (code, child_code, _parent_code))\n return\n\n # Creating reference to parent code in child record\n if child_code in self.__records:\n _child_record = self.__records[child_code]\n else:\n _child_record = self.setRecord(child_code, [], -1, \"\", \"\", [], [],\n \"\", \"\")\n if code in self.__records:\n code = self.__records[code].code\n _child_record.appendParent(code)\n child_code = self.__records[child_code].code\n if code in self.__records:\n # if the code exits retake previous values.\n _record = self.__records[code]\n _child_number = len(_record.children)\n if position == -1: # New child\n position = _child_number\n if position == -2: # No-estructured measures or empty position (error in FIEBDC file)\n positions = _record.getChildPositions(child_code)\n if len(positions) == 1:\n position = positions[0]\n print utils.mapping(_(\"No-estructured measure or empty position. Parent Code: \"\\\n \"$1, Child code: $2, Position: $3\"),(code, child_code, position))\n else:\n position = _child_number\n print utils.mapping(_(\"No-estructured measure or empty position. \"\\\n \"Repeated child in unspecified position. \"\\\n \"It is impossible to determine the position. \"\\\n \"New child is added in the decomposition. 
\"\\\n \"Parent code: $1, Child code: $2, Position: $3\"),(code, child_code, position))\n if position == _child_number:\n # The record do not have the child\n if not isinstance(factor, float): factor = 1.0\n if not isinstance(yield_, float): yield_ = 1.0\n if not isinstance(total, float): total = 0.0\n if not isinstance(list_lines, list): list_lines = []\n _child = _record.appendChild(child_code, self.getDecimals(),\n factor, yield_, total, list_lines, type_, label)\n elif position < _child_number:\n # The record have the child\n _child = _record.children[position]\n if child_code != \"\" and child_code != _child.code:\n _child.code = child_code\n if factor != \"\" :\n if not isinstance(factor, float):\n factor == 1.0\n _child.budgetMeasures[0].setFactor(factor,\n self.getDecimals(), _record.recordType)\n if yield_ != \"\":\n if not isinstance(yield_, float):\n yield_ = 1.0\n _child.budgetMeasures[0].setYield(yield_, \n self.getDecimals(), _record.recordType)\n _measure = _child.budgetMeasures[0]\n if total != \"\":\n if not isinstance(total, float):\n yield_ = 0.0\n _measure.setMeasure(total, self.getDecimals())\n if isinstance(list_lines, list) and len(list_lines) > 0:\n _measure.buildMeasure(list_lines, type_, self.getDecimals(),\n _record.recordType)\n if isinstance(label, str) and label != \"\" :\n _measure.label = label\n else:\n # TODO: change return for except\n print utils.mapping(_(\"Error: Invalid child position in \"\n \"decomposition. Parent code: $1 Child code: $2 \"\\\n \"Position: $3\"), (code, child_code, position))\n return\n else:\n if child_code == \"\" : \n print utils.mapping(_(\"Error: Empty child code. Parent code: \"\\\n \"$1 Position: $2\"), (code, position))\n return\n if position == -1:\n position = 0\n elif position != 0:\n print utils.mapping(_(\"Error: Invalid child position in \"\\\n \"decomposition. Parent code: $1 Child code: $2 \"\\\n \"Position: $3\"), (code, child_code, position))\n return\n if not isinstance(factor, float):\n factor = 1.0\n if not isinstance(yield_, float):\n yield_ = 1.0\n if not isinstance(total, float):\n total = 1.0\n _record = self.setRecord(code, [], \"\", \"\", \"\", [], [],\n \"\", \"\")\n _child = _record.appendChild(child_code, self.getDecimals(),\n factor, yield_, total, list_lines, type_, label)",
"def loadNode(self, element, parent=None):\n try:\n typeFormat = self.model.formats[element.tag]\n except KeyError:\n typeFormat = nodeformat.NodeFormat(element.tag, self.model.formats,\n element.attrib)\n self.model.formats[element.tag] = typeFormat\n if element.get('item') == 'y':\n node = treenode.TreeNode(parent, element.tag, self.model,\n element.attrib)\n if parent:\n parent.childList.append(node)\n else:\n self.model.root = node\n else: # bare format (no nodes)\n node = None\n for child in element:\n if child.get('item') and node:\n self.loadNode(child, node)\n else:\n if node and child.text:\n node.data[child.tag] = child.text\n if child.get('linkcount'):\n self.model.linkRefCollect.searchForLinks(node,\n child.tag)\n typeFormat.addFieldIfNew(child.tag, child.attrib)\n if node and typeFormat.fieldDict:\n try:\n node.setUniqueId()\n except ValueError:\n oldId = node.uniqueId\n node.setUniqueId(True)\n self.duplicateIdList.append('{0} -> {1}'.format(oldId,\n node.uniqueId))",
"def _extract_sections(data_block):\n structure_block = data_block[:, COLS.TYPE:COLS.COL_COUNT].astype(np.int)\n\n # SWC ID -> structure_block position\n id_map = {-1: -1}\n for i, row in enumerate(structure_block):\n id_map[row[ID]] = i\n\n # end points have either no children, more than one, or are the start\n # of a new gap\n sec_end_pts = _section_end_points(structure_block, id_map)\n\n # a 'gap' is when a section has part of it's segments interleaved\n # with those of another section\n gap_sections = set()\n\n sections = []\n\n def new_section():\n \"\"\"A new_section.\"\"\"\n sections.append(DataBlockSection())\n return sections[-1]\n\n curr_section = new_section()\n\n parent_section = {-1: -1}\n\n for row in structure_block:\n row_id = id_map[row[ID]]\n parent_id = id_map[row[PID]]\n if not curr_section.ids:\n # first in section point is parent\n curr_section.ids.append(parent_id)\n curr_section.ntype = row[TYPE]\n\n gap = parent_id != curr_section.ids[-1]\n\n # If parent is not the previous point, create a section end-point.\n # Else add the point to this section\n if gap:\n sec_end_pts.add(row_id)\n else:\n curr_section.ids.append(row_id)\n\n if row_id in sec_end_pts:\n parent_section[curr_section.ids[-1]] = len(sections) - 1\n # Parent-child discontinuity section\n if gap:\n curr_section = new_section()\n curr_section.ids.extend((parent_id, row_id))\n curr_section.ntype = row[TYPE]\n gap_sections.add(len(sections) - 2)\n elif row_id != len(data_block) - 1:\n # avoid creating an extra DataBlockSection for last row if it's a leaf\n curr_section = new_section()\n\n for sec in sections:\n # get the section parent ID from the id of the first point.\n if sec.ids:\n sec.pid = parent_section[sec.ids[0]]\n\n # join gap sections and \"disable\" first half\n if sec.pid in gap_sections:\n _merge_sections(sections[sec.pid], sec)\n\n # TODO find a way to remove empty sections. Currently they are\n # required to maintain tree integrity.\n return sections",
"def prepare_content_tree(self) -> None:\n super().prepare_content_tree()\n\n self.set_numbering()\n\n # Adding the id's of split text in 'new_child_nodes1' list.\n self.split_text_nodes()\n\n # Creating paragraphs and add all id's in 'new_child_nodes2' list.\n self.create_paragraphs()",
"def __init__(self, tree):\n self.tree = tree\n ControlRecord = self._record(\"Control\", Record, tree.records.control)\n DataRecord = self._record(\"Data\", Record, tree.records.data)\n DataRecords = defaultdict(lambda: DataRecord)\n for branch in subtree_walk(tree.segments.idoc.segments):\n name = branch.name\n DataRecords[name] = self._record(name, DataRecord, branch)\n ns = {\n \"__slots__\": [],\n \"ControlRecord\": ControlRecord,\n \"DataRecord\": DataRecord,\n \"DataRecords\": DataRecords,\n }\n self.doc = type(self.tree.segments.idoc.name, (IDoc,), ns)",
"def create_fake_parent(el: bs4.Tag) -> _FakeParent:\n\n return _FakeParent(el)",
"def init_from_parent(cls, parent, coinbase, nonce=b'', extra_data=b'',\n timestamp=int(time.time()), uncles=[], env=None):\n header = BlockHeader(prevhash=parent.hash,\n uncles_hash=utils.sha3(rlp.encode(uncles)),\n coinbase=coinbase,\n state_root=parent.state_root,\n tx_list_root=trie.BLANK_ROOT,\n receipts_root=trie.BLANK_ROOT,\n bloom=0,\n difficulty=calc_difficulty(parent, timestamp),\n mixhash='',\n number=parent.number + 1,\n gas_limit=calc_gaslimit(parent),\n gas_used=0,\n timestamp=timestamp,\n extra_data=extra_data,\n nonce=nonce)\n block = Block(header, [], uncles, env=env or parent.env,\n parent=parent, making=True)\n block.ancestor_hashes = [parent.hash] + parent.ancestor_hashes\n block.log_listeners = parent.log_listeners\n return block",
"def build_paths_tree(self, d, parent):\n if not d:\n return\n for k, v in d.iteritems():\n pathName = ''\n nodeType = ''\n if k.find('__') > 0:\n pathName = k[:k.find('__')]\n nodeType = k[k.find('__')+2:].capitalize()\n else:\n pathName = k\n nodeType = 'Transform'\n self.child = QtWidgets.QTreeWidgetItem(parent)\n parentName = parent.text(0)\n toolTipStr = parent.toolTip(0)\n if parentName == self.assName:\n self.child.setToolTip(0, pathName)\n else:\n if parentName == '/':\n self.child.setToolTip(0, '/' + pathName)\n else:\n self.child.setToolTip(0, toolTipStr + '/' + pathName)\n self.child.setText(0, pathName)\n self.child.setIcon(0,QtGui.QIcon(self.dictNodeType[nodeType]['imagePath']))\n if v:\n parent.addChild(self.child)\n if isinstance(v, dict):\n self.build_paths_tree(v, self.child)",
"def appendChild(self, child_code, decimals, factor=0.0, yield_=0.0,\n measure=0.0, measure_list=None, type_=None, label=None):\n if measure_list is None:\n measure_list = []\n if type_ is None:\n type_ = \"\"\n if label is None:\n label = \"\"\n _measure = Measure(decimals, self.recordType,\n measure, [], label, factor, yield_)\n if len(measure_list) > 0:\n _measure.buildMeasure( measure_list, type_, decimals,\n self.recordType)\n _position = len(self.__children)\n _child = Decomposition(_position, child_code, [_measure])\n self.__children.append(_child)\n return _child"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add FencedBlockPreprocessor to the Markdown instance. | def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
md.preprocessors.add('fenced_code_block',
FencedBlockPreprocessor(md),
">normalize_whitespace") | [
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n\n md.parser.blockprocessors.add('details', DetailsProcessor(md.parser), '_begin')",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def add_block(self, new_block):\n self.mediator.add_block(new_block)",
"def add_track_block(self, block):",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def _append_block(self, block_format=None, before_prompt=False):\n self._append_custom(self._insert_block, block_format, before_prompt)",
"def add_child(self):\n # The new child block\n new_block = ConvBlockGene('decode block',\n parent=self)\n # Add the new block to self.children\n self.children.append(new_block)\n\n pass",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def test_markdown_fenced_code_extension(self):\n md_text = dedent(\"\"\"\n ```\n print 'foo'\n ```\n \"\"\")\n\n expected_html = dedent(\"\"\"\n <pre><code>print 'foo'\\n</code></pre>\n \"\"\")\n\n config = load_config(pages=[{'Home': 'index.md'}])\n page, nav = build_page(None, 'index.md', config, md_text)\n page.render(config, nav)\n self.assertEqual(page.content.strip(), expected_html)",
"def __processCodeBlock(self, parentElem, lines, inList):\r\n detabbed, theRest = self.detectTabbed(lines)\r\n pre = etree.SubElement(parentElem, \"pre\")\r\n code = etree.SubElement(pre, \"code\")\r\n text = \"\\n\".join(detabbed).rstrip()+\"\\n\"\r\n code.text = markdown.AtomicString(text)\r\n self.parseChunk(parentElem, theRest, inList)",
"def test_md027_good_fenced_block_in_list_in_block_quote():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n \"resources\",\n \"rules\",\n \"md027\",\n \"good_fenced_block_in_list_in_block_quote.md\",\n )\n supplied_arguments = [\n \"--disable-rules\",\n \"md031,md032\",\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )",
"def f_blocks(self, f_blocks):\n \n self._f_blocks = f_blocks",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def __handle_start_fenced_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n token_parts = [output_html]\n if (output_html.endswith(\"</ol>\") or output_html.endswith(\"</ul>\")) or (\n output_html and output_html[-1] != ParserHelper.newline_character\n ):\n token_parts.append(ParserHelper.newline_character)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n True,\n )\n token_parts.append(\"<pre><code\")\n if next_token.extracted_text:\n token_parts.extend([' class=\"language-', next_token.extracted_text, '\"'])\n token_parts.append(\">\")\n return \"\".join(token_parts)",
"def add_block(self, data):\n self.__create_block(data)",
"def _post(self, *args, **kwargs):\n return _frame_detection_swig.preamble_detector_bb_sptr__post(self, *args, **kwargs)",
"def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)",
"def add_block(self, block):\n if isinstance(block, Block):\n if block in self.blocks:\n raise ValueError('Duplicate block:' + block.name + 'already exists.')\n else:\n self.blocks[block] = block",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the footnote processing pieces to the Markdown instance. | def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.parser = md.parser
self.md = md
# Insert a preprocessor before ReferencePreprocessor
md.preprocessors.add("footnote", FootnotePreprocessor(self),
"<reference")
# Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
"<reference")
# Insert a tree-processor that would actually add the footnote div
# This must be before all other treeprocessors (i.e., inline and
        # codehilite) so they can run on the contents of the div.
md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
"_begin")
        # Insert a postprocessor after the amp_substitute processor
md.postprocessors.add("footnote", FootnotePostprocessor(self),
">amp_substitute") | [
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def process_markdown(questions):\n for question in questions:\n question['question'] = markdown(question['question'])\n for i in range(len(question['answers'])):\n this = question['answers'][i][0]\n question['answers'][i][0] = markdown_nopara(this)\n return questions",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def _split(self):\n text = self.md\n self.parts = parts = []\n self.headers = headers = []\n lines = []\n \n # Split in parts\n for line in text.splitlines():\n if line.startswith(('# ', '## ', '### ', '#### ', '##### ')):\n # Finish pending lines\n parts.append('\\n'.join(lines))\n lines = []\n # Process header\n level = len(line.split(' ')[0])\n title = line.split(' ', 1)[1]\n title_short = title.split('(')[0].split('<')[0].strip().replace('`', '')\n headers.append((level, title_short))\n parts.append((level, title_short, title))\n else:\n lines.append(line)\n parts.append('\\n'.join(lines))\n \n # Now convert all text to html\n for i in range(len(parts)):\n if not isinstance(parts[i], tuple):\n parts[i] = markdown.markdown(parts[i], extensions=[]) + '\\n\\n'",
"def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n\n md.parser.blockprocessors.add('details', DetailsProcessor(md.parser), '_begin')",
"def append_markdown(mark_tmp, result):\n result = all_results_markdown(result)\n for res in result:\n mark_tmp += res\n return mark_tmp",
"def test_markup_markdown(self):\r\n\r\n a = self.new_article('Demo', '''A First Level Header\r\n====================\r\n\r\nA Second Level Header\r\n---------------------\r\n\r\nNow is the time for all good men to come to\r\nthe aid of their country. This is just a\r\nregular paragraph.''', markup=MARKUP_MARKDOWN)\r\n a.do_render_markup()\r\n\r\n print a.rendered_content",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def format_markdown(self, hwslug):\n # To expose static resources in homework desc directory, we need to\n # convert all \"hw://<path>\" urls to hwstatic view urls.\n def translate_url(u):\n # Get rid of 'hw://' and leading '/'\n u = reform_path(u[5:])\n if u.startswith('/'):\n u = u[1:]\n # translate to hwstatic file\n filename = '%s/%s' % (hwslug, u)\n # NOTE: here I hardcoded the url for hwstatic!\n ret = '/hwstatic/%s' % filename\n # we also need to prepend website base url\n return config.WEBSITE_BASEURL + ret\n\n def format(text):\n return markdown(\n text=text,\n output_format='xhtml1',\n extensions=[\n 'extra',\n 'tables',\n 'smart_strong',\n 'codehilite',\n 'nl2br',\n 'toc',\n 'fenced_code',\n ]\n )\n\n # the description\n desc = UrlMatcher(['hw']).replace(self.desc, translate_url)\n self.formatted_desc = format(desc)\n\n # the solution\n if self.solve:\n solve = UrlMatcher(['hw']).replace(self.solve, translate_url)\n self.formatted_solve = format(solve)",
"def _add_paragraphs(self, reference_tag, transform_group, action):\n not_first_paragraph = False\n paragraph_list = transform_group[action]\n\n for descriptor in paragraph_list:\n paragraph_tag = self._data.new_tag('div',\n style='font-family: Segoe UI; font-size: 13px; color: #595959; text-align: justify;') # noqa\n self._set_content(paragraph_tag, descriptor)\n\n # Add a br tag between paragraphs\n # Add 2, if action is left or right\n if not_first_paragraph:\n reference_tag.append(self._get_br_tag())\n if action in ('left', 'right'):\n reference_tag.append(self._get_br_tag())\n\n # Use the action to determine the relation between the paragraph and the reference\n if action in ('left', 'right') and not not_first_paragraph:\n reference_tag.append(paragraph_tag)\n reference_tag = paragraph_tag\n else:\n reference_tag.insert_after(paragraph_tag)\n reference_tag = paragraph_tag\n\n not_first_paragraph = True",
"def Example(self, line):\n self._fill = self._indent[self._level].indent + self.INDENT\n self._AddToken(' ' * self._fill + line, Token.Markdown.Normal)\n self._NewLine()\n self.Content()\n self._fill = 0",
"def tomarkdown(filename, lines):\n\n # add .csv to the filename if necessary\n if not filename.endswith('.md'):\n filename += '.md'\n\n with open(filename, 'a') as f:\n f.write('\\n'.join(lines))",
"def markdown(self, s):\n\n \"\"\"\n Start with some helper functions to process each markdown type.\n Each markdown element has a method to handle the specifics. Each\n method is passed the following parameters:\n\n Arguments:\n m -- a list of the elements parsed for the match. m[0] is\n the full matched substring within s.\n s -- the string to process\n new_str -- the string used to build the replacement string.\n Generally of the format 'stuff{}stuff', where\n 'stuff' is markdown, and {} is replaced with the\n text between the markdown tags.\n\n Returns:\n Modified string with inline markdown element expanded.\n \"\"\"\n def md_vars(m, s, new_str):\n \"\"\"\n Handle inline link and vars: [variable_name]\n\n See docstring in code for argument information.\n \"\"\"\n def makeJitAttrs(params):\n d = {l[0]: l[1] for l in self._special_parameter.regex.findall(params)}\n return d\n\n self.debug.print(\"mdvars(<strong>m[0])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[0])))\n self.debug.print(\"mdvars(<strong>m[1])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[1])))\n self.debug.print(\"mdvars(<strong>s)=</strong><em>{}</em>\".format(HtmlUtils.escape_html(s)))\n jit_attrs = None if not m[3] else makeJitAttrs(m[3])\n if self._namespaces.exists(m[1]):\n # Substitute the variable name with the value\n c, v = self._stripClass(self._namespaces.getValue(m[1], jit_attrs))\n v = self._md_value(v)\n if(not c):\n # print(\"OLD: {}<br />\\nNEW: {}<br />\".format(m[0], v))\n s = s.replace(m[0], v)\n else:\n s = s.replace(m[0], '<{0}{1}>{2}</{0}>'.format('span', c, v))\n else:\n # No need to do anything here, just leave the unknown link/variable alone\n pass\n\n return s\n\n def md_plain(m, s, new_str):\n \"\"\"\n Handle simple replacement markdown. e.g. *foo* or **bar**, etc.\n\n See docstring in code for argument information.\n \"\"\"\n return s.replace(m[0], new_str.format(m[1]))\n\n # A map linking markdown keys to processor functions\n markdownTypes = [\n ('vars', md_vars),\n ('strong', md_plain),\n ('emphasis', md_plain),\n ('ins', md_plain),\n ('del', md_plain),\n ]\n\n self._inc_nesting_level()\n self.debug.print(\"markdown({})\".format(HtmlUtils.escape_html(s)))\n # For each type of markdown\n for key, md_func in markdownTypes:\n md_obj = self._regex_markdown[key]\n matches = findall(md_obj.regex, s) # find all the matches\n for m in matches:\n # for each match, process it\n s = md_func(m, s, md_obj.new_str)\n\n #print(\"RETURN: {}\".format(s))\n self._dec_nesting_level()\n return s # return the processed string",
"def md_for_this_person(student):\n slide_data = {}\n name = student[\"unsw_name\"].split(\",\")\n slide_data[\"name\"] = \"{} {}\".format(name[1], name[0])\n slide_data[\"topic\"] = student[\"h_w_topic\"]\n slide_data[\"repo\"] = student[\"repo_name\"]\n slide_data[\"ghu\"] = student[\"gitHubUsername\"]\n\n md = \"\"\"---\n\n# {topic}\n\n## {name}\n\n{{.background}}\"\"\".format(**slide_data)\n\n for i in range(1, 5):\n md += \"\"\"\\n---\n\n# {0} {{.big}}\n\n## Explaining what it means\\n\"\"\".format(i)\n\n for i in range(5, 15):\n md += \"\"\"\\n---\n\n# {0} {{.big}}\n\n## Going into some detail\\n\"\"\".format(i)\n\n for i in range(15, 21):\n md += \"\"\"\\n---\n\n# {0} {{.big}}\n\n## Explaining why you believe what you do\\n\"\"\".format(i)\n\n md += \"\"\"---\n\n\n---\n\n\n\"\"\"\n return md",
"def markdown_card(\n box: str,\n title: str,\n content: str,\n data: Optional[PackedRecord] = None,\n commands: Optional[List[Command]] = None,\n) -> MarkdownCard:\n return MarkdownCard(\n box,\n title,\n content,\n data,\n commands,\n )",
"def apply_markdown_formatting(list_of_records):\n markdown_string = \"\"\n\n for item in list_of_records:\n line = \"#\" + \" \" + item + \"\\n\\n\"\n markdown_string += line\n\n print(markdown_string)\n return markdown_string",
"def read_md(filename, new_html):\n\tmarkdown = mistune.Markdown()\n\twith open(filename, 'r') as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\tline = line.strip()\n\t\t\tnew_line = markdown(line)\n\t\t\tnew_html.append(new_line)\n\tf.close()\n\treturn new_html",
"def write_markdown_template_body(sections):\n for section in sections:\n print((\"## \" + section[SECTION] + \"\\n\"))\n if CONSIDER in list(section.keys()):\n for consider in section[CONSIDER]:\n print((\"> * \" + consider))\n else:\n print(\"> ...\")\n print(\"\")",
"def add_pieces(self):\n i = 0\n j = 0\n for c in self.fen_pos:\n try:\n a = int(c)\n j += a\n except ValueError:\n if c == \"/\":\n i += 1\n j = 0\n else:\n self.board_array[i, j].add_content(self.Piece(c))\n j += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clear the footnotes on reset, and prepare for a distinct document. | def reset(self):
self.footnotes = OrderedDict()
self.unique_prefix += 1 | [
"def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()",
"def finalize(self):\n # we could not fill out links while parsing (referenced sections where not known),\n # so try to set them now, where the document is complete\n for sec in self.itersections(recursive=True):\n if sec._link is not None:\n sec.link = sec._link\n if sec._include is not None:\n sec.include = sec._include",
"def tearDown(self):\n Documents.documents_list = []",
"def reset(self):\n self.lines = []\n self.total_todos = 0\n self.active_todos = []\n self.done_todos = []",
"def _cleanupBibTex(self, count):\n import bibtexparser\n from bibtexparser.bparser import BibTexParser\n parser = BibTexParser()\n parser.customization = homogeneize_latex_encoding\n bib = bibtexparser.loads(self.refs, parser=parser)\n\n # save results\n from bibtexparser.bwriter import BibTexWriter\n writer = BibTexWriter()\n writer.contents = ['entries']\n writer.indent = ' '\n writer.order_entries_by = ('id')\n self.number = len(bib.entries)\n self.refs = bibtexparser.dumps(bib, writer)",
"def set_dirty(self):\n\t\tself._dirty_heading = True\n\t\tself._dirty_body = True\n\t\tif self._document:\n\t\t\tself._document.set_dirty_document()",
"def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div",
"def refiner_reset(self):\n self._refiner_reset = True",
"def reset():\n global _bibliography\n _bibliography = defaultdict(_bibliography_task_generator)",
"def set_dirty_heading(self):\n\t\tself._dirty_heading = True\n\t\tif self._document:\n\t\t\tself._document.set_dirty_document()",
"def reset_generator(self):\n self.doc_generator = (self._file_to_docs(f, self.doc_class)\n for f in self.corpus_files\n )",
"def clear_journal_entry_terms(self):\n pass",
"def flush_stored_pdf(self):\n\n self._calculated_pdf = None",
"def reset(self):\n self.__template = None",
"def wipe_contents(self):\n\n # We rely on put_expiry_flags to have properly set the source_date,\n # entry_date, and is_expired flags on Notes, as necessary.\n assert self.is_expired\n\n # Delete all related Notes (they will have is_expired == True by now).\n db.delete(self.get_notes(filter_expired=False))\n if self.photo:\n db.delete(self.photo) # Delete the locally stored Photo, if any.\n\n for name, property in self.properties().items():\n # Leave the subdomain, is_expired flag, and timestamps untouched.\n if name not in ['subdomain', 'is_expired',\n 'source_date', 'entry_date', 'expiry_date']:\n setattr(self, name, property.default)\n self.put() # Store the empty placeholder record.",
"def startDocument(self):\n bp()\n self.reset()",
"def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote",
"def clear(self):\r\n self.prepare()\r\n self.buffer[:] = [self.dtext]\r\n self.firstwrite = 1",
"def cleanup(self):\n self.output_standard = ''\n self.template_filename = ''\n self.template_identifier = ''\n self.template = None\n self.translator = None\n self.translator_ugettext = None\n self.translator_ungettext = None\n self.translatable_element_set.clear()\n self.translatable_attribute_set.clear()\n self.namespace_map.clear()\n self.function_map.clear()\n if constants.GENERATE_DEBUG_COMMENTS:\n self.template_lines = []",
"def clear_journal_entry_id_terms(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return ElementTree Element that contains Footnote placeholder. | def findFootnotesPlaceholder(self, root):
def finder(element):
for child in element:
if child.text:
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, True
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False
finder(child)
return None
res = finder(root)
return res | [
"def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div",
"def generate_note(stem: str) -> str:\n note = f\"\"\"\n.. note::\n An *xml* file containing the defaults for the `{stem}` calculator can be created via `-p {stem} -o FILENAME` command line options `\n\"\"\"\n return note",
"def _get_dummy_note(self, uid=0):\n nid = uuid4().hex\n return {\n \"id\": nid,\n \"created\": \"2014-10-31T10:05:00.000000\",\n \"updated\": \"2014-10-31T10:50:00.101010\",\n \"user\": \"dummy-user-id\",\n \"usage_id\": \"dummy-usage-id-\" + str(uid),\n \"course_id\": \"dummy-course-id\",\n \"text\": \"dummy note text \" + nid,\n \"quote\": \"dummy note quote\",\n \"ranges\": [\n {\n \"start\": \"/p[1]\",\n \"end\": \"/p[1]\",\n \"startOffset\": 0,\n \"endOffset\": 10,\n }\n ],\n }",
"def test_xform_empty_question_label_patch_content_add(self):\n xml_template = self.xml_template\n expected = \" \"\n xml_input = xml_template.format(\"\")\n parsed, _ = xform_patch._xform_empty_question_label_patch_content(\n xml_input)\n itext = parsed[\"h:html\"][\"h:head\"][\"model\"][\"itext\"]\n observed = itext[\"translation\"][0][\"text\"][0][\"value\"]\n self.assertIn(expected, observed)",
"def notes_xml(self):\n\n if self.notes == []:\n return ''\n xml = '<Notes>\\n'\n for note in self.notes:\n xml += note\n xml += '</Notes>\\n'\n return xml",
"def test_xform_empty_question_label_patch_content_no_overwrite(self):\n xml_template = self.xml_template\n expected = \"My plain string itext question label\"\n xml_input = xml_template.format(\n \"\"\"<value>{0}</value>\"\"\".format(expected))\n observed, _ = xform_patch._xform_empty_question_label_patch_content(\n xml_input)\n parsed, _ = xform_patch._xform_empty_question_label_patch_content(\n xml_input)\n itext = parsed[\"h:html\"][\"h:head\"][\"model\"][\"itext\"]\n observed = itext[\"translation\"][0][\"text\"][0][\"value\"][1][\"#text\"]\n self.assertEqual(expected, observed)",
"def do_placeholder(parser, token):\n name, params = parse_placeholder(parser, token)\n return PlaceholderNode(name, **params)",
"def create_none( self, n ):\n\t\treturn self.document.createElement( 'none' )",
"def __createXMLElement (name, descr = None, attrs = {}, nsmap = {}):\n\n element = etree.Element(name, attrs, nsmap=nsmap)\n \n if descr != None:\n for match in regex.finditer(descr):\n descr = descr[:match.start()] + \"?\" + descr[match.end():]\n element.text= descr\n\n return (element)",
"def _root_body_(self):\n node = self.worldbody.find(\"./body[@name='{}']\".format(self._root_))\n return node",
"def _default_footer_xml(cls):\n path = os.path.join(\n os.path.split(__file__)[0], '..', 'templates', 'default-footer.xml'\n )\n with open(path, 'rb') as f:\n xml_bytes = f.read()\n return xml_bytes",
"def placeholder(cls):\n return PlugPlaceholder(cls)",
"def body(self) -> Optional[\"HOCRNode\"]:\n return self.html.find(\"body\")",
"def createExtTip(self):\n if self.tipInUse() == True and self.model.isExternalTip() == True:\n return \"\"\" <div id=\"extTip\"></div>\"\"\"\n else:\n return \"\"",
"def get_descendant_footnotes(self, instance):\n if not instance.is_root and instance.node_type != 'table':\n return None\n footnotes = []\n for node in instance.cursor.walk():\n for citation in node.footnotecitations.all():\n subtree = DocCursor(instance.cursor.tree,\n citation.footnote_node.identifier)\n footnotes.append(self.serialize_doc_cursor(subtree))\n return footnotes",
"def create_note_xml(self, guid, text, user, datetime, name=\"\"):\n\n guid = self.create_guid()\n xml = '<Note guid=\"' + guid + '\" '\n xml += 'creatingUser=\"' + user + '\" '\n xml += 'creationDateTime=\"' + datetime + '\" '\n if name != \"\":\n xml += 'name=\"' + name + '\" '\n xml += '>\\n'\n xml += '<PlainTextContent>' + text + '</PlainTextContent>\\n'\n xml += '</Note>\\n'\n self.notes.append(xml)\n noteref = '<NoteRef targetGUID=\"' + guid + '\" />\\n'\n return noteref",
"def empty_template():\n \n text = [html.H3('No details available', style={'backgroundColor':'#327ba8',\n 'color':'white',\n 'padding':'5px',\n 'border-radius': 10})]\n return text",
"def _lt1_dummy_element(msmt):\n e = element.Element('dummy', pulsar = msmt.pulsar_lt1, global_time = True)\n \n e.append(pulse.cp(msmt.T, length=10e-6))\n\n return e",
"def testFootnotes(self, b, u):\n rx = re.compile(r'\\\\f \\+ [^\\\\][^f][^r]')\n if not rx.search(u) == None:\n print('Footnote without back reference in: ' + b)",
"def makeNoteFrame(self):\n\n noteFrame = Frame(self.root, height=60, relief=RAISED, borderwidth=2)\n\n self.noteText = StringVar()\n\n noteLabelArgs = {'textvariable': self.noteText,\n 'height': 5, 'width': 80, 'justify': LEFT,\n 'wraplength': self.defaultWidth - 100}\n self.noteLabel = Label(noteFrame, **noteLabelArgs)\n\n noNotes = \"No notes. Open an existing project or create a new one to\" +\\\n \" import notes.\"\n self.noteText.set(noNotes)\n\n self.noteLabel.pack(side=LEFT, expand=YES)\n\n nextButton = Button(noteFrame, text=\"Next\") \n nextButton.config(command=(lambda: self.outliner.nextNote()))\n nextButton.pack(side=TOP)\n\n prevButton = Button(noteFrame, text=\"Prev\") \n prevButton.config(command=(lambda: self.outliner.prevNote()))\n prevButton.pack(side=BOTTOM)\n\n return noteFrame"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Store a footnote for later retrieval. | def setFootnote(self, id, text):
self.footnotes[id] = text | [
"def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote",
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def add_note(self):\n note_id = __notes__.new_note()\n self.set_note_id(note_id)",
"def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1",
"def set_note(self, **kwargs):\n return self.client.execute(\"order/set_note\", \"POST\", kwargs)",
"def write_note(data, commit='HEAD'):\n command = 'git notes --ref=%s add ' % NOTES_REF\n for k, v in data.iteritems():\n command = '%s -m \"%s: %s\"' % (command, k, v)\n run_command(command)",
"def _add_note_entry(self):\n note = self.faker.sentence()\n instance = models.Note.objects.create(child=self.child, note=note)\n instance.save()\n self._add_tags(instance)",
"def save_note(window, title, body, category=\"Shopping\"):\n\n # creates the note object, after cleaning up the input values\n new_note = Note(title.capitalize().strip(),\n body.capitalize().strip(),\n category.capitalize().strip())\n\n # add the note to the note list\n notes.append(new_note)\n print(\"Title: {}\".format(new_note.get_title()))\n print(\"Body: {}\".format(new_note.get_text()))\n print(\"Category: {}\".format(new_note.get_category()))\n\n # close the window this function was called from\n window.destroy()",
"def add_note(self, note):\n self.note = note\n self.set_box_height_width(self.note)",
"def record_note(self, tag, note):\n if self.record_file:\n rec = [NOTE, tag, note]\n f = open(self.record_file, 'a')\n labeled_dump('note', rec, f, 1)\n f.close()",
"def set_note(self, product, note, options=[]):\n note = str(note)\n item_index = self.__index__(product, options)\n if item_index != -1:\n self._items_list[item_index].note = note\n self.update_session()",
"def add_note(self, trip, destination, note):\n note._id = self.push(note.attrs, trip._id, 'destinations',\n destination._id, 'notes')",
"def create_note(citekey, config, bbt, force, template):\n candidates = bbt.search_citekey_in_bbp(citekey)\n if not candidates:\n click.echo(\"No results found for \" + citekey)\n sys.exit()\n elif len(candidates) != 1:\n click.echo(\"Something wrong happened here. We have too many candidates...\")\n sys.exit()\n else:\n candidate = candidates[0]\n fieldValues = bbt.extract_fields(candidate)\n\n # Fill template\n try:\n note = Note(citekey, fieldValues, config, template)\n except BadTemplateName as e:\n click.echo(e)\n sys.exit()\n\n # Write output file\n notes_dir = Path(config[\"notes\"])\n outfile = notes_dir / f\"{citekey}.md\"\n\n if outfile.exists():\n if force:\n click.echo(f\"Overwriting {str(outfile)}\")\n else:\n choice = click.confirm(\n \"This file already exists. Edit instead?\"\n \"Use --force to overwrite files.\"\n )\n if choice:\n os.system(f\"{config['editor']} {str(outfile)}\")\n else:\n click.echo(f\"Writing {str(outfile)}\")\n\n # Write note\n outfile.write_text(note.render())",
"def save_note():\n save_name = filedialog.asksaveasfilename(title=\"Save Note\",defaultextension=\".txt\",\n initialdir=\"./\",\n filetypes=[(\"Text File\",\"*.txt\"),(\"All Files\",\"*.*\")])\n with open(save_name,\"w\") as f:\n # first 3 lines are font-family font-size and font-option\n f.write(font_family.get()+\"\\n\")\n f.write(str(font_size.get())+\"\\n\")\n f.write(font_option.get()+\"\\n\")\n # write the remaining body\n f.write(input_text.get(\"1.0\",tk.END))",
"def notesCreate(self, note, callback):\n self.callMethodRetLong(u\"notes.create\", note, callback)",
"def create_note(self, notes = None, return_url = False):\n data = {}\n if notes is not None: data['notes'] = notes\n\n return self.__create_object('notes', '', data, return_url)",
"def __storeInFile(self):\r\n with open(self.__fileName,\"w\") as f:\r\n for gr in self.__listNote:\r\n grf=gr.getStudent().getID()+\",\"+gr.getSubject().getID()+\",\"+str(gr.getNote())+'\\n'\r\n f.write(grf)",
"def add_note(self, note):\n cmd = self._repo._repo.git\n cmd.notes('--ref', self.NOTE_REF, 'add', '-f', '-m', note, self.sha)",
"def add_note(self,note):\n q=\"insert into note(msg) values('%s')\"%(note.get_msg())\n try:\n NoteDB.cursor.execute(q)\n NoteDB.db.commit()\n except Exception as e:\n print(e)\n NoteDB.db.rollback()\n raise",
"def persist(self, entry):\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return div of footnotes as an etree Element. | def makeFootnotesDiv(self, root):
if not list(self.footnotes.keys()):
return None
div = etree.Element("div")
div.set('class', 'footnote')
etree.SubElement(div, "hr")
ol = etree.SubElement(div, "ol")
for id in self.footnotes.keys():
li = etree.SubElement(ol, "li")
li.set("id", self.makeFootnoteId(id))
self.parser.parseChunk(li, self.footnotes[id])
backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
if self.md.output_format not in ['html5', 'xhtml5']:
backlink.set("rev", "footnote") # Invalid in HTML5
backlink.set("class", "footnote-backref")
backlink.set("title", "Jump back to footnote %d in the text" % \
(self.footnotes.index(id)+1))
backlink.text = FN_BACKLINK_TEXT
if li.getchildren():
node = li[-1]
if node.tag == "p":
node.text = node.text + NBSP_PLACEHOLDER
node.append(backlink)
else:
p = etree.SubElement(li, "p")
p.append(backlink)
return div | [
"def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div",
"def get_descendant_footnotes(self, instance):\n if not instance.is_root and instance.node_type != 'table':\n return None\n footnotes = []\n for node in instance.cursor.walk():\n for citation in node.footnotecitations.all():\n subtree = DocCursor(instance.cursor.tree,\n citation.footnote_node.identifier)\n footnotes.append(self.serialize_doc_cursor(subtree))\n return footnotes",
"def result_nodes(\n self,\n document: \"docutils.nodes.document\",\n env: \"BuildEnvironment\",\n node: \"docutils.nodes.Element\",\n is_ref: bool,\n ) -> Tuple[List[\"docutils.nodes.Node\"], List[\"docutils.nodes.system_message\"]]:\n if not node.get(\"refdomain\"):\n assert node[\"reftype\"] == \"footcite\"\n node[\"refdomain\"] = \"footcite\"\n node[\"reftype\"] = \"p\"\n foot_domain = cast(\"BibtexFootDomain\", self.env.get_domain(\"footcite\"))\n keys = [key.strip() for key in self.target.split(\",\")] # type: ignore\n try:\n foot_bibliography = env.temp_data[\"bibtex_foot_bibliography\"]\n except KeyError:\n env.temp_data[\n \"bibtex_foot_bibliography\"\n ] = foot_bibliography = foot_domain.bibliography_header.deepcopy()\n foot_old_refs = env.temp_data.setdefault(\"bibtex_foot_old_refs\", set())\n foot_new_refs = env.temp_data.setdefault(\"bibtex_foot_new_refs\", set())\n style = find_plugin(\n \"pybtex.style.formatting\", self.config.bibtex_default_style\n )()\n references = []\n domain = cast(\"BibtexDomain\", self.env.get_domain(\"cite\"))\n # count only incremented at directive, see foot_directives run method\n footbibliography_count = env.temp_data.setdefault(\n \"bibtex_footbibliography_count\", 0\n )\n footcite_names = env.temp_data.setdefault(\"bibtex_footcite_names\", {})\n for key in keys:\n entry = domain.bibdata.data.entries.get(key)\n if entry is not None:\n formatted_entry = style.format_entry(label=\"\", entry=entry)\n if key not in (foot_old_refs | foot_new_refs):\n footnote = docutils.nodes.footnote(auto=1)\n # no automatic ids for footnotes: force non-empty template\n template: str = (\n env.app.config.bibtex_footcite_id\n if env.app.config.bibtex_footcite_id\n else \"footcite-{key}\"\n )\n raw_id = template.format(\n footbibliography_count=footbibliography_count + 1, key=entry.key\n )\n # format name with make_id for consistency with cite role\n name = make_id(raw_id)\n footnote[\"names\"] += [name]\n footcite_names[entry.key] = name\n footnote += domain.backend.paragraph(formatted_entry)\n document.note_autofootnote(footnote)\n document.note_explicit_target(footnote, footnote)\n node_text_transform(footnote)\n foot_bibliography += footnote\n foot_new_refs.add(key)\n references.append(\n (\n entry,\n formatted_entry,\n FootReferenceInfo(\n key=entry.key,\n refname=footcite_names[entry.key],\n document=document,\n ),\n )\n )\n else:\n logger.warning(\n 'could not find bibtex key \"%s\"' % key,\n location=(env.docname, self.lineno),\n type=\"bibtex\",\n subtype=\"key_not_found\",\n )\n ref_nodes = format_references(\n foot_domain.reference_style, node[\"reftype\"], references\n ).render(domain.backend)\n return ref_nodes, []",
"def dom_element(self):\n data = self.data\n dom_element = data.dom_element\n return dom_element",
"def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n root = markdown.etree.Element(\"div\")\r\n self.parseChunk(root, '\\n'.join(lines))\r\n return markdown.etree.ElementTree(root)",
"def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElement = dom.documentElement\n if self.description is not None:\n domElement.setAttribute('description', self.description)\n e = dom.createTextNode(self.filename)\n domElement.appendChild(e)\n\n return domElement",
"def depart_htb_html(self, node):\n # Need to close two divs\n self.depart_admonition(node)\n self.depart_admonition(node)",
"def notes_xml(self):\n\n if self.notes == []:\n return ''\n xml = '<Notes>\\n'\n for note in self.notes:\n xml += note\n xml += '</Notes>\\n'\n return xml",
"def set_notes_from_content(notes_holder):\n dom = BeautifulSoup(notes_holder.content, \"html.parser\")\n notes_ids = (note_element['id'] for note_element in dom.find_all('adele-note'))\n notes_holder.notes = set(Note.query.filter(Note.id.in_(notes_ids)).all())",
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def to_etree(self):\n elements_to_rtn = [] # A list of elements that will be returned\n # and then appended to the body\n annotation_body = Element('annotation')\n # TO RETURN\n elements_to_rtn.append(annotation_body)\n\n mention_id = SubElement(annotation_body, 'mention')\n mention_id.set('id', self.id)\n\n annotator_id = SubElement(annotation_body, 'annotator')\n annotator_id.set('id', 'eHOST_2010')\n annotator_id.text = self.annotator\n\n span = SubElement(annotation_body, 'span', {'start': str(self.span_in_document[0]),\n 'end': str(self.span_in_document[1])})\n spanned_text = SubElement(annotation_body, 'spannedText')\n spanned_text.text = self.text\n creation_date = SubElement(annotation_body, 'creationDate')\n creation_date.text = self.datetime\n\n\n # Now create class_mention\n class_mention = Element(\"classMention\")\n class_mention.set(\"id\", self.id)\n # TO RETURN\n elements_to_rtn.append(class_mention)\n #mention_class.set('id', self.classification)\n mention_class = SubElement(class_mention, 'mentionClass')\n mention_class.set('id', self.annotation_type)\n mention_class.text = self.text\n\n # Add attributes\n # ASSERTION\n # These fields point to stringSlotMention fields that contain the attributes\n slot_mention_assertion_id = self.id + '1'\n\n has_slot_mention_assertion = SubElement(class_mention, 'hasSlotMention')\n has_slot_mention_assertion.set('id', slot_mention_assertion_id)\n\n string_slot_mention_assertion = Element('stringSlotMention')\n # TO RETURN\n elements_to_rtn.append(string_slot_mention_assertion)\n string_slot_mention_assertion.set('id', slot_mention_assertion_id)\n mention_slot_assertion = SubElement(string_slot_mention_assertion, 'mentionSlot')\n mention_slot_assertion.set('id', 'assertion')\n string_slot_mention_value_assertion = SubElement(string_slot_mention_assertion, 'stringSlotMentionValue')\n string_slot_mention_value_assertion.set('value', self.attributes['assertion'])\n\n # TEMPORALITY\n slot_mention_temporality_id = self.id + '2'\n has_slot_mention_temporality = SubElement(class_mention, 'hasSlotMention')\n has_slot_mention_temporality.set('id', slot_mention_temporality_id)\n\n string_slot_mention_temporality = Element('stringSlotMention')\n # TO RETURN\n elements_to_rtn.append(string_slot_mention_temporality)\n string_slot_mention_temporality.set('id', slot_mention_temporality_id)\n mention_slot_temporality = SubElement(string_slot_mention_temporality, 'mentionSlot')\n mention_slot_temporality.set('id', 'temporality')\n string_slot_mention_value_temporality = SubElement(string_slot_mention_temporality, 'stringSlotMentionValue')\n string_slot_mention_value_temporality.set('value', self.attributes['temporality'])\n\n if self.annotation_type != 'Evidence of SSI':\n return elements_to_rtn\n\n\n # CLASSIFICATION\n # Add 'classification' field for 'infection_type'\n slot_mention_classification_id = self.id + '3'\n has_slot_mention_classification = SubElement(class_mention, 'hasSlotMention')\n has_slot_mention_classification.set('id', slot_mention_classification_id)\n\n string_slot_mention_classification = Element('stringSlotMention')\n # TO RETURN\n elements_to_rtn.append(string_slot_mention_classification)\n string_slot_mention_classification.set('id', slot_mention_classification_id)\n mention_slot_classification = SubElement(string_slot_mention_classification, 'mentionSlot')\n mention_slot_classification.set('id', 'classification')\n string_slot_mention_value_classification = SubElement(string_slot_mention_classification, 
'stringSlotMentionValue')\n string_slot_mention_value_classification.set('value', self.attributes['ssi_class'])\n\n\n\n\n return elements_to_rtn\n #return annotation_body, class_mention",
"def findContent(self):\n article = \"\"\n content = self.soup.find(id='page-container')\n text = content.find(id='js-article-text')\n for para in text.find_all('p', {'class':'mol-para-with-font'}):\n para.text.replace(\"\\xa0\", \" \")\n article += \" \"+para.text\n return article",
"def parse_fact(body,contType):\n\n \"\"\" param body: The site content from get_fact\n param contType: The content from process\n return content: The content in text \"\"\"\n\n parsed = BeautifulSoup(body, 'html5lib')\n if contType == \"news\" :\n fact = parsed.find('div', id='breaking-news')\n elif contType == \"fact\" :\n fact = parsed.find('div', id='content')\n return fact.text.strip()",
"def get_emblem(self):\n return None",
"def inner_html(self):\r\n return self.delegate.InnerHtml",
"def create_test_html():\n return lxml.html.fromstring(\"\"\"<html>\n <head>\n </head>\n <body>\n <div class=\"test\">Some <em>text</em></div>\n <img src=\"some_location\" alt=\"Alt text\" width=540>\n More <b>text</b>\n </body>\n </html>\"\"\")",
"def outer_html(self):\r\n return self.delegate.OuterHtml",
"def build_footer():\n return html.Div(\n children=[\n html.P(\"-MH- 2020\"),\n html.P(\"This app is dedicated to my precious baby girl\"),\n ],\n className=\"footer\",\n )",
"def getImplementation(self):\n return DOMImplementation()",
"def get_wrapper(self, tag):\n return self.objects[tag]['wrapper']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an instance of the FootnoteExtension | def makeExtension(configs=[]):
return FootnoteExtension(configs=configs) | [
"def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div",
"def get_plugin_manager():\n pm = pluggy.PluginManager(\"bitex\")\n pm.add_hookspecs(AnnouncePluginHookSpec)\n pm.load_setuptools_entrypoints(\"bitex\")\n pm.register(AnnouncePluginHookImpl)\n return pm",
"def PLUGIN_ENTRY(): # pylint: disable=invalid-name\n return BapView()",
"def create_notes_obj():\n notes = []\n\n return notes",
"def get_descendant_footnotes(self, instance):\n if not instance.is_root and instance.node_type != 'table':\n return None\n footnotes = []\n for node in instance.cursor.walk():\n for citation in node.footnotecitations.all():\n subtree = DocCursor(instance.cursor.tree,\n citation.footnote_node.identifier)\n footnotes.append(self.serialize_doc_cursor(subtree))\n return footnotes",
"def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta",
"def create_note(\n self, *, label: str | None = None, note: str, **other_settings: Any\n ) -> praw.models.ModNote:\n return self.thing.subreddit.mod.notes.create(\n label=label, note=note, thing=self.thing, **other_settings\n )",
"def instantiate_extensions(self, template):\n return [ ext_cls(template) for ext_cls in self.extensions ]",
"def object(cls, stem):\n # add my suffix to the {stem}\n return stem + cls.extension_object",
"def get_note(self, note_id):\n return self.__get_object('notes', None, note_id)",
"def create_footprint(band_id, points):\n return(manifest.Footprint(band_id=band_id, points=points))",
"def testFootnotes(self, b, u):\n rx = re.compile(r'\\\\f \\+ [^\\\\][^f][^r]')\n if not rx.search(u) == None:\n print('Footnote without back reference in: ' + b)",
"def as_gtin(self, addon = None) -> \"ISSN13\":\n return ISSN13(self, addon)",
"def get_extn(self):\n\n\t\treturn self.__extn",
"def melodic_minor(note):\n\n har = harmonic_minor(note)\n har[5] = notes.augment(har[5])\n return har",
"def load_extension_object(name):\n module = None\n if \".\" not in name:\n try:\n module = import_module(f\"marko.ext.{name}\")\n except ImportError:\n pass\n if module is None:\n try:\n module = import_module(name)\n except ImportError:\n raise ImportError(\n f\"Extension {name} cannot be found. Please check the name.\"\n )\n\n try:\n maker = getattr(module, \"make_extension\")\n except AttributeError:\n raise AttributeError(\n f\"Module {name} does not have 'make_extension' attributte.\"\n )\n return maker",
"def __init__(self, note):\n super(NoteItemData, self).__init__()\n self._note = note",
"def register_extension(extension):\n if not extension in markdown_extensions:\n markdown_extensions.append(extension)",
"def makeFootnoteId(self, id):\r\n return 'fn:%s' % id",
"def get_note(self):\n cmd = self._repo._repo.git\n try:\n return cmd.notes('--ref', self.NOTE_REF, 'show', self.sha)\n except GitCommandError:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure id is unique in set of ids. Append '_1', '_2'... if not | def unique(id, ids):
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
ids.add(id)
return id | [
"def test_unique_based_on_id(self):\n unique = misc.unique_based_on_id\n self.assertSequenceEqual(unique([]), [])\n self.assertSequenceEqual(unique([1, 2, 3]), [1, 2, 3])\n self.assertSequenceEqual(unique([1, 1, 3]), [1, 3])\n self.assertSequenceEqual(unique([[], [], 3]), [[], [], 3])",
"def _unique_id(self, id):\n while id in self.IDs:\n m = IDCOUNT_RE.match(id)\n if m:\n id = '%s_%d'% (m.group(1), int(m.group(2))+1)\n else:\n id = '%s_%d'% (id, 1)\n self.IDs.append(id)\n return id",
"def _checkIdUniqueness(self, id):\n if id == 'time':\n logger.warn(\"Specifying 'time' as a variable is dangerous! Are you \"\n \"sure you know what you're doing?\")\n elif id == 'default':\n logger.warn(\"'default' is a reserved keyword in C. This will cause \"\n \"problems using the C-based integrator.\")\n elif id[0].isdigit():\n raise ValueError(\"The id %s is invalid. ids must not start with a \"\n \"number.\" % id)\n if id in list(self.variables.keys())\\\n or id in list(self.reactions.keys())\\\n or id in list(self.functionDefinitions.keys())\\\n or id in list(self.events.keys())\\\n or id in list(self.constraints.keys())\\\n or id == self.id:\n raise ValueError('The id %s is already in use!' % id)",
"def make_unique(self):\r\n\r\n\t\tseen = {}\r\n\r\n\t\tfor motif in self:\r\n\t\t\tm_id = motif.id\r\n\t\t\tif m_id not in seen:\r\n\t\t\t\tseen[m_id] = 1\r\n\t\t\telse:\r\n\t\t\t\tnew_id = motif.id + \"_\" + str(seen[m_id])\r\n\t\t\t\tmotif.id = new_id\r\n\t\t\t\tseen[m_id] += 1\r\n\r\n\t\treturn(self)",
"def testCantorCreatesUniqueIds(self):\n unique_ids = set()\n for species in range(1000):\n for guild in range(1000):\n ref = cantor_pairing(species, guild)\n self.assertNotIn(ref, unique_ids)\n unique_ids.add(ref)",
"def build_file_id(file_title, file_to_id_map, existing_ids):\n file_id = base_id = re.sub(r\"[\\[\\]\\(\\)#]\", \"\", file_title.lower().replace(\"_\", \"-\").replace(\" \", \"-\"))\n count = 1\n while file_id in existing_ids or file_id in list(file_to_id_map.values()):\n file_id = base_id + \"-\" + str(count)\n count += 1\n\n return file_id",
"def _generate_unique_reply_to_id(cls):\n for _ in range(base_models.MAX_RETRIES):\n new_id = utils.convert_to_hash(\n '%s' % (utils.get_random_int(base_models.RAND_RANGE)),\n REPLY_TO_ID_LENGTH)\n if not cls.get_by_reply_to_id(new_id):\n return new_id\n\n raise Exception('Unique id generator is producing too many collisions.')",
"def fix_duplicate_subject_identifiers_pair1(run=False):\n\n # 40 identifiers to be replaced.\n # {duplicate: replacement}\n new_identifiers = [\n ('066-14120007-5', '066-11120007-5'),\n ('066-14120008-6', '066-11120008-6'),\n ('066-14120009-0', '066-11120009-0'),\n ('066-14120010-1', '066-11120010-1'),\n ('066-14160011-4', '066-11160011-4'),\n ('066-14170008-5', '066-11170008-5'),\n ('066-14170009-6', '066-11170009-6'),\n ('066-14170010-0', '066-11170010-0'),\n ('066-14170011-1', '066-11170011-1'),\n ('066-14170012-2', '066-11170012-2'),\n ('066-14180006-0', '066-11180006-0'),\n ('066-14830011-6', '066-11830011-6'),\n ('066-14830012-0', '066-11830012-0'),\n ('066-14830013-1', '066-11830013-1'),\n ('066-14830014-2', '066-11830014-2'),\n ('066-14860014-0', '066-11860014-0'),\n ('066-14860015-1', '066-11860015-1'),\n ('066-14860016-2', '066-11860016-2'),\n ('066-14860017-3', '066-11860017-3'),\n ('066-14860018-4', '066-11860018-4'),\n ('066-14860019-5', '066-11860019-5'),\n ('066-14860020-6', '066-11860020-6'),\n ('066-14860021-0', '066-11860021-0'),\n ('066-14860022-1', '066-11860022-1'),\n ('066-14860023-2', '066-11860023-2'),\n ('066-14890013-4', '066-11890013-4'),\n ('066-14890014-5', '066-11890014-5'),\n ('066-14210017-2', '066-12210017-2'),\n ('066-14210018-3', '066-12210018-3'),\n ('066-14210019-4', '066-12210019-4'),\n ('066-14210020-5', '066-12210020-5'),\n ('066-14210021-6', '066-12210021-6'),\n ('066-14210022-0', '066-12210022-0'),\n ('066-14210023-1', '066-12210023-1'),\n ('066-14210024-2', '066-12210024-2'),\n ('066-14210025-3', '066-12210025-3'),\n ('066-14210026-4', '066-12210026-4'),\n ('066-14230012-5', '066-12230012-5'),\n ('066-14300009-2', '066-12300009-2'),\n ('066-14300010-3', '066-12300010-3')]\n # convert to dictionary\n duplicates = {item[0]: item[1] for item in new_identifiers}\n\n # fix 40 instances in RegisteredSubject\n n = 0\n for registered_subject in RegisteredSubject.objects.all():\n if registered_subject.subject_identifier in duplicates.keys():\n n += 1\n registered_subject.subject_identifier_aka = registered_subject.subject_identifier\n registered_subject.subject_identifier = duplicates[registered_subject.subject_identifier]\n print '{} has replaced {}'.format(registered_subject.subject_identifier, registered_subject.subject_identifier_aka)\n if run:\n registered_subject.save_base(raw=True, update_fields='subject_identifier', 'subject_identifier_aka')\n\n # fix 40 instances in SubjectConsent\n m = 0\n for subject_consent in SubjectConsent.objects.all():\n if subject_consent.subject_identifier in duplicates.keys():\n m += 1\n subject_consent.subject_identifier_aka = subject_consent.subject_identifier\n subject_consent.subject_identifier = duplicates[subject_consent.subject_identifier]\n print '{} has replaced {}'.format(subject_consent.subject_identifier, subject_consent.subject_identifier_aka)\n if run:\n subject_consent.save_base(raw=True, update_fields='subject_identifier', 'subject_identifier_aka')\n\n print 'Done. Replaced {} subject_identifiers in RegisteredSubject and {} in SubjectConsent.'.format(n, m)",
"def test_identity_is_unique() -> None:\n notifications: set[str] = set()\n for _ in range(1000):\n notifications.add(Notification(\"test\").identity)\n assert len(notifications) == 1000",
"def getuniqueIDs_gff(row):\n if row[\"duplicated_ID\"] is False: return row[\"ID\"]\n else: return \"%s-%i\"%(row[\"ID\"], row[\"numeric_idx\"])",
"def is_new_id(_id):\n return isinstance(_id, text_type) and _id[0] == '_'",
"def _merge_id(ids):\r\n sym_id = \"***************\"\r\n for id1 in ids:\r\n sym_id_temp = \"\"\r\n if len(id1) < len(sym_id):\r\n continue\r\n\r\n for i in range(0, len(sym_id)):\r\n if sym_id[i] == \"*\":\r\n sym_id_temp += id1[i]\r\n else:\r\n sym_id_temp += sym_id[i]\r\n sym_id = sym_id_temp\r\n\r\n return sym_id",
"def generate_uniquie_id(list_employee):\n list_employee_id = []\n for i in range(len(list_employee)):\n list_employee_id.insert(len(list_employee_id), list_employee[i])\n\n valid = False\n while valid == False:\n new_id = random.randint(100000, 999999)\n if not new_id in list_employee_id:\n valid = True\n return new_id",
"def _findUniqueId(self, id):\n ids = self.context.objectIds()\n\n if id not in ids:\n return id\n\n idx = 1\n while idx <= RENAME_AFTER_CREATION_ATTEMPTS:\n new_id = \"%s-%d\" % (id, idx)\n if not new_id in ids:\n return new_id\n idx += 1\n\n return None",
"def add_ids(self, items):\n for item in items:\n self.add_id(item)",
"def make_fields_unique(self, fields):\r\n for i in range(0, len(fields)):\r\n for j in range(i+1, len(fields)):\r\n if fields[i] == fields[j]:\r\n fields[j] += \"'\"",
"def _generate_id(cls, intent):\n id_prefix = '%s.' % intent\n\n for _ in range(base_models.MAX_RETRIES):\n new_id = '%s.%s' % (\n id_prefix,\n utils.convert_to_hash(\n str(utils.get_random_int(base_models.RAND_RANGE)),\n base_models.ID_LENGTH))\n if not cls.get_by_id(new_id):\n return new_id\n\n raise Exception(\n 'The id generator for SentEmailModel is producing too many '\n 'collisions.')",
"def verify_ids_unique(self, incidents, get_unique_id):\n check_unique = set()\n for incident in incidents:\n unique_fire_id = get_unique_id(incident)\n if unique_fire_id in check_unique:\n raise Exception(F\"Fire ID not unique: {unique_fire_id}\")\n check_unique.add(unique_fire_id)",
"def check_duplicate_insert(x, my_set):\r\n return",
"def _generate_unique_ids(self, heads, rels, tails):\n self.heads = OrderedSet(heads)\n self.tails = OrderedSet(tails)\n self.relations = OrderedSet(rels)\n\n # construct sets of unique heads, tails and a set of shared entities\n unique_heads = self.heads - self.tails\n shared_entities = self.heads & self.tails\n unique_tails = self.tails - self.heads\n\n idx = 0\n for entity in unique_heads:\n self.entity2idx[entity] = idx\n self.idx2entity[idx] = entity\n idx += 1\n for entity in shared_entities:\n self.entity2idx[entity] = idx\n self.idx2entity[idx] = entity\n idx += 1\n for entity in unique_tails:\n self.entity2idx[entity] = idx\n self.idx2entity[idx] = entity\n idx += 1\n\n # Now add relations\n idx = 0\n for rel in self.relations:\n self.rel2idx[rel] = idx\n self.idx2rel[idx] = rel\n idx += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add MetaPreprocessor to Markdown instance. | def extendMarkdown(self, md, md_globals):
md.preprocessors.add("meta", MetaPreprocessor(md), "_begin") | [
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def extendMarkdown(self, md, md_globals):\n md.registerExtension(self)\n\n md.parser.blockprocessors.add('details', DetailsProcessor(md.parser), '_begin')",
"def addmeta(self, meta):\n if self.metas is None:\n self.metas = []\n self.metas.append(meta)\n return self",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)",
"def preprocessor(*args, **kwargs):\n logger.debug(\"Adding preprocessor from %s\", args)\n return _unwrap(_preprocessors, *args, **kwargs,\n is_list=False, cache_name=\"preprocessor\")",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def set_meta( self, dataset, **kwd ):\n data_lines = 0\n for line in file( dataset.file_name ):\n line = line.strip()\n if line and not line.startswith( '#' ):\n data_lines += 1\n dataset.metadata.data_lines = data_lines",
"def _add_meta(cls, blob):\n # TODO schema metadata, I don't know enough to do this correctly yet\n #schema_version = cls.validator_class.ID_OF(cls.validator_class.META_SCHEMA)\n #schema['$schema'] = schema_version\n\n #schema_id = '#/temp/' + cls.__name__ + '.json' # FIXME version ...\n #schema['$id'] = schema_id",
"def add_meta_info(self, meta_info):\r\n self.meta_info = {**self.meta_info, **meta_info}",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)",
"def addMeta(self, path='.'):\n if not path.endswith('/'): path += '/'\n if path != './': self.addBase(path)\n addon_forename = getAddonFirstName(path)\n addFile(path + 'MANIFEST.in', 'recursive-include ' + addon_forename + ' *\\nrecursive-include docs *\\ninclude *.rst\\nglobal-exclude *.pyc\\nglobal-exclude *.pyo\\n')\n addFile(path + 'README.rst', 'Introduction\\n============\\n\\n\\\nAn addon for Plone, aiming to [be so useful, you never want to miss it again].\\n')\n addDirs(path + 'docs')\n addDocs(path)\n setSetupPy(path + 'setup.py')",
"def _add_macro(self, complex_instance):\n macro = Macro(complex_instance.name, complex_instance.primitives)\n if macro not in self.macros:\n self.macros.append(macro)",
"def register(self) -> None:\n if self not in sys.meta_path:\n sys.meta_path.append(self)",
"def add_from_line(self, text=\"\"):\n self.indexed = False\n\n # match the header to the meta format\n text = text.rstrip()\n m = self.meta_entry_regexp.match(text)\n entry = []\n if m:\n # get the tag and content\n tag = m.group(1)\n content = m.group(2)\n content = content.rstrip(\">\")\n\n # get the pairs in the meta line\n pairs = content.split(\",\")\n for p in pairs:\n items = p.split(\"=\")\n if len(items) > 2:\n continue\n if len(items) == 2:\n entry.append((items[0], items[1]))\n else:\n entry.append((tag, items[0]))\n\n # add the meta information\n self.meta.append((tag, entry))\n else:\n raise ValueError(\"Could not parse meta line: %s\" % text)",
"def applyMeta(self):\n self.interface.setMeta('kernel', 'phatk2 r%s' % self.REVISION)\n self.interface.setMeta('device',\n self.device.name.replace('\\x00','').strip())\n self.interface.setMeta('cores', self.device.max_compute_units)",
"def AddPreprocesorSymbol(self, symbol):\n \n assert(isinstance(symbol, str))\n \n self.preprocessor_symbols.append(symbol)",
"def add_plim_renderer(config, extension, mako_settings_prefix='mako.', preprocessor='plim.preprocessor'):\r\n renderer_factory = MakoRendererFactory()\r\n config.add_renderer(extension, renderer_factory)\r\n\r\n def register():\r\n settings = copy.copy(config.registry.settings)\r\n settings['{prefix}preprocessor'.format(prefix=mako_settings_prefix)] = preprocessor\r\n\r\n opts = parse_options_from_settings(settings, mako_settings_prefix, config.maybe_dotted)\r\n lookup = PkgResourceTemplateLookup(**opts)\r\n\r\n renderer_factory.lookup = lookup\r\n\r\n # read about config.action() at\r\n # http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/extconfig.html#using-config-action-in-a-directive\r\n config.action(('plim-renderer', extension), register)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a url from the label, a base, and an end. | def build_url(label, base, end):
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end) | [
"def make_url(base_url,start_record, per_page,page):\n final_url = base_url+f'from={start_record}&count={per_page}&page={page}'\n return final_url",
"def construct_url(context, request):",
"def __url_builder(self, endpoint: str, **kwargs: dict) -> str:\n\n endpoint = self.__clean_endpoints_string(endpoint)\n if kwargs != {}:\n endpoint = endpoint.format(**kwargs)\n elif type(endpoint) == tuple:\n endpoint = endpoint[0]\n endpoint += \"&api_key={}\".format(API_KEY)\n return endpoint",
"def _construct_url(self, interface, suffix=''):\n # %22 is the encoding for double quotes (\") in urls.\n # % escapes the % character.\n # Double quotes are necessary in the url because switch ports contain\n # forward slashes (/), ex. 101/0/10 is encoded as \"101/0/10\".\n return '%(hostname)s/rest/config/running/interface/' \\\n '%(interface_type)s/%%22%(interface)s%%22%(suffix)s' \\\n % {\n 'hostname': self.hostname,\n 'interface_type': self.interface_type,\n 'interface': interface,\n 'suffix': '/switchport/%s' % suffix if suffix else ''\n }",
"def __make_url(self, uri):\n return '{}{}'.format(self.base_url, uri)",
"def build_base_url(host, port, protocol):\n\n base_url = \"%s://%s\" % (protocol, host)\n if protocol.lower() == \"http\" and int(port) != 80:\n base_url += \":%d\" % int(port)\n elif protocol.lower() == \"https\" and int(port) != 443:\n base_url += \":%d\" % int(port)\n base_url += \"/\"\n return base_url",
"def _generate_url(endpoint):\n\n if is_collection(endpoint):\n resource = map(str, endpoint)\n resource = '/'.join(endpoint)\n else:\n resource = endpoint\n\n return (settings.base_url + resource)",
"def build_url(self, template: str, **kwargs) -> str:\n quoted = {k: quote(v) for k, v in kwargs.items()}\n suffix = template.format(**quoted).lstrip(\"/\")\n return \"{prefix}/{suffix}\".format(prefix=self.api_root, suffix=suffix)",
"def _ger_full_url(self, endpoint):\r\n return '{}{}{}'.format(self.url, self._base_path, endpoint)",
"def baseURL():",
"def make_url(script_name, base_request=None, **fields):\n\n request = apply(WebRequest, (script_name, base_request), fields)\n return request.AsUrl()",
"def make_entity_base_url(url):\n return url if url.endswith(\"/\") else url + \"/\"",
"def build_url(self, endpoint: str, use_api: bool = False) -> str:\n\n if use_api:\n url = self.api_resource + endpoint\n else:\n url = self.resource + endpoint\n\n return url",
"def _build_url(self, route):\n return \"{0}/{1}\".format(self.base_url, route)",
"def build_url(base_url,slot_start,slot_end):\n\n if is_timezoneless(slot_start) or is_timezoneless(slot_end):\n raise ValueError(\"Whoa, whoa, whoa! One of those times is unzoned!\")\n # Since a slot_end that is too far in the future results\n # in a 400 (reason = \"Bad Request\"), limit how far in\n # the future slot_end may be\n arbitrary_limit = datetime.now(pytz.utc) + timedelta(hours = 1)\n if slot_end.astimezone(pytz.utc) > arbitrary_limit:\n slot_end = arbitrary_limit\n\n date_format = '%Y-%m-%d'\n time_format = '%H%M%S'\n url_parts = [slot_start.astimezone(pytz.utc).strftime(date_format),\n slot_start.astimezone(pytz.utc).strftime(time_format),\n slot_end.astimezone(pytz.utc).strftime(date_format),\n slot_end.astimezone(pytz.utc).strftime(time_format)]\n\n url = base_url + '/'.join(url_parts)\n return url",
"def create_url(fmt: str) -> str:\n return fmt % tracker_url",
"def build_url(base, additional_params=None):\n url = urllib.parse.urlparse(base)\n query_params = {}\n query_params.update(urllib.parse.parse_qsl(url.query, True))\n if additional_params is not None:\n query_params.update(additional_params)\n for k, v in additional_params.items():\n if v is None:\n query_params.pop(k)\n\n return urllib.parse.urlunparse(\n (\n url.scheme,\n url.netloc,\n url.path,\n url.params,\n urllib.parse.urlencode(query_params),\n url.fragment,\n )\n )",
"def url(self, ns, target):\n ns, url, title = self[ns]\n maxargnum = max([0] + [int(a[1:]) for a in\n re.findall(InterWikiMap._argspec_re, url)])\n target, query, fragment = split_url_into_path_query_fragment(target)\n if maxargnum > 0:\n args = target.split(':', (maxargnum - 1))\n else:\n args = [target]\n url = self._expand_or_append(url, args)\n ntarget, nquery, nfragment = split_url_into_path_query_fragment(url)\n if query and nquery:\n nquery = '%s&%s' % (nquery, query[1:])\n else:\n nquery = nquery or query\n nfragment = fragment or nfragment # user provided takes precedence\n expanded_url = ntarget + nquery + nfragment\n expanded_title = self._expand(title, args)\n if expanded_title == title:\n expanded_title = _(\"%(target)s in %(name)s\",\n target=target, name=title)\n return expanded_url, expanded_title",
"def create_url(self, URL):\r\n return '{0}{1}'.format(self.url, URL)",
"def _buildUrl(self, url_parts=None):\n url = [\n self.base_url,\n \"?\",\n \"&\".join(url_parts),\n \"&api_key=\",\n self.api_key\n ]\n return ''.join(url)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build the default set of inline patterns for Markdown. | def build_inlinepatterns(md_instance, **kwargs):
inlinePatterns = odict.OrderedDict()
inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
inlinePatterns["image_reference"] = \
ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance)
inlinePatterns["short_reference"] = \
ReferencePattern(SHORT_REF_RE, md_instance)
inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
if md_instance.safeMode != 'escape':
inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
if md_instance.smart_emphasis:
inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
else:
inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
return inlinePatterns | [
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def markdown(self, s):\n\n \"\"\"\n Start with some helper functions to process each markdown type.\n Each markdown element has a method to handle the specifics. Each\n method is passed the following parameters:\n\n Arguments:\n m -- a list of the elements parsed for the match. m[0] is\n the full matched substring within s.\n s -- the string to process\n new_str -- the string used to build the replacement string.\n Generally of the format 'stuff{}stuff', where\n 'stuff' is markdown, and {} is replaced with the\n text between the markdown tags.\n\n Returns:\n Modified string with inline markdown element expanded.\n \"\"\"\n def md_vars(m, s, new_str):\n \"\"\"\n Handle inline link and vars: [variable_name]\n\n See docstring in code for argument information.\n \"\"\"\n def makeJitAttrs(params):\n d = {l[0]: l[1] for l in self._special_parameter.regex.findall(params)}\n return d\n\n self.debug.print(\"mdvars(<strong>m[0])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[0])))\n self.debug.print(\"mdvars(<strong>m[1])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[1])))\n self.debug.print(\"mdvars(<strong>s)=</strong><em>{}</em>\".format(HtmlUtils.escape_html(s)))\n jit_attrs = None if not m[3] else makeJitAttrs(m[3])\n if self._namespaces.exists(m[1]):\n # Substitute the variable name with the value\n c, v = self._stripClass(self._namespaces.getValue(m[1], jit_attrs))\n v = self._md_value(v)\n if(not c):\n # print(\"OLD: {}<br />\\nNEW: {}<br />\".format(m[0], v))\n s = s.replace(m[0], v)\n else:\n s = s.replace(m[0], '<{0}{1}>{2}</{0}>'.format('span', c, v))\n else:\n # No need to do anything here, just leave the unknown link/variable alone\n pass\n\n return s\n\n def md_plain(m, s, new_str):\n \"\"\"\n Handle simple replacement markdown. e.g. *foo* or **bar**, etc.\n\n See docstring in code for argument information.\n \"\"\"\n return s.replace(m[0], new_str.format(m[1]))\n\n # A map linking markdown keys to processor functions\n markdownTypes = [\n ('vars', md_vars),\n ('strong', md_plain),\n ('emphasis', md_plain),\n ('ins', md_plain),\n ('del', md_plain),\n ]\n\n self._inc_nesting_level()\n self.debug.print(\"markdown({})\".format(HtmlUtils.escape_html(s)))\n # For each type of markdown\n for key, md_func in markdownTypes:\n md_obj = self._regex_markdown[key]\n matches = findall(md_obj.regex, s) # find all the matches\n for m in matches:\n # for each match, process it\n s = md_func(m, s, md_obj.new_str)\n\n #print(\"RETURN: {}\".format(s))\n self._dec_nesting_level()\n return s # return the processed string",
"def process_markdown(questions):\n for question in questions:\n question['question'] = markdown(question['question'])\n for i in range(len(question['answers'])):\n this = question['answers'][i][0]\n question['answers'][i][0] = markdown_nopara(this)\n return questions",
"def set_markdown_extensions(site_settings):\n # Base markdown extensions support \"fenced_code\".\n markdown_extensions = [\"fenced_code\"]\n if site_settings[\"pygments\"]:\n markdown_extensions.extend([\n \"extra\",\n \"codehilite(css_class=hlcode)\",\n \"toc(title=Table of Contents)\"\n ])\n\n return markdown_extensions",
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def apply_markdown_formatting(list_of_records):\n markdown_string = \"\"\n\n for item in list_of_records:\n line = \"#\" + \" \" + item + \"\\n\\n\"\n markdown_string += line\n\n print(markdown_string)\n return markdown_string",
"def _create_default_regexes() -> None:\n _regex_cache.update({\n # categories\n 'category': (r'\\[\\[ *(?:%s)\\s*:.*?\\]\\]',\n lambda site: '|'.join(site.namespaces[14])),\n 'comment': re.compile(r'<!--[\\s\\S]*?-->'),\n # files\n 'file': (FILE_LINK_REGEX, lambda site: '|'.join(site.namespaces[6])),\n # section headers\n 'header': re.compile(\n r'(?:(?<=\\n)|\\A)(?:<!--[\\s\\S]*?-->)*'\n r'(=(?:[^\\n]|<!--[\\s\\S]*?-->)+=)'\n r' *(?:<!--[\\s\\S]*?--> *)*(?=\\n|\\Z)'),\n # external links\n 'hyperlink': compileLinkR(),\n # also finds links to foreign sites with preleading \":\"\n 'interwiki': (\n r'\\[\\[:?(%s)\\s?:[^\\]]*\\]\\]\\s*',\n lambda site: '|'.join(\n ignore_case(i) for i in site.validLanguageLinks()\n + list(site.family.obsolete.keys()))),\n # Module invocations (currently only Lua)\n 'invoke': (\n r'\\{\\{\\s*\\#(?:%s):[\\s\\S]*?\\}\\}',\n lambda site: '|'.join(\n ignore_case(mw) for mw in site.getmagicwords('invoke'))),\n # this matches internal wikilinks, but also interwiki, categories, and\n # images.\n 'link': re.compile(r'\\[\\[[^\\]|]*(\\|[^\\]]*)?\\]\\]'),\n # pagelist tag (used in Proofread extension).\n 'pagelist': re.compile(r'<{}[\\s\\S]*?/>'\n .format(ignore_case('pagelist'))),\n # Wikibase property inclusions\n 'property': (\n r'\\{\\{\\s*\\#(?:%s):\\s*[Pp]\\d+.*?\\}\\}',\n lambda site: '|'.join(\n ignore_case(mw) for mw in site.getmagicwords('property'))),\n # lines that start with a colon or more will be indented\n 'startcolon': re.compile(r'(?:(?<=\\n)|\\A):(.*?)(?=\\n|\\Z)'),\n # lines that start with a space are shown in a monospace font and\n # have whitespace preserved.\n 'startspace': re.compile(r'(?:(?<=\\n)|\\A) (.*?)(?=\\n|\\Z)'),\n # tables often have whitespace that is used to improve wiki\n # source code readability.\n # TODO: handle nested tables.\n 'table': re.compile(\n r'(?:(?<=\\n)|\\A){\\|[\\S\\s]*?\\n\\|}|%s' % _tag_pattern('table')),\n 'template': NESTED_TEMPLATE_REGEX,\n })",
"def markdown(self):\n cfg_fname = '%s.md' % self._configurable.__name__.lower()\n\n markdown = [self.doc]\n\n if self._style is True:\n markdown.append(\n 'This key must be set to a dictionary. Its structure is defined '\n '[here](%s). Not specifying the key is equivalent to specifying '\n 'an empty dictionary.' % cfg_fname)\n yield self.key, '\\n\\n'.join(markdown)\n return\n\n markdown.append(\n 'More information about this structure may be found [here](%s).' % cfg_fname)\n\n segue = 'The following configuration keys are used to configure this structure.'\n if self._optional:\n segue += (' This structure is optional, so it is legal to not specify '\n 'any of them, except when this structure is required by context.')\n markdown.append(segue)\n\n for loader in self._configurable.loaders:\n for key, _ in loader.markdown():\n markdown.append('### `%s%s`' % (self.prefix, key))\n #doc = '\\n\\n'.join((\n #'#' + paragraph if paragraph.startswith('###') else paragraph\n #for paragraph in doc.split('\\n\\n')))\n #markdown.append(doc)\n markdown.append('This key is documented [here](%s#%s).' % (cfg_fname, key))\n\n markdown = '\\n\\n'.join(markdown)\n\n if self.prefix:\n yield '%s*' % self.prefix, markdown\n else:\n yield '%s%s keys' % (self.key[0].upper(), self.key[1:]), markdown",
"def test_biolink_markdown(self):\n self.directory_generator(\n \"markdown_no_image\", MarkdownGenerator, serialize_args=dict(image_dir=False)\n )\n # self.directory_generator('markdown_image', MarkdownGenerator, serialize_args=dict(image_dir=True))",
"def generate_markdown(pelican):\n global enabled\n if not enabled:\n return\n\n include_regex = pelican.settings.get('PELIGRAM_INCLUDE_REGEX')\n media_patterns=pelican.settings.get(\"PELIGRAM_MEDIA_PATTERNS\", DEFAULT_INSTAGRAM_MEDIA_PATTERNS)\n\n if include_regex:\n pattern = re.compile(include_regex)\n is_included = lambda name: pattern.match(name)\n else:\n is_included = lambda name: not name.startswith('.')\n\n in_path = instagram_data_path(pelican)\n logger.debug(\"pelican-gram started\")\n processor=_processor(pelican,in_path)\n for dirpath, _, filenames in os.walk(in_path):\n for filename in filenames:\n if is_included(filename):\n if filename.endswith('.json'):\n logger.debug(f\"Processing file: {filename}\")\n media_filenames=sum(list(map(lambda pattern: fnmatch.filter(filenames,path.splitext(filename)[0]+pattern),media_patterns)),[])\n processor.process_instagram_metadata(filename,media_filenames)",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def _handleInline(self, line):\r\n\r\n if not(line):\r\n return [self.doc.createTextNode(' ')]\r\n\r\n for pattern in self.inlinePatterns:\r\n list = self._applyPattern( line, pattern)\r\n if list: return list\r\n\r\n return [self.doc.createTextNode(line)]",
"def parse_md_config(self, source):\n md1 = markdown.Markdown(extensions=[\"markdown.extensions.meta\"])\n md1.convert(source)\n md_meta = getattr(md1, \"Meta\")\n\n # recreate an instance of Markdown object\n md2 = markdown.Markdown(extensions=self.ext_list)\n if self.to64:\n if not self.base_dir:\n raise ValueError(\n \"base dir is required while convert from text and enable convert local image to base64\")\n md2.inlinePatterns[\"image_link\"] = ImageCheckPattern(self.base_dir, md2)\n return md2, md_meta",
"def apply_markdown( request ):\r\n markup = markdown( request.POST['data'], extensions=['codehilite'] )\r\n return render_to_response( 'utils/markup/markdown/preview.html',\r\n {'preview':markup},\r\n context_instance=RequestContext(request))",
"def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)",
"def _get_all_markdown_links(root: Path, files: List[Path]) -> List[_MarkdownLink]:\n pattern = re.compile(r\"\\[(?P<string>.+?)\\]\\((?P<link>\\S+?)\\)\")\n links = []\n for file in files:\n for match in pattern.finditer(file.read_text()):\n links.append(_MarkdownLink(root, file, match.group(\"string\"), match.group(\"link\")))\n return links",
"def references_to_markdown(references):\n\n pybtex_style = find_plugin('pybtex.style.formatting', 'plain')()\n pybtex_md_backend = find_plugin('pybtex.backends', 'markdown')\n pybtex_parser = Parser()\n\n # hack to not print labels (may remove this later)\n def write_entry(self, key, label, text):\n self.output(u'%s \\n' % text)\n pybtex_md_backend.write_entry = write_entry\n pybtex_md_backend = pybtex_md_backend()\n\n data = pybtex_parser.parse_stream(StringIO(references))\n data_formatted = pybtex_style.format_entries(data.entries.itervalues())\n output = StringIO()\n pybtex_md_backend.write_to_stream(data_formatted, output)\n\n # add blockquote style\n references_md = '> {}'.format(output.getvalue())\n references_md.replace('\\n', '\\n> ')\n\n return references_md"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return unescaped text given text with an inline placeholder. | def unescape(self, text):
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError:
return text
def itertext(el):
' Reimplement Element.itertext for older python versions '
tag = el.tag
if not isinstance(tag, util.string_type) and tag is not None:
return
if el.text:
yield el.text
for e in el:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
def get_stash(m):
id = m.group(1)
if id in stash:
value = stash.get(id)
if isinstance(value, util.string_type):
return value
else:
# An etree Element - return text content only
return ''.join(itertext(value))
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) | [
"def templatize(self, text, context):\n return Template(\"{% autoescape off %}\" + text + \"{% endautoescape %}\").render(context)",
"def get_inline_expression(self, text):\n text = text.strip()\n if not text.startswith(self.inline_tags[0]) or not text.endswith(\n self.inline_tags[1]\n ):\n return\n\n return text[2:-2]",
"def codeblock(text):\n import textwrap # this is a slow import, do it lazy\n return textwrap.dedent(text).strip('\\n')",
"def _replacePlaceholders(self, text, phrases):\n for i, phrase in enumerate(phrases):\n text = text.replace('~%d~' % i, phrases[i])\n\n return text",
"def strip_markup(styled_text: str) -> str:\n t = markup.render(styled_text)\n return t.plain",
"def get_text_from_placeholder(placeholder, language=None, request=None):\n if not placeholder:\n return ''\n if not language:\n language = get_current_language()\n if not request:\n request = get_request(language)\n\n bits = []\n plugins = placeholder.cmsplugin_set.filter(language=language)\n for base_plugin in plugins:\n instance, plugin_type = base_plugin.get_plugin_instance()\n if instance is None:\n continue\n bits.append(instance.render_plugin(context=RequestContext(request)))\n return force_unicode(strip_tags(' '.join(bits)))",
"def strip_newsgroup_quoting(text):\n ...",
"def set_placeholder_text(self, placeholder_text):\n self.widget.SetPlaceHolderText(placeholder_text)",
"def inline(expression):\n return expression",
"def __process_text(self, text):\n return text.replace('\\n', '||')",
"def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),\n self.strip(example))",
"def add_escapement_back_for_not_comments(text):\n return text.replace(COMMENT_MARKER, ESCAPE_SYM+COMMENT_MARKER)",
"def StripAnsiText(text):\n return sgr_re.sub(\"\", text)",
"def plain(self, elem, theme):\n return theme.render(elem.text)",
"def remove_brackets_with_content(text: str):\n pattern = r' ?\\(.*?\\)'\n\n return re.sub(pattern, '', text)",
"def text_transformations(self, text):\n txt = super(TextModel, self).text_transformations(text)\n return re.sub('~+', '~', txt)",
"def text_line(self):\n text = self.text.replace('[x]', '')\n text = text.replace('\\n', ' ')\n text = text.replace('$', '')\n text = re.sub(\"(<b>)(.*?)(</b>)\", \"**\\g<2>**\", text)\n text = re.sub(\"(<i>)(.*?)(</i>)\", \"_\\g<2>_\", text)\n return text",
"def FilterInput(self, txt):\n txt = RE_ANSI_START.sub('', txt)\n return RE_ANSI_END.sub('', txt)",
"def unwrap(text, wrapstr='\\n '):\n return text.replace(wrapstr, '').strip()",
"def unescape(text):\n\n return __entity_regex.sub(__replacement_for_entity, text)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return unescaped text given text with an inline placeholder. | def unescape(self, text):
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError:
return text
def get_stash(m):
id = m.group(1)
value = stash.get(id)
if value is not None:
try:
return self.markdown.serializer(value)
except:
return '\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text) | [
"def templatize(self, text, context):\n return Template(\"{% autoescape off %}\" + text + \"{% endautoescape %}\").render(context)",
"def get_inline_expression(self, text):\n text = text.strip()\n if not text.startswith(self.inline_tags[0]) or not text.endswith(\n self.inline_tags[1]\n ):\n return\n\n return text[2:-2]",
"def codeblock(text):\n import textwrap # this is a slow import, do it lazy\n return textwrap.dedent(text).strip('\\n')",
"def _replacePlaceholders(self, text, phrases):\n for i, phrase in enumerate(phrases):\n text = text.replace('~%d~' % i, phrases[i])\n\n return text",
"def strip_markup(styled_text: str) -> str:\n t = markup.render(styled_text)\n return t.plain",
"def get_text_from_placeholder(placeholder, language=None, request=None):\n if not placeholder:\n return ''\n if not language:\n language = get_current_language()\n if not request:\n request = get_request(language)\n\n bits = []\n plugins = placeholder.cmsplugin_set.filter(language=language)\n for base_plugin in plugins:\n instance, plugin_type = base_plugin.get_plugin_instance()\n if instance is None:\n continue\n bits.append(instance.render_plugin(context=RequestContext(request)))\n return force_unicode(strip_tags(' '.join(bits)))",
"def strip_newsgroup_quoting(text):\n ...",
"def set_placeholder_text(self, placeholder_text):\n self.widget.SetPlaceHolderText(placeholder_text)",
"def inline(expression):\n return expression",
"def __process_text(self, text):\n return text.replace('\\n', '||')",
"def testTemplateInline(self):\n example = 'Hello [location]'\n template = '{{ inline example }}'\n self.parser['example'] = self.tmpl(example)\n self.assertEqual(self.strip(str(self.tmpl(template, parser=self.parser))),\n self.strip(example))",
"def add_escapement_back_for_not_comments(text):\n return text.replace(COMMENT_MARKER, ESCAPE_SYM+COMMENT_MARKER)",
"def StripAnsiText(text):\n return sgr_re.sub(\"\", text)",
"def plain(self, elem, theme):\n return theme.render(elem.text)",
"def remove_brackets_with_content(text: str):\n pattern = r' ?\\(.*?\\)'\n\n return re.sub(pattern, '', text)",
"def text_transformations(self, text):\n txt = super(TextModel, self).text_transformations(text)\n return re.sub('~+', '~', txt)",
"def text_line(self):\n text = self.text.replace('[x]', '')\n text = text.replace('\\n', ' ')\n text = text.replace('$', '')\n text = re.sub(\"(<b>)(.*?)(</b>)\", \"**\\g<2>**\", text)\n text = re.sub(\"(<i>)(.*?)(</i>)\", \"_\\g<2>_\", text)\n return text",
"def FilterInput(self, txt):\n txt = RE_ANSI_START.sub('', txt)\n return RE_ANSI_END.sub('', txt)",
"def unwrap(text, wrapstr='\\n '):\n return text.replace(wrapstr, '').strip()",
"def unescape(text):\n\n return __entity_regex.sub(__replacement_for_entity, text)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sanitize a url against xss attacks in "safe_mode". | def sanitize_url(self, url):
url = url.replace(' ', '%20')
if not self.markdown.safeMode:
            # Return immediately, bypassing parsing.
return url
try:
scheme, netloc, path, params, query, fragment = url = urlparse(url)
except ValueError:
# Bad url - so bad it couldn't be parsed.
return ''
locless_schemes = ['', 'mailto', 'news']
allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
if scheme not in allowed_schemes:
# Not a known (allowed) scheme. Not safe.
return ''
if netloc == '' and scheme not in locless_schemes:
# This should not happen. Treat as suspect.
return ''
for part in url[2:]:
if ":" in part:
# A colon in "path", "parameters", "query" or "fragment" is suspect.
return ''
# Url passes all tests. Return url as-is.
return urlunparse(url) | [
"def _sanitizeURL(self, couchURL):\n return couchURL",
"def safe_uri(uri):\n path, query, frag = split_path(uri)\n safe = True\n for part in (path, query, frag):\n safe = safe and safe_chars_regex.search(part)\n return safe",
"def make_valid_url(self, url):\n\n # Replace spaces between words\n # with '%20'.\n # For example http://www.foo.com/bar/this file.html\n # Fix: Use regex instead of blind\n # replacement.\n if self.wspacere.search(url):\n url = re.sub(r'\\s', '%20', url)\n \n return url",
"def clean_url(url) -> str:\n if 'http' not in url:\n return f'http://{url}'\n return url",
"def validateURL(url):",
"def clean_review_url(self, url):\n url = URL(url)\n if not url.host:\n url = self.base_url.join(url)\n return url",
"def __cleanUrl(self, url):\n cleanurl = QUrl(url)\n if cleanurl.password():\n # don't save the password in the history\n cleanurl.setPassword(\"\")\n if cleanurl.host():\n # convert host to lower case\n cleanurl.setHost(url.host().lower())\n \n return cleanurl",
"def escape_url(raw):\n return html.escape(quote(html.unescape(raw), safe=\"/#:()*?=%@+,&\"))",
"def _safe_clean(self, untrusted_text, strip_characters=False):\n try:\n return clean(untrusted_text, strip=strip_characters)\n except KeyError:\n current_app.logger.warn('A malicious user tried to crash the application by '\n 'sending illegal UTF-8 in an URI or other untrusted '\n 'user input.')\n abort(400)",
"def sanitize_input(self, untrusted_text, strip_characters=False):\n try:\n # Test if the untrusted text is percent encoded\n # before running bleech.\n if unquote(untrusted_text) != untrusted_text:\n use_percent_encoding = True\n else:\n use_percent_encoding = False\n\n return self._sanitize_input(untrusted_text,\n strip_characters=strip_characters,\n percent_encoded=use_percent_encoding)\n\n except UnicodeDecodeError:\n current_app.logger.warn('A malicious user tried to crash the application '\n 'by sending non-unicode input in a GET request')\n abort(400)",
"def is_safe_url(url, host):\r\n if not url:\r\n return False\r\n\r\n parsed = urllib.parse.urlparse(url)\r\n\r\n return ((not parsed.netloc or parsed.netloc == host) and\r\n (not parsed.scheme or parsed.scheme in [\"http\", \"https\"]))",
"def Sanitize(Content): # for your protection\n \n ### strip any illegal HTML\n Content = re.sub(r\"(?is)<.+?>\", HTMLChecker, Content)\n\n ### validate any links\n Content = re.sub(r'(?is)(<A .*?HREF=\")(.+?)(\".*?>)', LinkChecker, Content)\n \n ### then escape any funky characters\n ### TODO: is this really neccesary for the database?\n \n # Content = re.escape(Content)\n\n return Content",
"def fix_url(self, url: str) -> str:\r\n return re.sub(r'((/|^)\\w\\w)_(\\w\\w(/|$|.))', r'\\1-\\3',\r\n (url or '').replace('/content/asp/', '/'), count=1)",
"def repair_broken_urls(line):\n def _chop_spaces_in_url_match(m):\n \"\"\"Suppresses spaces in a matched URL.\"\"\"\n return m.group(1).replace(\" \", \"\")\n for ptn in re_list_url_repair_patterns:\n line = ptn.sub(_chop_spaces_in_url_match, line)\n return line",
"def normalize_url(self, url):\n match = self.url_matcher.match(url)\n url = match.group(0)\n url = self.url_matcher.sub(\"https://arxiv.org/abs/\\\\3\", url)\n return url",
"def _ensure_quoted(url):\n return requests.compat.quote(requests.compat.unquote(url))",
"def sanitize_shared_url(self, shared_url):\n if self.DEBUG:\n print('\\nSanitizing', shared_url, '-> ', end='')\n\n if shared_url is None:\n return ''\n\n result = re.sub(r\"(.*app\\.box\\.com\\/)(shared\\/static)(\\/.*)\", r\"\\1s\\3\", shared_url)\n\n if self.DEBUG:\n print(result)\n return result",
"def _safe_clean(untrusted_text, strip_characters=False):\n try:\n return clean(untrusted_text, strip=strip_characters)\n except KeyError:\n logger.warn('A malicious user tried to crash the application by '\n 'sending illegal UTF-8 in an URI or other untrusted '\n 'user input.')\n raise HTTPBadRequest(\"Non-unicode input, please try again.\")",
"def sanitize_domain(domain):\n whitelist_pattern = re.compile(r\"[^\\.\\-_a-zA-Z0-9]\")\n return whitelist_pattern.sub(\"\", domain)",
"def sanitize(self, s):\n s = s.lower().replace(\" \", \"\").replace(\"-\", \"\").replace(\",\", \"\").replace(\":\", \"\").replace(\"&\",\"and\").replace(\"(\",\"\").replace(\")\",\"\").strip()\n # Additional sanitization rules\n s = s.replace(\"sulphate\",\"sulfate\")\n return s"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
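Editor's note: the negatives closing the record above are all URL/HTML sanitizers. The same-origin check in the `is_safe_url` negative is the pattern most worth seeing in isolation: accept only relative URLs, or absolute http(s) URLs whose host matches the expected one. A small self-contained variant of that check — the function name and the `example.com` host are illustrative, not taken from the dataset:

```python
from urllib.parse import urlparse


def is_safe_redirect(url, allowed_host):
    """Accept only relative URLs or http(s) URLs pointing at allowed_host."""
    if not url:
        return False
    parsed = urlparse(url)
    host_ok = not parsed.netloc or parsed.netloc == allowed_host
    scheme_ok = not parsed.scheme or parsed.scheme in ("http", "https")
    return host_ok and scheme_ok


assert is_safe_redirect("/dashboard", "example.com")
assert is_safe_redirect("https://example.com/next", "example.com")
assert not is_safe_redirect("javascript:alert(1)", "example.com")   # bad scheme
assert not is_safe_redirect("https://evil.test/", "example.com")    # wrong host
```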
Returns the value of the item at the given zero-based index. | def value_for_index(self, index):
return self[self.keyOrder[index]] | [
"def __getitem__( self, index ) :\n\n return( self.zas[index] )",
"def value_at(self, index):\n index = np.where(self.indices == index)[0]\n return self.data[index] if index.size != 0 else 0",
"def __getitem__(self, index=0):\n if index < 0:\n index = len(self) + index\n return self._get(index)",
"def __getitem__(self, index):\n return self.q[index]",
"def get(self, index: 'int const') -> \"float\":\n return _coin.SoShininessElement_get(self, index)",
"def get(self, index: int) -> int: \n i = 0\n cur = self.head\n while cur is not None:\n if i==index:\n return cur.val\n i+=1\n cur = cur.nextNode\n return -1",
"def get(self, index: int) -> int:\n if index >= self.length:\n return -1\n else:\n i = 0\n tmp_node: Node = self.first\n while i < index:\n tmp_node = tmp_node.next\n i += 1\n return tmp_node.val if tmp_node else -1",
"def value_at(self, pos):\n return self.data[self._data_index(pos)]",
"def getItem(self, level, index):\n return self.memory[level].getItem(index)",
"def get_by_index(self, index):\n return self.tile_list[index]",
"def get_byindex(self, index):\n return self.dict_pref[self.polygon_objs[index]]",
"def __getitem__(self, i):\n return self._data[i]",
"def __getitem__(self, index):\n return self.attribute_values[index]",
"def variant(self, index):\r\n # Allows negative indices, starting from the end\r\n return self._parts[1 + index if index >= 0 else index]",
"def get_card_value(self, index):\n return VALUES[self._hand[index].get_rank()]",
"def get(self, index: 'int const') -> \"float\":\n return _coin.SoTransparencyElement_get(self, index)",
"def getSlot(self, index: int) -> InventoryItem:\r\n\t\treturn self._content[index]",
"def getitem(list, index, default=None):\r\n try:\r\n return list[index]\r\n except IndexError:\r\n return default",
"def get_value_or_none(data, index):\n try:\n return data[index]\n except IndexError:\n return None",
"def get_value_for_column_and_index(self, column, index):\n return self._data[column][index]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
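Editor's note: the positive in the record above assumes a mapping that tracks its key order in a `keyOrder` list (as Django's old `SortedDict` did). A minimal sketch of that context — the `TinySortedDict` class is a toy stand-in added purely for illustration; plain dicts already preserve insertion order since Python 3.7, so the explicit list is only there to mirror what the positive expects:

```python
class TinySortedDict(dict):
    """Toy ordered mapping: keeps insertion order in a keyOrder list."""

    def __init__(self):
        super().__init__()
        self.keyOrder = []

    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super().__setitem__(key, value)

    def value_for_index(self, index):
        # Resolve the zero-based position to its key, then to that key's value.
        return self[self.keyOrder[index]]


d = TinySortedDict()
d["a"] = 1
d["b"] = 2
d["c"] = 3
assert d.value_for_index(1) == 2   # "b" sits at zero-based index 1
```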
Inserts the key, value pair before the item with the given index. | def insert(self, index, key, value):
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(OrderedDict, self).__setitem__(key, value) | [
"def insert_before_element(self, item, element):\n if item is not None and element is not None:\n element.insert_before(item)\n else:\n raise IndexError",
"def insert_at(self, index, item):\n ptr = self.head\n if ptr is None:\n self.head = SinglyLinkedListElement(self, item, None)\n self.tail = self.head\n self.size += 1\n return\n i = 0\n while ptr is not None and ptr.data is not None:\n if i == index:\n ptr.insert(item)\n ptr = ptr.next\n i += 1",
"def insert_at_index(self, index: int, items: list) -> None:\n for i in range(len(items)):\n self.entries.insert(index + i, items[i])\n self.list_size += len(items)",
"def insert_before(self, func, index):\n self.procedure.insert(index, func)",
"def insert(self, index: int, item: Any) -> None:\n self.contents.insert(index, item)\n return self",
"def insert_at_index(self, index, item):\n # Check if the given index is out of range and if so raise an error\n if not (0 <= index <= self.size):\n raise ValueError('List index out of range: {}'.format(index))\n\n if index == 0:\n self.prepend(item)\n elif index == self.size:\n self.append(item)\n else:\n new_node = Node(item)\n curr = self.head\n for i in range(index): # curr will become the node that comes after the new_node we want to insert\n curr = curr.next\n print(curr.data)\n curr.previous.next = new_node # setting the prev's next of the curr to become new_node\n new_node.next = curr # setting the new_node to point to correct next node\n new_node.previous = curr.previous # setting the new_node to point to correct previous node\n curr.previous = new_node # curr's prev is now the new node instead of the old curr's prev\n self.size += 1",
"def insert_at(self, index: Union[int, Int], value: T) -> None:\r\n self.insert(index=index, value=value)",
"def insert_at_index(self, index, item):\n if not (0 <= index <= self.size):\n raise ValueError('List index out of range: {}'.format(index))\n if index == self.size:\n self.append(item)\n return\n if index == 0:\n self.prepend(item)\n return\n current_node = self.head\n count = 1\n while count < index: \n current_node = current_node.next\n count += 1\n new_node = Node(item)\n new_node.next = current_node.next\n current_node.next = new_node\n self.size += 1",
"def insert(self, i, item):\n if item != None and item not in self:\n list.insert(self, i, item)",
"def insert_before(self, key, data):\n node = ListNode(data)\n p = self.head\n while p.next is not None:\n if p.next.data == key:\n node.next = p.next\n p.next = node\n p = p.next",
"def insert(self, index, item):\r\n if index > len(self):\r\n raise IndexError\r\n\r\n # Create new node containing the item\r\n new_node = Node(item)\r\n\r\n if index == 0:\r\n new_node.next = self.first\r\n self.first = new_node\r\n else:\r\n # Iterate to (index-1)-th node\r\n curr = self.first\r\n for i in range(index - 1):\r\n curr = curr.next\r\n\r\n # Update links to insert new node\r\n new_node.next = curr.next\r\n curr.next = new_node",
"def insertAtIndex(self, index, value):\n current = self._traverse(index)\n nextNode = Node(value, current.getNext())\n # set the node after the node at index-1\n current.setNext(nextNode)\n self.size += 1",
"def insert_at_position(DL, n, data):\n DL.insert(data, n - 1)",
"def insert_parameter(\n sig: Signature, index: int, param: Parameter\n) -> Signature:\n parameters = list(sig.parameters.values())\n parameters.insert(index, param)\n\n return sig.replace(parameters=parameters)",
"def insert(self, index, elem):\n self._elements.insert(index, elem)",
"def insert(self, index, s):\n raise NotImplementedError",
"def insert(self, item: 'void *', insertbefore: 'int const') -> \"void\":\n return _coin.SbPList_insert(self, item, insertbefore)",
"def _insert(self, obj, index):\n\n if self._n == self._capacity:\n self._resize(2 * self._capacity)\n self._n += 1\n for k in reversed(xrange(index, self._n)): # Goes backwards through the list to move the values along in the array\n self._A[k] = self._A[k-1]\n\n self._A[index] = obj",
"def insert(self, item: 'SbVec3f', insertbefore: 'int const') -> \"void\":\n return _coin.SbVec3fList_insert(self, item, insertbefore)",
"def insert_line(self, line, index):\n self.line_map.insert(index, line)\n for i, x in enumerate(self.line_map):\n x.index = i"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
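Editor's note: the positive above is the companion `insert` method of the same keyOrder-based mapping. The `index -= 1` adjustment matters when the key already exists earlier in the order: its old slot is removed first, which shifts later positions down by one. A hedged sketch reusing the hypothetical `TinySortedDict` from the previous note:

```python
class TinySortedDict(dict):
    """Toy ordered mapping used only to exercise insert()."""

    def __init__(self):
        super().__init__()
        self.keyOrder = []

    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super().__setitem__(key, value)

    def insert(self, index, key, value):
        # If the key is already present, drop its old slot first and
        # compensate the target index when that slot was to the left.
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super().__setitem__(key, value)


d = TinySortedDict()
d["a"] = 1
d["b"] = 2
d["c"] = 3
d.insert(2, "a", 10)                 # move "a" before the item that was at index 2
assert d.keyOrder == ["b", "a", "c"]
assert d["a"] == 10
```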
Return index or None for a given location. | def index_for_location(self, location):
if location == '_begin':
i = 0
elif location == '_end':
i = None
elif location.startswith('<') or location.startswith('>'):
i = self.index(location[1:])
if location.startswith('>'):
if i >= len(self):
# last item
i = None
else:
i += 1
else:
raise ValueError('Not a valid location: "%s". Location key '
'must start with a ">" or "<".' % location)
return i | [
"def index_or_none(l, item, *args):\n\n try:\n idx = l.index(item, *args)\n except ValueError:\n idx = None\n return idx",
"def find_index_corresponding_store(job, route):\n store_id_to_find = job.store['id']\n index_to_return = None\n for i in range(len(route.tour)):\n location = route.tour[i]\n if isinstance(location, Store):\n store_id = location.id\n if store_id_to_find == store_id:\n logging.debug(\"Corresponding store for job \" + str(job) + \" is \" + str(location))\n index_to_return = i\n if index_to_return == None:\n logging.warning(\"Couldn't find store for job \" + str(job))\n return index_to_return",
"def eid_from_loc(self, loc):\n if loc is None:\n return 0\n for eid, this_loc in self.items():\n if loc == this_loc:\n return eid\n return -1",
"def _get_tile_index(regex, location):\n pattern = re.compile(regex)\n match = pattern.search(location)\n if match:\n tile_index0 = match.group('tile_index0')\n tile_index1 = match.group('tile_index1')\n else:\n tile_index0 = '999'\n tile_index1 = '999'\n return tile_index0, tile_index1",
"def find_node(self, location):\r\n for node in self.all_nodes:\r\n if node.is_at(location):\r\n return node\r\n return None",
"def get_index(item: Optional[List], value: int):\n if item is None:\n return None\n else:\n try:\n return item.index(value)\n except ValueError:\n return None",
"def get_location(self, location=None, location_id=None):\n self._load_locations()\n if location:\n if location in self.locations:\n return self.locations[location]\n return None\n for location in self.locations:\n if location.id == location_id:\n return location\n return None",
"def station_by_location(self, location):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if location == _[\"geometry\"][\"coordinates\"]]\n log.debug(\"searching for location {} found {}\".format(location, station))\n return station[0]\n except:\n log.debug(\"searching for location {} found None\")\n return None",
"def find_index_of(self, pattern):\n\n if not self._my_list_ok:\n self._check_my_list()\n try:\n return self.my_info[self.my_info.filename.str.contains(pattern)].index[0]\n except IndexError:\n print(\"find_index_of exception: no match found.\")\n # return None (type(returnedValue) = None)\n pass",
"def get_index(self, x):\n if x in self.d:\n return self.d[x]\n else:\n return None",
"def _get_idx(self, coord, is_y):\n if is_y:\n if coord < 0 or coord > len(self.grid) * self.tileLength:\n # TODO handle off grid case\n return None\n else:\n if coord < 0 or coord > len(self.grid[0]) * self.tileLength:\n # TODO handle off grid case\n return None\n\n coord -= (self.tileLength / 2)\n if (-self.tileLength / 2) < coord < 0:\n return 0\n\n else:\n low_estimate = int(coord // self.tileLength)\n offset = coord % self.tileLength\n ret = low_estimate + \\\n 1 if offset > (self.tileLength / 2) else low_estimate\n return ret\n # if is_y:\n # return (len(self.grid) - 1) - ret\n # else:\n # return ret",
"def GetSubNodeByLocation(self, location):\n for sub_node in self.sub_nodes:\n sub_node_location = getattr(sub_node.path_spec, 'location', None)\n if location == sub_node_location:\n return sub_node\n\n return None",
"def get_index(col, index, default=None):\n if abs(index) < len(col):\n return default\n return col[index]",
"def get(self, location):\n\n (r, c) = location\n if not (0 <= r and r < self.number_of_rows):\n return 0\n if not (0 <= c and c < self.number_of_columns):\n return 0\n return self.array[self.start_row + r][self.start_column + c]",
"def _get_index(self, beacon_config, label):\n\n indexes = [index for index, item in enumerate(beacon_config) if label in item]\n if not indexes:\n return -1\n else:\n return indexes[0]",
"def get_index(self, label):\n\t\treturn self._label_to_index[label]",
"def _location_for(self, idx, side=\"right\") -> _SubIndex:\n if idx == 0 or not self:\n return _SubIndex(0, 0)\n\n # When the index is out of bounds, we fall back to the last element\n if idx >= len(self):\n return _SubIndex(\n len(self.collections) - 1,\n len(self.collections[-1]),\n )\n\n part_no = np.searchsorted(self._offsets, idx, side)\n\n if part_no == 0:\n local_idx = idx\n else:\n local_idx = idx - self._offsets[part_no - 1]\n\n return _SubIndex(part_no, local_idx)",
"def index_of(self, name):\n\n info = self.info_of(name)\n return info[self.INDEX]",
"def get_index(self, key: str) -> Optional[int]:\r\n i = 0\r\n m = 0\r\n while self.hash_table[self.horner_hash(key) + (i ** 2) - m] is not None:\r\n if self.hash_table[self.horner_hash(key) + (i ** 2) - m][0] != key:\r\n i = i + 1\r\n if self.table_size <= self.horner_hash(key) + (i ** 2) - m:\r\n m = m + self.table_size\r\n continue\r\n return self.horner_hash(key) + (i ** 2) - m\r\n return None",
"def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
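Editor's note: the location mini-language in the positive above ('_begin', '_end', '<name', '>name') is easier to see with concrete values. Below is a condensed stand-alone rewrite that works on a plain list of keys rather than Python-Markdown's odict class; note that after a successful `index()` lookup the original's `i >= len(self)` guard can never fire, so '>last_key' effectively yields the slot just past the end:

```python
def index_for_location(keys, location):
    """Map a location string to a list index (None means 'append at end')."""
    if location == "_begin":
        return 0
    if location == "_end":
        return None
    if location.startswith(("<", ">")):
        i = keys.index(location[1:])
        if location.startswith(">"):
            i += 1   # ">name" means the slot just after "name"
        return i
    raise ValueError('Not a valid location: "%s"' % location)


keys = ["html_block", "reference", "inline"]
assert index_for_location(keys, "_begin") == 0
assert index_for_location(keys, "<reference") == 1   # insert before "reference"
assert index_for_location(keys, ">reference") == 2   # insert after "reference"
assert index_for_location(keys, ">inline") == 3      # slot after the last key
assert index_for_location(keys, "_end") is None
```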
Build the default postprocessors for Markdown. | def build_postprocessors(md_instance, **kwargs):
postprocessors = odict.OrderedDict()
postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
postprocessors["unescape"] = UnescapePostprocessor()
return postprocessors | [
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def _build_post_processor_list_from_args(self) -> typing.List[PostProcessor]:\n post_processors: typing.List[PostProcessor] = []\n if self._args.pp_trim_trailing_whitespace:\n post_processors.append(TrimTrailingWhitespace())\n if hasattr(self._args, \"pp_max_emptylines\") and self._args.pp_max_emptylines is not None:\n post_processors.append(LimitEmptyLines(self._args.pp_max_emptylines))\n if hasattr(self._args, \"pp_run_program\") and self._args.pp_run_program is not None:\n post_processors.append(self._build_ext_program_postprocessor(self._args.pp_run_program))\n\n post_processors.append(SetFileMode(self._args.file_mode))\n\n return post_processors",
"def get_default_postprocessors() -> Sequence[PostProcessor]:\n # For compatibility with libxml2\n return [datatypelibrary]",
"def default_processors(self):\n return []",
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def process_markdown(questions):\n for question in questions:\n question['question'] = markdown(question['question'])\n for i in range(len(question['answers'])):\n this = question['answers'][i][0]\n question['answers'][i][0] = markdown_nopara(this)\n return questions",
"def get_pages_content(self):\n\n #TODO other markup langage (piece of cake)\n for page in self.postlist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n if page['markup']=='markdown':\n page['content']=self.md.convert(page['pre_content'])\n\n\n for page in self.pagelist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n if page['markup']=='markdown':\n page['content']=self.md.convert(page['pre_content'])",
"def register_processors(self) -> None:\n self.add_processor(RStudioServerCodeProcessor())\n self.add_processor(GenericFileChangeProcessor())\n self.add_processor(RStudioServerPlaintextProcessor())\n self.add_processor(RStudioServerImageExtractorProcessor())\n self.add_processor(ActivityDetailLimitProcessor())\n self.add_processor(ActivityShowBasicProcessor())",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def parse_md_config(self, source):\n md1 = markdown.Markdown(extensions=[\"markdown.extensions.meta\"])\n md1.convert(source)\n md_meta = getattr(md1, \"Meta\")\n\n # recreate an instance of Markdown object\n md2 = markdown.Markdown(extensions=self.ext_list)\n if self.to64:\n if not self.base_dir:\n raise ValueError(\n \"base dir is required while convert from text and enable convert local image to base64\")\n md2.inlinePatterns[\"image_link\"] = ImageCheckPattern(self.base_dir, md2)\n return md2, md_meta",
"def generate_posts(self, post_template_path, posts): \n # use `not posts` rather than `len(posts)` to match PEP8\n if not posts or post_template_path == '':\n return [], []\n \n posts_meta = []\n writables = []\n for post in posts:\n per_meta = {}\n # Delegate the metadata from post itself to the tempoary containers\n # for generator global usage\n # TODO: make it a class member?\n for k, v in post.get_meta().items():\n per_meta[k] = v\n\n # trim post.title to get rid of double quotation mark\n if 'title' in per_meta:\n per_meta['title'] = utility.trim_double_quotation_mark(per_meta['title'])\n\n # TODO: markdown parse\n per_meta['__raw_content'] = post.get_content()\n per_meta['content'] = markdown.markdown(post.get_content())\n\n if 'date' in per_meta:\n # TODO: which is more efficient? regexp before or try...catch\n # block\n pd = utility.try_convert_date_str(per_meta['date'])\n\n url_dir = '/'.join(['post', str(pd.year), str(pd.month), str(pd.day), \\\n '-'.join(str(x) for x in per_meta['__file_name'])])\n url = os.path.join(url_dir, self.__default_file_name)\n #os.makedirs(os.path.join(self._abs_dir, url_dir))\n #file_path = os.path.join(self._abs_dir, url)\n\n result = self.__template_helper(post_template_path, \\\n post=per_meta, site=self.__site_dict)\n #with codecs.open(file_path, 'w', 'utf-8') as post_fd:\n # post_fd.write(result)\n w = Writable(url, result)\n writables.append(w)\n per_meta['url'] = url_dir\n posts_meta.append(per_meta)\n else:\n _logger.warning(\"Cannot find date information for post %s\", per_meta['title'])\n\n print(\"Successfully parse all posts!\")\n return writables, posts_meta",
"def preprocess(self):\n for key in self.markdown.keys():\n # data goes to this file \n f = open(key + \".txt\", \"wb\")\n # clean the data up before writing to file\n largeString = \"\\n\".join(self.markdown[key])\n sentences = self.get_sentences(largeString)\n for sentence in sentences:\n x = self.remove_chars(sentence) \n y = self.tokenize_punc(x)\n # write data to file sentence by sentence\n f.write(y.lstrip() + '\\n')\n f.close()",
"def set_markdown_extensions(site_settings):\n # Base markdown extensions support \"fenced_code\".\n markdown_extensions = [\"fenced_code\"]\n if site_settings[\"pygments\"]:\n markdown_extensions.extend([\n \"extra\",\n \"codehilite(css_class=hlcode)\",\n \"toc(title=Table of Contents)\"\n ])\n\n return markdown_extensions",
"def _init_with_config(self):\n self.app_name = self.config.app_name\n\n if '.*' not in self.config.processors_tag_regex:\n self.config.processors_tag_regex.append('.*')\n self.processors = []\n for processor_tag_regex in self.config.processors_tag_regex:\n self.processors.append(\n Processor(processor_tag_regex))",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def scan_posts(self):\n if not self._scanned:\n print \"Scanning posts \",\n targets = set([])\n for wildcard, destination, _, use_in_feeds in self.config['post_pages']:\n print \".\",\n for base_path in glob.glob(wildcard):\n post = Post(base_path, destination, use_in_feeds,\n self.config['TRANSLATIONS'],\n self.config['DEFAULT_LANG'],\n self.config['BLOG_URL'],\n self.get_compile_html(base_path),\n self.MESSAGES)\n for lang, langpath in self.config['TRANSLATIONS'].items():\n dest = (destination, langpath, post.pagenames[lang])\n if dest in targets:\n raise Exception(\n 'Duplicated output path %r in post %r' %\n (post.pagenames[lang], base_path))\n targets.add(dest)\n self.global_data[post.post_name] = post\n if post.use_in_feeds:\n self.posts_per_year[\n str(post.date.year)].append(post.post_name)\n for tag in post.tags:\n self.posts_per_tag[tag].append(post.post_name)\n else:\n self.pages.append(post)\n for name, post in self.global_data.items():\n self.timeline.append(post)\n self.timeline.sort(cmp=lambda a, b: cmp(a.date, b.date))\n self.timeline.reverse()\n post_timeline = [p for p in self.timeline if p.use_in_feeds]\n for i, p in enumerate(post_timeline[1:]):\n p.next_post = post_timeline[i]\n for i, p in enumerate(post_timeline[:-1]):\n p.prev_post = post_timeline[i + 1]\n self._scanned = True\n print \"done!\"",
"def markdown(self, s):\n\n \"\"\"\n Start with some helper functions to process each markdown type.\n Each markdown element has a method to handle the specifics. Each\n method is passed the following parameters:\n\n Arguments:\n m -- a list of the elements parsed for the match. m[0] is\n the full matched substring within s.\n s -- the string to process\n new_str -- the string used to build the replacement string.\n Generally of the format 'stuff{}stuff', where\n 'stuff' is markdown, and {} is replaced with the\n text between the markdown tags.\n\n Returns:\n Modified string with inline markdown element expanded.\n \"\"\"\n def md_vars(m, s, new_str):\n \"\"\"\n Handle inline link and vars: [variable_name]\n\n See docstring in code for argument information.\n \"\"\"\n def makeJitAttrs(params):\n d = {l[0]: l[1] for l in self._special_parameter.regex.findall(params)}\n return d\n\n self.debug.print(\"mdvars(<strong>m[0])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[0])))\n self.debug.print(\"mdvars(<strong>m[1])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[1])))\n self.debug.print(\"mdvars(<strong>s)=</strong><em>{}</em>\".format(HtmlUtils.escape_html(s)))\n jit_attrs = None if not m[3] else makeJitAttrs(m[3])\n if self._namespaces.exists(m[1]):\n # Substitute the variable name with the value\n c, v = self._stripClass(self._namespaces.getValue(m[1], jit_attrs))\n v = self._md_value(v)\n if(not c):\n # print(\"OLD: {}<br />\\nNEW: {}<br />\".format(m[0], v))\n s = s.replace(m[0], v)\n else:\n s = s.replace(m[0], '<{0}{1}>{2}</{0}>'.format('span', c, v))\n else:\n # No need to do anything here, just leave the unknown link/variable alone\n pass\n\n return s\n\n def md_plain(m, s, new_str):\n \"\"\"\n Handle simple replacement markdown. e.g. *foo* or **bar**, etc.\n\n See docstring in code for argument information.\n \"\"\"\n return s.replace(m[0], new_str.format(m[1]))\n\n # A map linking markdown keys to processor functions\n markdownTypes = [\n ('vars', md_vars),\n ('strong', md_plain),\n ('emphasis', md_plain),\n ('ins', md_plain),\n ('del', md_plain),\n ]\n\n self._inc_nesting_level()\n self.debug.print(\"markdown({})\".format(HtmlUtils.escape_html(s)))\n # For each type of markdown\n for key, md_func in markdownTypes:\n md_obj = self._regex_markdown[key]\n matches = findall(md_obj.regex, s) # find all the matches\n for m in matches:\n # for each match, process it\n s = md_func(m, s, md_obj.new_str)\n\n #print(\"RETURN: {}\".format(s))\n self._dec_nesting_level()\n return s # return the processed string",
"def get_inline_processors(self) -> List[Type[InlineProcessor]]:\n return [\n BackslashEscapeProcessor,\n EntityReferenceProcessor,\n CodeSpanProcessor,\n EmphasisProcessor,\n LinkOpenerProcessor,\n LinkCloserProcessor,\n URIAutolinkProcessor,\n EmailAutolinkProcessor,\n RawHTMLProcessor,\n HardLinebreakProcessor, # TODO: docutils does not support hardline break\n StrikethroughProcessor,\n ]",
"def recursive_processing(self, base_dir, target_dir, it):\n try:\n file_dir, dirs, files = next(it)\n except StopIteration:\n return '', []\n readme_files = {'README.md', 'README.rst', 'README.txt'}\n if readme_files.intersection(files):\n foutdir = file_dir.replace(base_dir, target_dir)\n create_dirs(foutdir)\n this_nbps = [\n NotebookProcessor(\n infile=f,\n outfile=os.path.join(foutdir, os.path.basename(f)),\n disable_warnings=self.disable_warnings,\n preprocess=(\n (self.preprocess is True or f in self.preprocess) and\n not (self.dont_preprocess is True or\n f in self.dont_preprocess)),\n clear=((self.clear is True or f in self.clear) and not\n (self.dont_clear is True or f in self.dont_clear)),\n code_example=self.code_examples.get(f),\n supplementary_files=self.supplementary_files.get(f),\n other_supplementary_files=self.osf.get(f),\n thumbnail_figure=self.thumbnail_figures.get(f),\n url=self.get_url(f.replace(base_dir, '')),\n binder_url=self.get_binder_url(f.replace(base_dir, '')),\n **self._nbp_kws)\n for f in map(lambda f: os.path.join(file_dir, f),\n filter(self.pattern.match, files))]\n readme_file = next(iter(readme_files.intersection(files)))\n else:\n return '', []\n labels = OrderedDict()\n this_label = 'gallery_' + foutdir.replace(os.path.sep, '_')\n if this_label.endswith('_'):\n this_label = this_label[:-1]\n for d in dirs:\n label, nbps = self.recursive_processing(\n base_dir, target_dir, it)\n if label:\n labels[label] = nbps\n s = \".. _%s:\\n\\n\" % this_label\n\n if readme_file.endswith('.md'):\n s += spr.check_output(\n ['pandoc', os.path.join(file_dir, readme_file),\n '-t', 'rst']).decode('utf-8').rstrip() + '\\n\\n'\n else:\n with open(os.path.join(file_dir, readme_file)) as f:\n s += f.read().rstrip() + '\\n\\n'\n\n if self.toctree_depth:\n s += \"\\n\\n.. toctree::\"\n if self.toctree_depth > 0:\n s += \"\\n :maxdepth: %d\" % self.toctree_depth\n s += \"\\n\\n\"\n s += ''.join(' %s\\n' % os.path.splitext(os.path.basename(\n nbp.get_out_file()))[0] for nbp in this_nbps)\n for d in dirs:\n findex = os.path.join(d, 'index.rst')\n if os.path.exists(os.path.join(foutdir, findex)):\n s += ' %s\\n' % os.path.splitext(findex)[0]\n\n s += '\\n'\n\n for nbp in this_nbps:\n code_div = nbp.code_div\n if code_div is not None:\n s += code_div + '\\n'\n else:\n s += nbp.thumbnail_div + '\\n'\n s += \"\\n.. raw:: html\\n\\n <div style='clear:both'></div>\\n\"\n for label, nbps in labels.items():\n s += '\\n.. only:: html\\n\\n .. rubric:: :ref:`%s`\\n\\n' % (\n label)\n for nbp in nbps:\n code_div = nbp.code_div\n if code_div is not None:\n s += code_div + '\\n'\n else:\n s += nbp.thumbnail_div + '\\n'\n s += \"\\n.. raw:: html\\n\\n <div style='clear:both'></div>\\n\"\n\n s += '\\n'\n\n with open(os.path.join(foutdir, 'index.rst'), 'w') as f:\n f.write(s)\n return this_label, list(chain(this_nbps, *labels.values()))",
"def generate_markdown(pelican):\n global enabled\n if not enabled:\n return\n\n include_regex = pelican.settings.get('PELIGRAM_INCLUDE_REGEX')\n media_patterns=pelican.settings.get(\"PELIGRAM_MEDIA_PATTERNS\", DEFAULT_INSTAGRAM_MEDIA_PATTERNS)\n\n if include_regex:\n pattern = re.compile(include_regex)\n is_included = lambda name: pattern.match(name)\n else:\n is_included = lambda name: not name.startswith('.')\n\n in_path = instagram_data_path(pelican)\n logger.debug(\"pelican-gram started\")\n processor=_processor(pelican,in_path)\n for dirpath, _, filenames in os.walk(in_path):\n for filename in filenames:\n if is_included(filename):\n if filename.endswith('.json'):\n logger.debug(f\"Processing file: {filename}\")\n media_filenames=sum(list(map(lambda pattern: fnmatch.filter(filenames,path.splitext(filename)[0]+pattern),media_patterns)),[])\n processor.process_instagram_metadata(filename,media_filenames)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
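Editor's note: the positive above simply wires three named postprocessors into an ordered registry; the interesting part is the pattern itself — an ordered name-to-stage map whose stages are run over the output text in registration order, with the names acting as anchor points for later insertions. A small generic sketch of that pattern; the stage names and lambdas here are invented for illustration, not Python-Markdown's real postprocessors:

```python
from collections import OrderedDict


def build_postprocessors():
    # Ordered registry: names let other code insert stages before/after these.
    stages = OrderedDict()
    stages["strip_trailing_ws"] = lambda text: "\n".join(
        line.rstrip() for line in text.splitlines())
    stages["single_final_newline"] = lambda text: text.rstrip("\n") + "\n"
    return stages


def run_postprocessors(stages, text):
    # Each stage receives the full output text and returns the rewritten text.
    for stage in stages.values():
        text = stage(text)
    return text


print(run_postprocessors(build_postprocessors(), "<p>hi</p>   \n<p>there</p>\n\n"))
```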
Iterate over html stash and restore "safe" html. | def run(self, text):
for i in range(self.markdown.htmlStash.html_counter):
html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
if self.markdown.safeMode and not safe:
if str(self.markdown.safeMode).lower() == 'escape':
html = self.escape(html)
elif str(self.markdown.safeMode).lower() == 'remove':
html = ''
else:
html = self.markdown.html_replacement_text
if self.isblocklevel(html) and (safe or not self.markdown.safeMode):
text = text.replace("<p>%s</p>" %
(self.markdown.htmlStash.get_placeholder(i)),
html + "\n")
text = text.replace(self.markdown.htmlStash.get_placeholder(i),
html)
return text | [
"def _clear_from_html(self, elem):\n if type(elem) == str:\n return html.unescape(elem)\n elif type(elem) == dict:\n return {self._clear_from_html(k): self._clear_from_html(v) for k, v in elem.items()}\n elif type(elem) == list:\n return [self._clear_from_html(el) for el in elem]\n else:\n return elem",
"def clean_html(context, data):\n doc = _get_html_document(context, data)\n if doc is None:\n context.emit(data=data)\n return\n\n remove_paths = context.params.get(\"remove_paths\")\n for path in ensure_list(remove_paths):\n for el in doc.xpath(path):\n el.drop_tree()\n\n html_text = html.tostring(doc, pretty_print=True)\n content_hash = context.store_data(html_text)\n data[\"content_hash\"] = content_hash\n context.emit(data=data)",
"def UndoSafeForHTML(escaped_string):\n raw_string = escaped_string.replace('<', '<')\n raw_string = raw_string.replace('>', '>')\n raw_string = raw_string.replace('"', '\"')\n raw_string = raw_string.replace('&', '&')\n return raw_string",
"def sanitize_html(html):\r\n p = html5lib.HTMLParser(tokenizer=HTMLSanitizer,\r\n tree=treebuilders.getTreeBuilder(\"dom\"))\r\n dom_tree = p.parseFragment(html)\r\n walker = treewalkers.getTreeWalker(\"dom\")\r\n stream = walker(dom_tree)\r\n s = serializer.HTMLSerializer(omit_optional_tags=False,\r\n quote_attr_values=True)\r\n output_generator = s.serialize(stream)\r\n return u''.join(output_generator)",
"def bleach_html(html): \n \n clean = bleach.clean(html).strip()\n \n # if the source doesn't include <p> tags, enforce them.\n if not re.search(r'^<p>', clean):\n clean = \"<p>%s</p>\"%clean\n \n # now the template can treat this string as HTML safely\n return mark_safe(clean)",
"def sanitize_html(html_code):\n attributes = bleach.ALLOWED_ATTRIBUTES.copy()\n attributes.update({\n '*': ['class', 'style', 'id'],\n 'audio': ['controls', 'autobuffer', 'autoplay', 'src'],\n 'img': ['src', 'width', 'height', 'class']\n })\n output = bleach.clean(\n html_code,\n protocols=bleach.ALLOWED_PROTOCOLS + ['data'],\n tags=bleach.ALLOWED_TAGS + ['div', 'p', 'audio', 'pre', 'img', 'span'],\n styles=['white-space'],\n attributes=attributes\n )\n return output",
"def sanitize_django_items(string):\n out = string\n out = out.replace(\"{{\", \"{{\")\n out = out.replace(\"}}\", \"}}\")\n out = out.replace(\"{%\", \"{%\")\n out = out.replace(\"%}\", \"%}\")\n out = out.replace(\">\", \">\")\n out = out.replace(\"<\", \"<\")\n out = out.replace(\"\\n\", \"<br/>\")\n return out",
"def set_UnescapedHTML(self, value):\n super(HTMLEscapeInputSet, self)._set_input('UnescapedHTML', value)",
"def fixMalformedHTML(self, backup=False, restored=False):\n html = self.driver.page_source\n html = re.sub('<td>\\s+<td valign=\"middle\">', '<td valign=\"middle\">', html, flags=re.I)\n html = re.sub('</td>\\s+<td>', '</td>', html, flags=re.I)\n # Parse the (hopefully) not-busted HTML\n soup = BeautifulSoup(html, \"html5lib\")\n # Extract info from table rows..\n rows = soup.table.table.tbody.find_all('tr', recursive=False)\n \n if backup:\n self.createDictData(rows)\n elif restored:\n self.createDictDataRestoredFile(rows) # some new function here for doing \n else:\n return None",
"def sanitize(html, strip_whitespace=False):\n TreeBuilder = html5lib.treebuilders.getTreeBuilder(\"lxml\")\n parser = html5lib.HTMLParser(tree=TreeBuilder, tokenizer=ReadableTokenizer)\n tree = parser.parse(html)\n walker = ReadableTreewalker(tree)\n serializer = HTMLSerializer(strip_whitespace=strip_whitespace)\n return serializer.render(walker)",
"def HTML(html): # pylint: disable=invalid-name\n return markupsafe.Markup(html)",
"def clean_highlighted_code(html):\n cleaner = clean.Cleaner(allow_tags=['pre'], remove_unknown_tags=False)\n for el in html.findall('.//pre'):\n p = el.getparent()\n cleaned = cleaner.clean_html(el)\n p.replace(el, cleaned)",
"def escape_html(html):\n #boileeeeeeerplate\n return unicode(html).replace('&', '&').replace('<', '<').replace('>', '>').replace('\"', '"').replace(\"'\", ''')",
"def filterHtml(self, body):\n output = ''\n soup = BeautifulSoup(body, \"html.parser\")\n for script in soup([\"script\", \"style\"]):\n script.extract()\n text = soup.find_all(text=True)\n for t in text:\n if t == \"\\\\n\":\n continue\n if len(t) > 2:\n # als er nog blacklisted elements in zitten, haal ze eruit.\n if t.parent.name not in self.blacklist:\n output += '{} '.format(t.strip())\n try:\n t = t.replace(\"\\\\n\", \"\")\n t = t.replace(\"\\\\t\", \"\")\n except:\n ctx.log.error(\"stripping failed\")\n\n return output",
"def restoreContent(self):\n ...",
"def current_html(self):\n return str(self.__filter_soup(self.stack_soup[-1]))",
"def strip_tags(html):\n\n s = HTMLStripper()\n s.feed(html)\n stripped = s.get_data()\n # Remove extra spaces\n return ' '.join(filter(None, stripped.split(' ')))",
"def dump_store_html(self):\n store_content = request.urlopen(self.store_url).read()\n soup = BeautifulSoup(store_content)\n with open('store_content.html', mode='w') as f:\n f.write(soup.prettify())\n print('Wrote store_content.html')",
"def preprocess_html(dom):\n dom = instrument_dom(dom)\n dom = clean_tags(dom)\n if config.INLINE_SPACE_INSERTION['ENABLE']:\n dom = insert_inline_spaces(dom) \n dom = remove_tables(dom)\n if config.LIST_DETECTION['ENABLE']:\n dom = detect_lists(dom)\n if config.DETECT_HEADERS['ENABLE']:\n dom = detect_headers(dom)\n dom_text = get_dom_string(dom)\n # assert False\n return dom_text",
"def _transform_tag(self, tag):\n\n def _(e):\n if isinstance(e, bs4.element.Comment): # do not modify comments\n return\n if e.name in ['script']: # do not modify contents of 'script' tag\n return\n if isinstance(e, bs4.element.NavigableString): # has no children\n e.replaceWith(self._transform_element_text(e))\n return\n for i in e.children:\n _(i)\n\n for el in self.soup.find(tag):\n _(el)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
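Editor's note: the positive above restores raw HTML that was "stashed" during parsing and swapped for placeholder tokens; in safe mode it escapes, removes, or replaces the block instead of restoring it. A stripped-down sketch of that placeholder round-trip — the stash class and placeholder format here are simplified stand-ins, not Python-Markdown's actual implementation:

```python
from xml.sax.saxutils import escape


class HtmlStash:
    """Collects raw HTML blocks and hands out placeholder tokens for them."""

    def __init__(self):
        self.blocks = []

    def store(self, html):
        self.blocks.append(html)
        return self.placeholder(len(self.blocks) - 1)

    def placeholder(self, i):
        return "\x02stash:%d\x03" % i


def restore(text, stash, safe_mode=None):
    # Walk the stash and swap each placeholder back for its (possibly
    # escaped or removed) HTML block.
    for i, html in enumerate(stash.blocks):
        if safe_mode == "escape":
            html = escape(html)
        elif safe_mode == "remove":
            html = ""
        text = text.replace(stash.placeholder(i), html)
    return text


stash = HtmlStash()
token = stash.store("<script>alert(1)</script>")
doc = "<p>before</p>\n%s\n<p>after</p>" % token
print(restore(doc, stash, safe_mode="escape"))
```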
Build the default set of preprocessors used by Markdown. | def build_preprocessors(md_instance, **kwargs):
preprocessors = odict.OrderedDict()
preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
if md_instance.safeMode != 'escape':
preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
preprocessors["reference"] = ReferencePreprocessor(md_instance)
return preprocessors | [
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def preprocessing():",
"def default_processors(self):\n return []",
"def preprocess (self,\r\n source,\r\n output_file=None,\r\n macros=None,\r\n include_dirs=None,\r\n extra_preargs=None,\r\n extra_postargs=None):\r\n pass",
"def text_preparation_pipeline(preprocessor):\n text_preparation_pipeline = (\n TextPreparationPipelineFactory.create_default_text_preparation_pipeline()\n )\n text_preparation_pipeline.preprocessors = [preprocessor]\n return text_preparation_pipeline",
"def get_supported_preprocessors(\n ) -> typing.List[typing.List[PreprocessorsTypes]]:\n return",
"def preprocessor(*args, **kwargs):\n logger.debug(\"Adding preprocessor from %s\", args)\n return _unwrap(_preprocessors, *args, **kwargs,\n is_list=False, cache_name=\"preprocessor\")",
"def gen_preprocess_options (macros, include_dirs):\r\n # XXX it would be nice (mainly aesthetic, and so we don't generate\r\n # stupid-looking command lines) to go over 'macros' and eliminate\r\n # redundant definitions/undefinitions (ie. ensure that only the\r\n # latest mention of a particular macro winds up on the command\r\n # line). I don't think it's essential, though, since most (all?)\r\n # Unix C compilers only pay attention to the latest -D or -U\r\n # mention of a macro on their command line. Similar situation for\r\n # 'include_dirs'. I'm punting on both for now. Anyways, weeding out\r\n # redundancies like this should probably be the province of\r\n # CCompiler, since the data structures used are inherited from it\r\n # and therefore common to all CCompiler classes.\r\n\r\n pp_opts = []\r\n for macro in macros:\r\n\r\n if not (type (macro) is TupleType and\r\n 1 <= len (macro) <= 2):\r\n raise TypeError, \\\r\n (\"bad macro definition '%s': \" +\r\n \"each element of 'macros' list must be a 1- or 2-tuple\") % \\\r\n macro\r\n\r\n if len (macro) == 1: # undefine this macro\r\n pp_opts.append (\"-U%s\" % macro[0])\r\n elif len (macro) == 2:\r\n if macro[1] is None: # define with no explicit value\r\n pp_opts.append (\"-D%s\" % macro[0])\r\n else:\r\n # XXX *don't* need to be clever about quoting the\r\n # macro value here, because we're going to avoid the\r\n # shell at all costs when we spawn the command!\r\n pp_opts.append (\"-D%s=%s\" % macro)\r\n\r\n for dir in include_dirs:\r\n pp_opts.append (\"-I%s\" % dir)\r\n\r\n return pp_opts",
"def _preprocess_data(self, learn_set, test_set, preprocessors):\n for p_type, preprocessor in preprocessors:\n if p_type == \"B\":\n learn_set = preprocessor(learn_set)\n test_set = preprocessor(test_set)\n for p_type, preprocessor in preprocessors:\n if p_type == \"L\":\n learn_set = preprocessor(learn_set)\n elif p_type == \"T\":\n test_set = preprocessor(test_set)\n elif p_type == \"LT\":\n (learn_set, test_set) = preprocessor(learn_set, test_set)\n return learn_set, test_set",
"def gen_text_preprocessor():\n def clean_str(string):\n misspellings = {\n r'pur ': 'purple',\n r'fea-': 'feather',\n r'wh-': 'white',\n r'whie': 'white',\n r'wh ': 'white',\n r'or ': 'orange',\n r'or-': 'orange',\n r'orge': 'orange',\n r'winngs': 'wings',\n r'feathes': 'feathers',\n }\n\n for expr, subst in misspellings.items():\n string = re.sub(expr, subst, string)\n\n # Replace '(' with ' '\n string = re.sub(r'\\(', ' ', string)\n string = re.sub(r',', ' ', string)\n string = re.sub(r'-', ' ', string)\n string = re.sub(r'~+', ' ', string)\n\n # Replace multiple spaces with a single space.\n string = re.sub(r'\\s+', ' ', string).strip()\n\n string = re.sub(r'\"+', '', string)\n return string\n\n return data.Pipeline(clean_str)",
"def _build_post_processor_list_from_args(self) -> typing.List[PostProcessor]:\n post_processors: typing.List[PostProcessor] = []\n if self._args.pp_trim_trailing_whitespace:\n post_processors.append(TrimTrailingWhitespace())\n if hasattr(self._args, \"pp_max_emptylines\") and self._args.pp_max_emptylines is not None:\n post_processors.append(LimitEmptyLines(self._args.pp_max_emptylines))\n if hasattr(self._args, \"pp_run_program\") and self._args.pp_run_program is not None:\n post_processors.append(self._build_ext_program_postprocessor(self._args.pp_run_program))\n\n post_processors.append(SetFileMode(self._args.file_mode))\n\n return post_processors",
"def get_supported_preprocessors(\n ) -> typing.List[typing.List[PreprocessorsTypes]]:\n return [\n [PreprocessorsTypes.IDENTITY],\n [PreprocessorsTypes.COUNTER, PreprocessorsTypes.COUNT_VECTORIZER],\n [PreprocessorsTypes.COUNTER, PreprocessorsTypes.COUNT_VECTORIZER],\n [PreprocessorsTypes.COUNTER],\n [PreprocessorsTypes.COUNT_VECTORIZER, PreprocessorsTypes.N_GRAMS],\n [PreprocessorsTypes.K_BINS_DISCRETIZER],\n [PreprocessorsTypes.K_BINS_DISCRETIZER],\n [PreprocessorsTypes.K_BINS_DISCRETIZER]\n ]",
"def get_inline_processors(self) -> List[Type[InlineProcessor]]:\n return [\n BackslashEscapeProcessor,\n EntityReferenceProcessor,\n CodeSpanProcessor,\n EmphasisProcessor,\n LinkOpenerProcessor,\n LinkCloserProcessor,\n URIAutolinkProcessor,\n EmailAutolinkProcessor,\n RawHTMLProcessor,\n HardLinebreakProcessor, # TODO: docutils does not support hardline break\n StrikethroughProcessor,\n ]",
"def _create_default_regexes() -> None:\n _regex_cache.update({\n # categories\n 'category': (r'\\[\\[ *(?:%s)\\s*:.*?\\]\\]',\n lambda site: '|'.join(site.namespaces[14])),\n 'comment': re.compile(r'<!--[\\s\\S]*?-->'),\n # files\n 'file': (FILE_LINK_REGEX, lambda site: '|'.join(site.namespaces[6])),\n # section headers\n 'header': re.compile(\n r'(?:(?<=\\n)|\\A)(?:<!--[\\s\\S]*?-->)*'\n r'(=(?:[^\\n]|<!--[\\s\\S]*?-->)+=)'\n r' *(?:<!--[\\s\\S]*?--> *)*(?=\\n|\\Z)'),\n # external links\n 'hyperlink': compileLinkR(),\n # also finds links to foreign sites with preleading \":\"\n 'interwiki': (\n r'\\[\\[:?(%s)\\s?:[^\\]]*\\]\\]\\s*',\n lambda site: '|'.join(\n ignore_case(i) for i in site.validLanguageLinks()\n + list(site.family.obsolete.keys()))),\n # Module invocations (currently only Lua)\n 'invoke': (\n r'\\{\\{\\s*\\#(?:%s):[\\s\\S]*?\\}\\}',\n lambda site: '|'.join(\n ignore_case(mw) for mw in site.getmagicwords('invoke'))),\n # this matches internal wikilinks, but also interwiki, categories, and\n # images.\n 'link': re.compile(r'\\[\\[[^\\]|]*(\\|[^\\]]*)?\\]\\]'),\n # pagelist tag (used in Proofread extension).\n 'pagelist': re.compile(r'<{}[\\s\\S]*?/>'\n .format(ignore_case('pagelist'))),\n # Wikibase property inclusions\n 'property': (\n r'\\{\\{\\s*\\#(?:%s):\\s*[Pp]\\d+.*?\\}\\}',\n lambda site: '|'.join(\n ignore_case(mw) for mw in site.getmagicwords('property'))),\n # lines that start with a colon or more will be indented\n 'startcolon': re.compile(r'(?:(?<=\\n)|\\A):(.*?)(?=\\n|\\Z)'),\n # lines that start with a space are shown in a monospace font and\n # have whitespace preserved.\n 'startspace': re.compile(r'(?:(?<=\\n)|\\A) (.*?)(?=\\n|\\Z)'),\n # tables often have whitespace that is used to improve wiki\n # source code readability.\n # TODO: handle nested tables.\n 'table': re.compile(\n r'(?:(?<=\\n)|\\A){\\|[\\S\\s]*?\\n\\|}|%s' % _tag_pattern('table')),\n 'template': NESTED_TEMPLATE_REGEX,\n })",
"def build_base_preprocessor(self, inplace: bool = False):\n # Categorical Features\n cat_preprocessor, cat_feature_name, cat_tuning_dict = generate_cat_preprocessor(\n **self.preprocessor_params['cat']\n )\n\n # Numerical Features\n num_preprocessor, num_feature_name, num_tuning_dict = generate_num_preprocessor(\n **self.preprocessor_params['num']\n )\n\n # Datetime Features\n date_preprocessor, date_feature_name = generate_date_preprocessor(**self.preprocessor_params['date'])\n\n # Make total FeatureUnion\n transformer_dict_list = [\n {'prefix': 'CAT', 'transformer': cat_preprocessor, 'tuning_params': cat_tuning_dict},\n {'prefix': 'NUM', 'transformer': num_preprocessor, 'tuning_params': num_tuning_dict},\n {'prefix': 'DATE', 'transformer': date_preprocessor},\n ]\n base_preprocessor, self.preprocessor_tuning_params = generate_feature_union(transformer_dict_list)\n\n # Unify self.feature_name\n self.feature_name = cat_feature_name + num_feature_name + date_feature_name\n self.feature_name = [name.lower().replace(' ', '_') for name in self.feature_name]\n\n if inplace:\n self.preprocessor = base_preprocessor\n else:\n return base_preprocessor",
"def _default_preprocess(self, *args, **kws):\n raise CompileError(\"preprocess() not implemented\")",
"def _preprocessSource(klass, source, file, preprocessors):\n if not isinstance(preprocessors, (list, tuple)):\n preprocessors = [preprocessors]\n for preprocessor in preprocessors:\n preprocessor = klass._normalizePreprocessorArg(preprocessor)\n source, file = preprocessor.preprocess(source, file)\n return source, file",
"def _create_preprocess_chain(pre_id_list, kwargs_list):\n chain = None\n pre_list = []\n for i, pre_id in enumerate(pre_id_list):\n chain = CPreProcess.create(\n pre_id, preprocess=chain, **kwargs_list[i])\n pre_list.append(CPreProcess.create(pre_id, **kwargs_list[i]))\n\n return chain, pre_list",
"def preprocessing_name(self) -> str:\n return \"preprocessing\"",
"def setup_text_preproc_ui(self):\n self.preprocessing_options = {\n \"lower_case\": True,\n \"remove_punctuation\": True,\n \"expand_contractions\": True,\n \"remove_stopwords\": False,\n \"lemmatize\": False,\n \"spell_correction\": False\n }\n proc_labels = [\n 'Convert to lowercase', 'Remove punctuation',\n 'Expand contractions', 'Remove stopwords',\n 'Lemmatize'\n ]\n row = 0\n for k, v in self.preprocessing_options.items():\n chkbox = QCheckBox(' '.join(k.split('_')))\n chkbox.setChecked(v)\n chkbox.stateChanged.connect(lambda state, o=k, :\n self._update_preprocessing_options(\n o, state)\n )\n self.text_proc_grid.addWidget(chkbox, row, 0)\n self.text_preprocessing_checkboxes.append(chkbox)\n row = row + 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
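Editor's note: unlike the postprocessors two records up, which receive the finished output text, Markdown preprocessors run before block parsing and conventionally operate on a list of source lines; note also that 'html_block' is only registered when safeMode is not 'escape', so in escape mode raw HTML is never stashed and simply survives into the text to be escaped later. A line-oriented sketch of one such stage — the `NormalizeTabs` class is made up for the example:

```python
class NormalizeTabs:
    """Toy line-based preprocessor: expands tabs before block parsing."""

    def __init__(self, tab_length=4):
        self.tab_length = tab_length

    def run(self, lines):
        # Preprocessors take the document as a list of lines and return a new list.
        return [line.expandtabs(self.tab_length) for line in lines]


source = "\tindented code\nplain text".split("\n")
print(NormalizeTabs().run(source))
```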
Same effect as concatenating the strings in items, finding the character to which stringindex refers in that string, and returning the item in which that character resides. | def _stringindex_to_listindex(self, stringindex, items):
items.append('dummy')
i, count = 0, 0
while count <= stringindex:
count += len(items[i])
i += 1
return i - 1 | [
"def __getitem__(self, item):\r\n if isinstance(item, slice):\r\n # Slices must be handled specially.\r\n return self._slice(item)\r\n try:\r\n self._char_indexes[item]\r\n except IndexError:\r\n raise IndexError(\"ANSIString Index out of range\")\r\n # Get character codes after the index as well.\r\n if self._char_indexes[-1] == self._char_indexes[item]:\r\n append_tail = self._get_interleving(item + 1)\r\n else:\r\n append_tail = ''\r\n item = self._char_indexes[item]\r\n\r\n clean = self._raw_string[item]\r\n result = ''\r\n # Get the character they're after, and replay all escape sequences\r\n # previous to it.\r\n for index in range(0, item + 1):\r\n if index in self._code_indexes:\r\n result += self._raw_string[index]\r\n return ANSIString(result + clean + append_tail, decoded=True)",
"def get_items_in_string(items, string, return_which='all'):\n s = str(string).lower()\n if type(items)==dict: # map to standard name\n # generated maps with current names as keys\n current_to_final_names_dict = {}\n for final_name, current_names in items.items():\n for current_name in current_names:\n current_to_final_names_dict[current_name] = final_name\n # Create list\n item_in_string = [current_to_final_names_dict[item] for item in current_to_final_names_dict.keys()\n if item.lower() in s]\n else:\n item_in_string = [item for item in items if item.lower() in s]\n if len(item_in_string) > 0:\n if return_which=='all':\n return ' '.join(item_in_string)\n elif type(return_which)==int:\n return item_in_string[return_which]\n else:\n print(\"Do not understand value of 'return_which'\")\n else:\n return False",
"def searchindex(myindex,myitem):\n\tmylen=len(myitem)\n\tmyindexlen=len(myindex)\n\tstartpos=0\n\twhile ((startpos+8)<myindexlen):\n\t\tmytestlen=decodeint(myindex[startpos:startpos+4])\n\t\tif mytestlen==mylen:\n\t\t\tif myitem==myindex[startpos+4:startpos+4+mytestlen]:\n\t\t\t\t#found\n\t\t\t\tdatapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen]);\n\t\t\t\tdatalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen]);\n\t\t\t\treturn datapos, datalen\n\t\tstartpos=startpos+mytestlen+12",
"def getsubString(w, c):\n count = 0\n for x in w:\n #print x\n if x == c:\n break\n count=count+1\n return w[:count]",
"def __getitem__(self, i: 'int') -> \"SbString *\":\n return _coin.SbStringList___getitem__(self, i)",
"def test_it_returns_the_second_index_of_the_char(self):\n self.assertEqual(second_index(\"sims\", \"s\"), 3)\n self.assertEqual(second_index(\"find the river\", \"e\"), 12)\n self.assertEqual(second_index(\"hi\", \" \"), None)\n self.assertEqual(second_index(\"three occurrences\", \"r\"), 10)",
"def get_row(string, index):\n start = index // SUDOKU_SIDE * SUDOKU_SIDE # index of the left element in row.\n end = start + SUDOKU_SIDE\n return (string[i] for i in range(start, end))",
"def insert_at(index, my_chr, my_str):\n if index < 0 or index > len(my_str):\n print \"invalid index\"\n return my_str\n # use substring and add given index\n return my_str[:index] + my_chr + my_str[index:]",
"def __getitem__(self, item):\n return self.search(item)",
"def item_one(items):\n return items[0] if len(items) > 0 else ''",
"def __getitem__(self, i: 'int') -> \"SbString const &\":\n return _coin.SoMFString___getitem__(self, i)",
"def get_column(string, index):\n remainder = index % SUDOKU_SIDE\n return (string[SUDOKU_SIDE * i + remainder] for i in range(SUDOKU_SIDE))",
"def __getitem__(self, index: int) -> str:\n return self.categories()[index]",
"def rel_str_position(self, idx):\n home_str = self.id_string(idx)\n idx -= 1\n for i in range(home_str - 1):\n idx -= len(self.dnastrings[i][1]) + 1\n \n return idx + 1",
"def equip_from_index(self, item_index: int) -> str:\n try:\n item = self.items[item_index] # Find the item to be equipped\n temp = self.gear[item.slot] # Temporarily store the currently equipped item (if any)\n self.gear[item.slot] = item # Equip item\n self.remove(item) # Remove equipped item from inventory\n if temp is not None:\n self.append(temp)\n return f\"You swapped {temp.name} to {item.name}\"\n else:\n return f\"You equip {item.name}\"\n except KeyError:\n return \"You can't equip that\"\n except IndexError:\n return \"There's nothing in that inventory space\"",
"def get_item_text(self, widget, index):\n return widget.GetString(index)",
"def second_index_my(text: str, symbol: str):\n # your code here\n res = text.find(symbol, text.find(symbol)+1)\n return res if res != -1 else None",
"def search(string, char): \n \n index = 0 \n while index < len(string): \n if string[index] == char: \n return index\n index = index + 1 \n return -1",
"def get_ascii_character(index):\n\n return characters.characters[index]",
"def __getitem__(self, i: 'int') -> \"char\":\n return _coin.SbString___getitem__(self, i)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
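Editor's note: a worked trace makes the positive above concrete. It walks the items, accumulating lengths until the running total passes stringindex, then returns the index of the item that character falls in; the 'dummy' sentinel it appends mutates the caller's list and only matters when stringindex points one past the end. A standalone copy (minus the `self` parameter) plus a check:

```python
def stringindex_to_listindex(stringindex, items):
    # Same algorithm as the positive, lifted out of its class for testing.
    items.append('dummy')
    i, count = 0, 0
    while count <= stringindex:
        count += len(items[i])
        i += 1
    return i - 1


items = ["ab", "cde", "f"]
# "".join(items) == "abcdef"; index 3 is "d", which lives in items[1] ("cde").
assert stringindex_to_listindex(3, items) == 1
assert items[-1] == "dummy"   # note the side effect on the caller's list
```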
Build the default treeprocessors for Markdown. | def build_treeprocessors(md_instance, **kwargs):
treeprocessors = odict.OrderedDict()
treeprocessors["inline"] = InlineProcessor(md_instance)
treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
return treeprocessors | [
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def create_tree(markdown):\n global blocks, pos\n # parse markdown\n blocks = parse_markdown(markdown)\n if config.DEBUG_MODE:\n print('[DEBUG]: Parsed markdown')\n print(blocks)\n\n # create root node\n title = blocks[0].content.get_clean()\n root = Node(title)\n\n # recursively generate children\n pos = 1\n while pos < len(blocks):\n c = recurse()\n if c:\n root.add_child(c)\n \n\n # clean up tree\n root = root.retract()\n return root",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def recursive_processing(self, base_dir, target_dir, it):\n try:\n file_dir, dirs, files = next(it)\n except StopIteration:\n return '', []\n readme_files = {'README.md', 'README.rst', 'README.txt'}\n if readme_files.intersection(files):\n foutdir = file_dir.replace(base_dir, target_dir)\n create_dirs(foutdir)\n this_nbps = [\n NotebookProcessor(\n infile=f,\n outfile=os.path.join(foutdir, os.path.basename(f)),\n disable_warnings=self.disable_warnings,\n preprocess=(\n (self.preprocess is True or f in self.preprocess) and\n not (self.dont_preprocess is True or\n f in self.dont_preprocess)),\n clear=((self.clear is True or f in self.clear) and not\n (self.dont_clear is True or f in self.dont_clear)),\n code_example=self.code_examples.get(f),\n supplementary_files=self.supplementary_files.get(f),\n other_supplementary_files=self.osf.get(f),\n thumbnail_figure=self.thumbnail_figures.get(f),\n url=self.get_url(f.replace(base_dir, '')),\n binder_url=self.get_binder_url(f.replace(base_dir, '')),\n **self._nbp_kws)\n for f in map(lambda f: os.path.join(file_dir, f),\n filter(self.pattern.match, files))]\n readme_file = next(iter(readme_files.intersection(files)))\n else:\n return '', []\n labels = OrderedDict()\n this_label = 'gallery_' + foutdir.replace(os.path.sep, '_')\n if this_label.endswith('_'):\n this_label = this_label[:-1]\n for d in dirs:\n label, nbps = self.recursive_processing(\n base_dir, target_dir, it)\n if label:\n labels[label] = nbps\n s = \".. _%s:\\n\\n\" % this_label\n\n if readme_file.endswith('.md'):\n s += spr.check_output(\n ['pandoc', os.path.join(file_dir, readme_file),\n '-t', 'rst']).decode('utf-8').rstrip() + '\\n\\n'\n else:\n with open(os.path.join(file_dir, readme_file)) as f:\n s += f.read().rstrip() + '\\n\\n'\n\n if self.toctree_depth:\n s += \"\\n\\n.. toctree::\"\n if self.toctree_depth > 0:\n s += \"\\n :maxdepth: %d\" % self.toctree_depth\n s += \"\\n\\n\"\n s += ''.join(' %s\\n' % os.path.splitext(os.path.basename(\n nbp.get_out_file()))[0] for nbp in this_nbps)\n for d in dirs:\n findex = os.path.join(d, 'index.rst')\n if os.path.exists(os.path.join(foutdir, findex)):\n s += ' %s\\n' % os.path.splitext(findex)[0]\n\n s += '\\n'\n\n for nbp in this_nbps:\n code_div = nbp.code_div\n if code_div is not None:\n s += code_div + '\\n'\n else:\n s += nbp.thumbnail_div + '\\n'\n s += \"\\n.. raw:: html\\n\\n <div style='clear:both'></div>\\n\"\n for label, nbps in labels.items():\n s += '\\n.. only:: html\\n\\n .. rubric:: :ref:`%s`\\n\\n' % (\n label)\n for nbp in nbps:\n code_div = nbp.code_div\n if code_div is not None:\n s += code_div + '\\n'\n else:\n s += nbp.thumbnail_div + '\\n'\n s += \"\\n.. raw:: html\\n\\n <div style='clear:both'></div>\\n\"\n\n s += '\\n'\n\n with open(os.path.join(foutdir, 'index.rst'), 'w') as f:\n f.write(s)\n return this_label, list(chain(this_nbps, *labels.values()))",
"def prepare_content_tree(self) -> None:\n super().prepare_content_tree()\n\n self.set_numbering()\n\n # Adding the id's of split text in 'new_child_nodes1' list.\n self.split_text_nodes()\n\n # Creating paragraphs and add all id's in 'new_child_nodes2' list.\n self.create_paragraphs()",
"def extendMarkdown(self, md):\r\n # md.registerExtension(self)\r\n md.preprocessors.register(SpoilerblockPreprocessor(md), 'spoiler_block', 29) # Must be < 30\r",
"def assemble_doctree(self):\r\n master = self.config.master_doc\r\n if hasattr(self, \"doctree_\"):\r\n tree = self.doctree_\r\n else:\r\n raise AttributeError(\r\n \"Attribute 'doctree_' is not present. Call method finalize().\")\r\n tree = inline_all_toctrees(\r\n self, set(), master, tree, darkgreen, [master])\r\n tree['docname'] = master\r\n self.env.resolve_references(tree, master, self)\r\n self.fix_refuris(tree)\r\n return tree",
"def css_tree(self) -> Tree:\n from rich.columns import Columns\n from rich.console import Group\n from rich.panel import Panel\n\n from .widget import Widget\n\n def render_info(node: DOMNode) -> Columns:\n \"\"\"Render a node for the tree.\"\"\"\n if isinstance(node, Widget):\n info = Columns(\n [\n Pretty(node),\n highlighter(f\"region={node.region!r}\"),\n highlighter(\n f\"virtual_size={node.virtual_size!r}\",\n ),\n ]\n )\n else:\n info = Columns([Pretty(node)])\n return info\n\n highlighter = ReprHighlighter()\n tree = Tree(render_info(self))\n\n def add_children(tree: Tree, node: DOMNode) -> None:\n \"\"\"Add children to the tree.\"\"\"\n for child in node.children:\n info: RenderableType = render_info(child)\n css = child.styles.css\n if css:\n info = Group(\n info,\n Panel.fit(\n Text(child.styles.css),\n border_style=\"dim\",\n title=\"css\",\n title_align=\"left\",\n ),\n )\n branch = tree.add(info)\n if tree.children:\n add_children(branch, child)\n\n add_children(tree, self)\n return tree",
"def generatedocs():\n fe_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../fastestimator')\n save_dir = os.path.join(tempfile.gettempdir(), 'fe')\n #insert project path to system path to later detect the modules in project\n sys.path.insert(0, fe_path)\n #parent directory where all the markdown files will be stored\n\n for subdirs, dirs, files in os.walk(fe_path, topdown=True):\n for f in files:\n fname, ext = os.path.splitext(os.path.basename(f))\n if not f.startswith('_') and ext == '.py':\n #if f == 'pggan.py':\n f_path = os.path.join(subdirs, f)\n mod_dir = os.path.relpath(f_path, fe_path)\n mod = mod_dir.replace('/', '.')\n if subdirs == fe_path:\n save_path = os.path.join(*[save_dir, 'fe'])\n else:\n save_path = os.path.join(*[save_dir, os.path.relpath(subdirs, fe_path)])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n mdtexts = extractmarkdown(mod, save_path)\n return save_dir",
"def setup_wiki_handlers(self):\n handlers = {\n # Text Markup\n 'emphasis': 'emphasis',\n 'strong': 'strong',\n 'literal': 'code',\n # Blocks\n 'literal_block': 'preformatted',\n # Simple Lists\n # bullet-lists are handled completely by docutils because it uses\n # the node context to decide when to make a compact list\n # (no <p> tags).\n 'list_item': 'listitem',\n # Definition List\n 'definition_list': 'definition_list',\n }\n for rest_func, moin_func in handlers.items():\n visit_func, depart_func = self.create_wiki_functor(moin_func)\n visit_func = new.instancemethod(visit_func, self, MoinTranslator)\n depart_func = new.instancemethod(depart_func, self, MoinTranslator)\n setattr(self, 'visit_%s' % (rest_func), visit_func)\n setattr(self, 'depart_%s' % (rest_func), depart_func)",
"def make_tree(dataset):\n\treturn make_tree_helper(dataset)",
"def default_processors(self):\n return []",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def get_pages_content(self):\n\n #TODO other markup langage (piece of cake)\n for page in self.postlist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n if page['markup']=='markdown':\n page['content']=self.md.convert(page['pre_content'])\n\n\n for page in self.pagelist:\n self.log(\"\\t\" + page['filename'])\n temp=self.env.from_string(page['raw_text'])\n page['pre_content']=temp.render(page=page,pagelist=self.pagelist,postlist=self.postlist,postlist_lan=self.postlist_lan,ext=self.ext,**page)\n if page['markup']=='markdown':\n page['content']=self.md.convert(page['pre_content'])",
"def process_markdown(questions):\n for question in questions:\n question['question'] = markdown(question['question'])\n for i in range(len(question['answers'])):\n this = question['answers'][i][0]\n question['answers'][i][0] = markdown_nopara(this)\n return questions",
"def markdown(self):\n cfg_fname = '%s.md' % self._configurable.__name__.lower()\n\n markdown = [self.doc]\n\n if self._style is True:\n markdown.append(\n 'This key must be set to a dictionary. Its structure is defined '\n '[here](%s). Not specifying the key is equivalent to specifying '\n 'an empty dictionary.' % cfg_fname)\n yield self.key, '\\n\\n'.join(markdown)\n return\n\n markdown.append(\n 'More information about this structure may be found [here](%s).' % cfg_fname)\n\n segue = 'The following configuration keys are used to configure this structure.'\n if self._optional:\n segue += (' This structure is optional, so it is legal to not specify '\n 'any of them, except when this structure is required by context.')\n markdown.append(segue)\n\n for loader in self._configurable.loaders:\n for key, _ in loader.markdown():\n markdown.append('### `%s%s`' % (self.prefix, key))\n #doc = '\\n\\n'.join((\n #'#' + paragraph if paragraph.startswith('###') else paragraph\n #for paragraph in doc.split('\\n\\n')))\n #markdown.append(doc)\n markdown.append('This key is documented [here](%s#%s).' % (cfg_fname, key))\n\n markdown = '\\n\\n'.join(markdown)\n\n if self.prefix:\n yield '%s*' % self.prefix, markdown\n else:\n yield '%s%s keys' % (self.key[0].upper(), self.key[1:]), markdown",
"def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, include_maintoc=False):\n tree = tree.deepcopy()\n for toctreenode in tree.traverse(addnodes.toctree):\n newnodes = []\n #if include_maintoc:\n # newnodes.append(shape_latex_nodes.sectiontoc())\n #else:\n include_maintoc = True\n includefiles = map(text_type, toctreenode['includefiles'])\n for includefile in includefiles:\n try:\n builder.info(colorfunc(includefile) + \" \", nonl=1)\n subtree = inline_all_toctrees(builder, docnameset, includefile,\n builder.env.get_doctree(includefile),\n colorfunc, include_maintoc=True)\n docnameset.add(includefile)\n except Exception:\n builder.warn('toctree contains ref to nonexisting '\n 'file %r' % includefile,\n builder.env.doc2path(docname))\n else:\n sof = addnodes.start_of_file(docname=includefile)\n sof.children = subtree.children\n for sectionnode in sof.traverse(nodes.section):\n if 'docname' not in sectionnode:\n sectionnode['docname'] = includefile\n newnodes.append(sof)\n toctreenode.parent.replace(toctreenode, newnodes)\n return tree",
"def set_markdown_extensions(site_settings):\n # Base markdown extensions support \"fenced_code\".\n markdown_extensions = [\"fenced_code\"]\n if site_settings[\"pygments\"]:\n markdown_extensions.extend([\n \"extra\",\n \"codehilite(css_class=hlcode)\",\n \"toc(title=Table of Contents)\"\n ])\n\n return markdown_extensions",
"def get_parser():\n parser = (\n MarkdownIt(\"commonmark\")\n .enable(\"table\")\n .use(front_matter_plugin)\n .use(myst_block_plugin)\n .use(myst_role_plugin)\n # we only need to parse block level components (for efficiency)\n .disable(\"inline\", True)\n )\n return parser",
"def build_command_parsing_tree(self):\n root = Start()\n if self.arguments:\n self.arguments.index()\n trav = root\n for rule in self.rules:\n new_trav = trav.build_children_node_from_rule(rule, self.arguments)\n trav = new_trav\n self.syntax_root = root\n return root\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add node to stash | def __stashNode(self, node, type):
placeholder, id = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder | [
"def stash(self):",
"def add_to_tree(self, node):\n # print(node.state)\n # if(node.parent_node is not None):\n # print(\"parent:\", node.parent_node.state)\n # print()\n self.tree.append(node)\n return",
"def create_stash(self, payload, path=None):\n if path:\n self._request('POST', '/stashes/{}/{}'.format(path),\n json.dumps(payload))\n else:\n self._request('POST', '/stashes/', json.dumps(payload))\n return True",
"def stash_push(repo):\n with open_repo_closing(repo) as r:\n from dbnd._vendor.dulwich.stash import Stash\n\n stash = Stash.from_repo(r)\n stash.push()",
"def add_stnode(self, path, shares):\n return self.share_tree_manager.add_stnode(path, shares)",
"def add_node(self, node, key):\n self.get_state()\n if int(key) not in self._sorted_keys:\n self.ring[str(key)] = node\n self._sorted_keys.append(int(key))\n self._sorted_keys.sort()\n self.save_state()",
"def add_node(self, node):\n if node not in self._graph:\n self._graph[node] = {}",
"def insert(self, node):\n\n self.track.append(node)\n node.set_index_heap(len(self.track) - 1)\n if self.root is None:\n self.root = node\n\n else:\n parent = self.track[(node.index_heap - 1) // 2]\n parent.add_child_heap(node)\n # changed = self.sift_up(node)",
"def add(self, node):\n self.nodes.append(node)\n self.count += 1",
"def insert(self, key, value):\n if self.root is None:\n self.root = Node(key, value)\n else:\n self.add(key, value, self.root)",
"def add_node(self, node):\n self.nodes.append(node)\n for x in xrange(self.replicas):\n ringkey = self.hash_method(b(\"%s:%d\" % (node, x)))\n self.ring[ringkey] = node\n self.sorted_keys.append(ringkey)\n\n self.sorted_keys.sort()",
"def add_node(self, node_key, **kwargs):\n node_key = self.wrapper.standardize_key(node_key)\n\n if not node_key in self.tree:\n self.tree.append(node_key)\n\n if not node_key in self.lookup_table:\n self.lookup_table[node_key] = {}\n\n self.lookup_table[node_key].update(kwargs)\n\n if not 'inputs' in self.lookup_table[node_key]:\n self.lookup_table[node_key]['inputs'] = []\n\n self.clean_tree()\n return node_key",
"def add(self, node):\n self.nodes.append(node)\n node.parent_node = self\n node.depth = self.depth + 1\n node.pack(side=\"top\", fill=\"x\", in_=self.get_body(), pady=self.__class__.Node.PADDING)",
"def add_node(cls, cluster_id, branch_id, node):\n\n try: # try to pull the node list out\n\n node_list = cls.cluster_dict[cluster_id][branch_id]\n\n if node not in node_list:\n node_list.append(node)\n cls.cluster_dict[cluster_id][branch_id]= node_list\n\n except: # if no node list exists then just add\n cls.cluster_dict[cluster_id][branch_id] = [node]",
"def add_node(self, node: BONNode):\n self.nodes.append(node)",
"def addToNode(self,name,dic):\n\t\tn = listToPath(name)\n\t\tif not n in self.stats:\n\t\t\tself.stats[n] = dic\n\t\telse:\n\t\t\tself.stats[n].update(dic)\n\t\treturn name",
"def add_hash(self, value):\n self.leaves.append(Node(codecs.decode(value, 'hex_codec'), prehashed=True))",
"def _add_node(self, node):\n\n if not isinstance(node, Vertex):\n raise TypeError(\"Node must be vertex\")\n\n self._nodes[node.get_key()] = node",
"def add(self,node):\r\n self.network.addNode(node)\r\n return node",
"def insert_add_arg(self, node, key, value):\n if node not in self.args:\n self.args[node] = {}\n if 'add' not in self.args[node]:\n self.args[node]['add'] = {}\n self.args[node]['add'][key] = value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply inline patterns to a parsed Markdown tree. Iterate over ElementTree, find elements with inline tag, apply inline patterns and append newly created Elements to tree. If you don't want to process your data with inline patterns, instead of a normal string use the AtomicString subclass. | def run(self, tree):
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement.getchildren():
if child.text and not isinstance(child.text, util.AtomicString):
text = child.text
child.text = None
lst = self.__processPlaceholders(self.__handleInline(
text), child)
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d')
tailResult = self.__processPlaceholders(tail, dumby)
if dumby.text:
child.tail = dumby.text
else:
child.tail = None
pos = currElement.getchildren().index(child) + 1
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
if child.getchildren():
stack.append(child)
for element, lst in insertQueue:
if self.markdown.enable_attributes:
if element.text and isString(element.text):
element.text = \
inlinepatterns.handleAttributes(element.text,
element)
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
# Processing attributes
if newChild.tail and isString(newChild.tail):
newChild.tail = \
inlinepatterns.handleAttributes(newChild.tail,
element)
if newChild.text and isString(newChild.text):
newChild.text = \
inlinepatterns.handleAttributes(newChild.text,
newChild)
element.insert(i, newChild)
i += 1
return tree | [
"def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l",
"def inline_markup_to_html(astr):\n\n markup_to_elem = [(r'\\*', '<b>', '</b>'),\n (r'\\/', '<i>', '</i>'),\n (r'`', '<code>', '</code>')]\n\n def replace(matched):\n \"\"\" Take matched, add opening & closing tags, cgi escape if code \"\"\"\n\n matched_str = matched.groups()[0]\n if match == '`':\n matched_str = cgi.escape(matched_str)\n return opener + matched_str + closer\n\n for match, opener, closer in markup_to_elem:\n astr = wrap_match(match).sub(replace, astr)\n\n return fu.pipe(astr, [convert_markup_links, convert_raw_links])",
"def _handleInline(self, line):\r\n\r\n if not(line):\r\n return [self.doc.createTextNode(' ')]\r\n\r\n for pattern in self.inlinePatterns:\r\n list = self._applyPattern( line, pattern)\r\n if list: return list\r\n\r\n return [self.doc.createTextNode(line)]",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def extendMarkdown(self, md, md_globals):\n\t\tcoder = ActLabTreeProcessor(md)\n\n\t\tmd.treeprocessors.add(\"actlabcode\", coder, \"<inline\")\n\t\tmd.registerExtension(self)",
"def process_markdown(questions):\n for question in questions:\n question['question'] = markdown(question['question'])\n for i in range(len(question['answers'])):\n this = question['answers'][i][0]\n question['answers'][i][0] = markdown_nopara(this)\n return questions",
"def extendMarkdown(self, md):\n # Insert del pattern into markdown parser\n md.inlinePatterns.register(ChordPDFPattern(CHORD_RE), \"chord\", 175)",
"def extendMarkdown(self, md, md_globals):\n md.inlinePatterns[\"mention_link\"] = TwitterMentionPattern(MENTION_RE, md)",
"def markdown(self, s):\n\n \"\"\"\n Start with some helper functions to process each markdown type.\n Each markdown element has a method to handle the specifics. Each\n method is passed the following parameters:\n\n Arguments:\n m -- a list of the elements parsed for the match. m[0] is\n the full matched substring within s.\n s -- the string to process\n new_str -- the string used to build the replacement string.\n Generally of the format 'stuff{}stuff', where\n 'stuff' is markdown, and {} is replaced with the\n text between the markdown tags.\n\n Returns:\n Modified string with inline markdown element expanded.\n \"\"\"\n def md_vars(m, s, new_str):\n \"\"\"\n Handle inline link and vars: [variable_name]\n\n See docstring in code for argument information.\n \"\"\"\n def makeJitAttrs(params):\n d = {l[0]: l[1] for l in self._special_parameter.regex.findall(params)}\n return d\n\n self.debug.print(\"mdvars(<strong>m[0])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[0])))\n self.debug.print(\"mdvars(<strong>m[1])=</strong><em>{}</em>\".format(HtmlUtils.escape_html(m[1])))\n self.debug.print(\"mdvars(<strong>s)=</strong><em>{}</em>\".format(HtmlUtils.escape_html(s)))\n jit_attrs = None if not m[3] else makeJitAttrs(m[3])\n if self._namespaces.exists(m[1]):\n # Substitute the variable name with the value\n c, v = self._stripClass(self._namespaces.getValue(m[1], jit_attrs))\n v = self._md_value(v)\n if(not c):\n # print(\"OLD: {}<br />\\nNEW: {}<br />\".format(m[0], v))\n s = s.replace(m[0], v)\n else:\n s = s.replace(m[0], '<{0}{1}>{2}</{0}>'.format('span', c, v))\n else:\n # No need to do anything here, just leave the unknown link/variable alone\n pass\n\n return s\n\n def md_plain(m, s, new_str):\n \"\"\"\n Handle simple replacement markdown. e.g. *foo* or **bar**, etc.\n\n See docstring in code for argument information.\n \"\"\"\n return s.replace(m[0], new_str.format(m[1]))\n\n # A map linking markdown keys to processor functions\n markdownTypes = [\n ('vars', md_vars),\n ('strong', md_plain),\n ('emphasis', md_plain),\n ('ins', md_plain),\n ('del', md_plain),\n ]\n\n self._inc_nesting_level()\n self.debug.print(\"markdown({})\".format(HtmlUtils.escape_html(s)))\n # For each type of markdown\n for key, md_func in markdownTypes:\n md_obj = self._regex_markdown[key]\n matches = findall(md_obj.regex, s) # find all the matches\n for m in matches:\n # for each match, process it\n s = md_func(m, s, md_obj.new_str)\n\n #print(\"RETURN: {}\".format(s))\n self._dec_nesting_level()\n return s # return the processed string",
"def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])",
"def extendMarkdown(self, md, md_globals):\n\n adicon = AdmonitionIconTreeprocessor(md)\n md.treeprocessors.add(\"admonitionicon\", adicon, \">inline\")\n md.registerExtension(self)",
"def add_img_attributes(html: ET.Element, content_dir: Path, markdown: Path) -> ET.Element:\n assert content_dir.is_absolute()\n assert markdown.is_absolute()\n \n def visit(element: ET.Element):\n if element.tag == 'img':\n add_attributes(element, content_dir, markdown)\n return\n \n for child in element:\n visit(child)\n \n visit(html)\n return html",
"def extendMarkdown(self, md, md_globals):\r\n md.parser = MarkdownParser()\r\n md.preprocessors.add (\"header\", HeaderPreprocessor(self), \"<reference\")\r\n md.preprocessors.add(\"line\", LinePreprocessor(self), \"<reference\")",
"def inline_tag_thing(raw_body):\n\n tags = []\n\n def replace(matched):\n \"\"\" Take matched, mutate closed-over tag list, return link markup \"\"\"\n\n matched_str = matched.groups()[0]\n tags.append(matched_str)\n return '\"{0}\"->{1}'.format(matched_str, urlize(matched_str))\n\n raw_body = wrap_match('\\|').sub(replace, raw_body)\n return raw_body, tags",
"def parse_markdown(self, markdown):\n return self.markdown.render(body)",
"def render_inline_images_for(html, article):\n token = \"%placeholder%\"\n for image in article.articleimage_set.all():\n try:\n \"\"\"\n @Stephen\n \n 1. This needs to change to match a subset of the image,\n such that filebrowser 'versions' will work\n 2. This seems to spitting out an extra '>', &\n 3. if width & height is specified on the image attributes,\n those need to flow through to the template\n \"\"\"\n url = image.image.url_relative\n except AttributeError:\n continue\n while url in html:\n start = html.rfind(\"<\", 0, html.find(url))\n end = html.find(\">\", html.find(url)) + 1\n html = html[:start] + token + html[end:]\n image.inline_width = 300\n image.inline_height = 200\n \n t = get_template(\"articles/_inline_image.html\")\n rendered = t.render(Context({\"inline_image\": image}))\n html = html.replace(token, rendered)\n \n return html",
"def __process_inline_link_tag(self, line, type_of_link='Inline'):\n\n if type_of_link == 'Inline':\n regex_patterns = [\n regex.compile(\n r'(\\[[\\S\\s]+\\])\\ *\\(((http|https)\\:\\/\\/'\n r'?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.[a-zA-Z]'\n r'{2,6}[a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#]*)\\)'\n )\n ]\n elif type_of_link == 'Image':\n regex_patterns = [\n regex.compile(r'\\!\\[([\\S\\s]*)\\]\\(([\\S\\s]*)\\)')\n ]\n else:\n regex_patterns = [\n regex.compile(\n r'\\<((http|https)\\:\\/\\/?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]'\n r'+\\.[a-zA-Z]{2,6}[a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#]*)\\>'\n ),\n regex.compile(\n r'\\<(\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+)\\>'\n )\n ]\n\n for regex_pattern in regex_patterns:\n if regex.search(regex_pattern, line):\n matched_parts = regex.search(regex_pattern, line)\n if type_of_link == 'Inline':\n inner_text = matched_parts.group(1).strip('[').strip(']')\n link = matched_parts.group(2)\n tag = f'<a href=\"{link}\">{inner_text}</a>'\n elif type_of_link == 'Image':\n alt_text = matched_parts.group(1)\n source = matched_parts.group(2)\n tag = f'<img src=\"{source}\" alt=\"{alt_text}\">'\n else:\n link = matched_parts.group(1)\n tag = f'<a href=\"{link}\">{link}</a>'\n line = regex.sub(regex_pattern, tag, line)\n return line",
"def markdown(text):\n\n def pygments(m):\n return highlight(m.group(2), get_lexer_by_name(m.group(1)), HtmlFormatter())\n\n # add newline after a begin code tag\n text = text.replace(\"<code>\", \"<code>\\n\")\n # Pygments where <code> tags have a class\n text = re.sub(re.compile('<code class=\"([^\"]*)\">((.|\\n)*?)</code>'), pygments, text)\n # apply markdown\n return markup.markdown(text)",
"def generate_markdown(pelican):\n global enabled\n if not enabled:\n return\n\n include_regex = pelican.settings.get('PELIGRAM_INCLUDE_REGEX')\n media_patterns=pelican.settings.get(\"PELIGRAM_MEDIA_PATTERNS\", DEFAULT_INSTAGRAM_MEDIA_PATTERNS)\n\n if include_regex:\n pattern = re.compile(include_regex)\n is_included = lambda name: pattern.match(name)\n else:\n is_included = lambda name: not name.startswith('.')\n\n in_path = instagram_data_path(pelican)\n logger.debug(\"pelican-gram started\")\n processor=_processor(pelican,in_path)\n for dirpath, _, filenames in os.walk(in_path):\n for filename in filenames:\n if is_included(filename):\n if filename.endswith('.json'):\n logger.debug(f\"Processing file: {filename}\")\n media_filenames=sum(list(map(lambda pattern: fnmatch.filter(filenames,path.splitext(filename)[0]+pattern),media_patterns)),[])\n processor.process_instagram_metadata(filename,media_filenames)",
"def parse_elements(text):\n \n \n # sanitise and split using BeautifulSoup\n soup = BeautifulSoup(parse(text))\n elements = [e for e in soup.contents if type(e) == Tag]\n \n # wrap blocks in <div>\n format = u\"<div class='doccomment-block' id='DE-%d'>\\n%s\\n</div>\"\n for seq,txt in enumerate(elements):\n elements[seq] = format % (seq, txt)\n \n return elements"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if the tag is a block level HTML tag. | def isBlockLevel(tag):
if isinstance(tag, string_type):
return BLOCK_LEVEL_ELEMENTS.match(tag)
# Some ElementTree tags are not strings, so return False.
return False | [
"def is_block_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"block\"",
"def IsBlock(self) -> bool:",
"def is_block(self):\n if self.get_level() == 1:\n return True\n else:\n return False",
"def IsBlock(block_name):\n idef = scriptcontext.doc.InstanceDefinitions.Find(block_name)\n return (idef is not None)",
"def inspectblocktype(self, block_type):\n try:\n # try via header\n return self.data.header.has_block_type(block_type)\n except ValueError:\n # header does not have the information because nif version is\n # too old\n return True",
"def hasTag(self,tag):\n return HopperLowLevel.basicBlockHasTag(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__,tag.__tag_internal__)",
"def isTag(self, line):\n\t\n\t\treturn re.match('^\\s*(LÄNK|BILD|RUBRIK|STYCKE|LISTA|NUMMER|CITAT|TEXT|HTML|KOD|RUTA)', line)",
"def is_inline_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"inline\"",
"def _is_before_body(tag):\n next_div = tag.find_next_sibling('div')\n next_table = tag.find_next_sibling('table')\n if tag.name != 'div' or (next_div is None and next_table is None):\n return False\n return (re.search(r'^\\s*$', next_div.text) is None or next_table is not None or\n next_div.find('img') is not None)",
"def is_top_block(self):\n return self._parent_block is None",
"def is_body_html(self) -> bool:\n return self._is_body_html",
"def test_body(self):\n child_block = self.block.child_blocks[\"body\"]\n\n self.assertIsInstance(child_block, RichTextBlock)\n self.assertFalse(child_block.required)",
"def is_html(text):\n if text is not None and '<html' in text[:300].lower():\n return True\n return False",
"def is_xml_tree(el: bs4.Tag) -> bool:\n\n return bool(el._is_xml)",
"def is_root(self, el: bs4.Tag) -> bool:\n\n root = self.root and self.root is el # type: ignore[attr-defined]\n if not root:\n parent = self.get_parent(el)\n root = parent is not None and self.is_html and self.is_iframe(parent) # type: ignore[attr-defined]\n return root",
"def is_html(self):\n return self._is_html",
"def is_iframe(self, el: bs4.Tag) -> bool:\n\n return bool(\n ((el.name if self.is_xml_tree(el) else util.lower(el.name)) == 'iframe') and\n self.is_html_tag(el) # type: ignore[attr-defined]\n )",
"def tagIs(self, tag):\n return self.tag == tag",
"def token_descendant_mixed(self, tk):\n\t\t\tif tk.name == \"StartElement\":\n\t\t\t\t# Mark every descendant:\n\t\t\t\tif (tk.content_model in [2,3] and self.desc_mixed_level is None):\n\t\t\t\t\tself.desc_mixed_level = tk.level\n\t\t\t\t\treturn False\n\t\t\t\treturn (self.desc_mixed_level is not None)\n\t\t\telif tk.name == \"EndElement\":\n\t\t\t\t# Stop marking every descendant:\n\t\t\t\tif (tk.level is self.desc_mixed_level):\t\n\t\t\t\t\tself.desc_mixed_level = None\n\t\t\t\telif (self.desc_mixed_level is not None):\n\t\t\t\t\treturn True\n\t\t\t\treturn False\n\t\t\telif (self.desc_mixed_level is None):\n\t\t\t\treturn False\n\t\t\treturn (self.desc_mixed_level >= tk.level-1)",
"def match_html_tags(html_data):\n S = LinkedListStack()\n\n # find the position of the first left angle bracket\n left_bracket = html_data.find(\"<\")\n while left_bracket != -1:\n # find the position of the corresponding right angle bracket\n right_bracket = html_data.find(\">\", left_bracket + 1)\n if right_bracket == -1:\n return False\n\n # get the tag\n tag = html_data[left_bracket + 1:right_bracket]\n\n # determine if it is a closing or opening tag. If it is an opening tag, add it to the stack else check for its corresponding opening tag on the stack\n if tag.startswith(\"/\"):\n if S._is_empty():\n return False\n opening_tag = S.pop().value\n if opening_tag != tag[1:]:\n return False\n else:\n S.push(tag)\n # find the next left bracket\n left_bracket = html_data.find(\"<\", right_bracket + 1)\n # checks if all opening tags have been matched at the end of the script\n return S._is_empty()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a string representing a bool value. If parsing was successful, returns True or False. If parsing was not successful, raises ValueError, or, if fail_on_errors=False, returns None. | def parseBoolValue(value, fail_on_errors=True):
if not isinstance(value, string_type):
return bool(value)
elif value.lower() in ('true', 'yes', 'y', 'on', '1'):
return True
elif value.lower() in ('false', 'no', 'n', 'off', '0'):
return False
elif fail_on_errors:
raise ValueError('Cannot parse bool value: %r' % value) | [
"def parse_bool(value):\n return bool({\n 'True': True,\n 'False': False\n }.get(value, value))",
"def str2bool(text: str) -> bool:\n text = text.lower()\n if text == \"true\":\n return True\n elif text == \"false\":\n return False\n else:\n raise ValueError(f\"Cannot parse bool: '{text}'\")",
"def _get_bool(val) -> bool | None:\n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val.strip().lower() == \"true\":\n return True\n elif val.strip().lower() == \"false\":\n return False\n return None",
"def strtobool(val):\n val = val.lower()\n if val in {\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"}:\n return 1\n if val in {\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"}:\n return 0\n raise ValueError(f\"invalid truth value {val!r}\")",
"def bool_converter(val):\n return bool(strtobool(str(val)))",
"def parse_bool(val) -> str:\n return str(val).lower() if isinstance(val, bool) else val",
"def cast_bool(value) -> bool:\n if isinstance(value, bool):\n return value\n if isinstance(value, str):\n if value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n else:\n raise ValueError(\"Ambiguous value of \" + value)\n else:\n raise TypeError(\"Unsupported type of {} with value {}\"\n .format(type(value), value))",
"def get_bool(item):\r\n\r\n if str(item).lower() in ['true','yes','1','t','y']:\r\n return True\r\n if str(item).lower() in ['false', 'no', '0', 'f', 'n']:\r\n return False\r\n raise ValueError(\"'%s' cannot be parsed into a boolean value\" % item)",
"def parse_bool(s, default=False):\n if s is None:\n return default\n return TRUTH.get(s.lower(), default)",
"def convert_boolean(s, l, tokens):\n if tokens[0] == 'True':\n return True\n elif tokens[0] == 'False':\n return False",
"def boolean_from_str(src):\n if src is None:\n return None\n elif src == \"true\":\n return True\n elif src == \"false\":\n return False\n elif src == \"1\":\n return True\n elif src == \"0\":\n return False\n else:\n raise ValueError",
"def _tristate_bool_option(val: str) -> Union[None, bool]:\n val = val and val.strip().lower()\n if not val:\n return None\n if val in \"true 1 yes on\".split():\n return True\n if val in \"false 0 no off\".split():\n return False\n raise ValueError(f\"invalid boolean {val!r} supplied\")",
"def str_to_bool(s):\n return s and not s.lower() == 'false'",
"def Bool(val):\n if type(val) is bool:\n return val\n if isinstance(val, str):\n v = val.upper()\n if v in {'TRUE', 'YES', 'T', 'Y', '1'}:\n return True\n if v in {'FALSE', 'NO', 'F', 'N', '0'}:\n return False\n elif int(val) == float(val):\n v = int(val)\n if v in {0, 1}:\n return bool(v)\n raise ValueError(\"Expected Boolean, but received %s\" % (val,))",
"def BoolGET(val):\n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val == '1' or val.lower() == 'true':\n return True\n elif val == '0' or val.lower() == 'false':\n return False\n raise Invalid(\n 'boolean must be \"1\", \"true\", \"0\", or \"false\" (case insensitive)'\n )",
"def parse_boolean_literal(ast, _variables=None):\n if isinstance(ast, BooleanValueNode):\n return ast.value\n return INVALID",
"def parse_bool_literal(\n tokens: List[lexer.LexerToken],\n) -> Tuple[LiteralToken, List[lexer.LexerToken]]:\n\n # It is either true (optional)\n true, tokens = eat_one(tokens, lexer.KeywordToken, False, \"true\")\n if true:\n return LiteralToken(true), tokens\n\n # Or false (required, because if it would be true we would've returned already)\n false, tokens = eat_one(tokens, lexer.KeywordToken, with_value=\"false\")\n return LiteralToken(false), tokens",
"def test_parse_bool_false_for_non_truthy_values():\n assert_false(eg_config._parse_bool_from_raw_egrc_value(''))\n assert_false(eg_config._parse_bool_from_raw_egrc_value(None))\n assert_false(eg_config._parse_bool_from_raw_egrc_value('false'))\n assert_false(eg_config._parse_bool_from_raw_egrc_value('False'))",
"def _convert_yaml_to_bool(_yaml_bool_value):\n true_values = ['yes', 'true']\n if _yaml_bool_value.lower() in true_values:\n _bool_value = True\n else:\n _bool_value = False\n return _bool_value",
"def str_to_bool(attr: str) -> bool:\n if attr != \"True\" and attr != \"False\":\n raise ValueError(\n \"The attribute is not a string representation of a Python\"\n \"bool ('True' or 'False')\"\n )\n\n bool_attr = json.loads(attr.lower())\n return bool_attr"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves an HTML segment for later reinsertion. Returns a placeholder string that needs to be inserted into the document. | def store(self, html, safe=False):
self.rawHtmlBlocks.append((html, safe))
placeholder = self.get_placeholder(self.html_counter)
self.html_counter += 1
return placeholder | [
"def save(self, destination):\n if not self.html:\n self.html = file_html(self.current_plot, CDN, None)\n with open(destination, 'w') as output_file:\n output_file.write(self.html)\n self.filename = destination\n return self.filename",
"def save(self):\n html_file='%s/index.html'%self.web_dir\n with open(html_file,'wt') as file:\n file.write(self.doc.render())",
"def savePage(self):\r\n #data = self.title +\"\\n\"+ Cleaner().page_cleaner(self.text)\r\n data = self.title +\"\\n\"+ self.text\r\n\r\n \r\n filename = str(self.pageCounter)+ \".txt\"\r\n file_path = os.path.join(self.directory, filename)\r\n\r\n #print(\"file path: \", file_path)\r\n if not os.path.isdir(self.directory):\r\n os.mkdir(self.directory)\r\n \r\n file = io.open(file_path, \"w\",encoding=\"utf-8\")\r\n file.write(data)\r\n file.close()\r\n\r\n #print(r'Saved file: ',self.pageCounter, \".txt\")\r",
"def save_rst(str, case_name):\n print(str)\n with open(os.path.join(\"img\", \"{}.rst\".format(case_name)), \"w\") as fil:\n fil.write(str)",
"def save(self, *args, **kwargs):\n\n self.body_html = apply_markup_filter(self.body)\n super(LegalPage, self).save(*args, **kwargs)",
"def view(s):\n f = open('blah.html','w')\n f.write(s)\n f.close()",
"def dump_html(self, html):\n\n f = open(self.path, 'a')\n f.write(html)\n f.close()",
"def save(self):\n self.path.write_text(toml.dumps(self.tomldoc))",
"def save(title=None, temporary=False):",
"def writePage(html, filename):\n print('Saving ' + filename)\n with open(filename, 'w') as f:\n f.write(str(html))\n print('-'*50)",
"def save(self, *args, **kwargs):\n\t\tif self.title == None or len(self.title) == 0: self.title = str(self.doc)\n\t\tif self.title.rfind('/') != -1: self.title = self.title[self.title.rfind('/') + 1:]\n\t\tsuper(Document, self).save(*args, **kwargs)",
"def saveNewContent(self, newContent):\n self.saveFromBlocks([newContent])",
"def write(html):\n with open(f\"html_{datetime.today()}.html\", \"w\") as f:\n f.write(html)",
"def save(self, kwargs):\n self.logger.info(\"Saving chart\")\n if kwargs.get(\"fromIRC\"):\n summary = self.summary + \" (!earwigbot)\"\n else:\n if self.shutoff_enabled():\n return\n summary = self.summary\n\n statistics = self.compile_charts()\n\n page = self.site.get_page(self.pagename)\n text = page.get()\n newtext = re.sub(u\"<!-- stat begin -->(.*?)<!-- stat end -->\",\n \"<!-- stat begin -->\\n\" + statistics + \"\\n<!-- stat end -->\",\n text, flags=re.DOTALL)\n if newtext == text:\n self.logger.info(\"Chart unchanged; not saving\")\n return # Don't edit the page if we're not adding anything\n\n newtext = re.sub(\"<!-- sig begin -->(.*?)<!-- sig end -->\",\n \"<!-- sig begin -->~~~ at ~~~~~<!-- sig end -->\",\n newtext)\n page.edit(newtext, summary, minor=True, bot=True)\n self.logger.info(u\"Chart saved to [[{0}]]\".format(page.title))",
"def argSaveText(breakLevel, site, cont, export):\n if site.att is None:\n return filefmt.SegmentedText(\"$Empty$\")\n return site.att.createSaveText(breakLevel, cont, export)",
"def save_tag(self):\n self.save()",
"def save( self ):\n\n # load existing comments\n c = jpeg.getComments( self.fname )\n\n # Find previous metadata and get comments that precede and follow\n # our metadata (so we don't nuke somebody else's comments).\n before = ''\n after = ''\n i = c.find( BEGIN_TAG )\n if i == -1:\n # No previous tags\n before = c\n\n else:\n # Get before\n before = c[:i]\n\n # And get after\n i = c.find( END_TAG )\n assert i != -1, \"Bad metadata\"\n after = c[i+len( END_TAG ):]\n\n # Generate metadata block\n meta = BEGIN_TAG\n for ( name, value ) in self.items():\n meta = '%s<%s>%s</%s>' % ( meta, name, value, name )\n meta = '%s%s' % ( meta, END_TAG )\n\n # Write comments back out\n jpeg.setComments( '%s%s%s' % ( before, meta, after ), self.fname )",
"def PlaceAtTag(self, tag, newText):\n \n index = self.text.find(\"<!--tag:{}-->\".format(tag))\n if index > -1:\n newStr = self.text[:index]\n newStr += newText\n newStr += self.text[index:]\n self.text = newStr\n logging.debug(\"Succesfully placed string in file.\")\n else:\n logging.debug(\"Could not find tag {0} in {1}\".format(tag, \n self.template))",
"def _insert_html(self, cursor, html):\n cursor.beginEditBlock()\n cursor.insertHtml(html)\n\n # After inserting HTML, the text document \"remembers\" it's in \"html\n # mode\", which means that subsequent calls adding plain text will result\n # in unwanted formatting, lost tab characters, etc. The following code\n # hacks around this behavior, which I consider to be a bug in Qt, by\n # (crudely) resetting the document's style state.\n cursor.movePosition(QtGui.QTextCursor.Left,\n QtGui.QTextCursor.KeepAnchor)\n if cursor.selection().toPlainText() == ' ':\n cursor.removeSelectedText()\n else:\n cursor.movePosition(QtGui.QTextCursor.Right)\n cursor.insertText(' ', QtGui.QTextCharFormat())\n cursor.endEditBlock()",
"def _save_button_clicked(self):\n\n fileName, _ = QtWidgets.QFileDialog.getSaveFileName(self,\"Save File\",UWBsim.BASE_DIR,\"All Files (*);;YAML files (*.yaml)\")\n \n yaml_dump = {}\n for i in range(len(self.anchor_positions)):\n key = str(i)\n yaml_dump[key] = {}\n yaml_dump[key]['x'] = str(self.anchorLineEdits[i][0].text())\n yaml_dump[key]['y'] = str(self.anchorLineEdits[i][1].text())\n yaml_dump[key]['z'] = str(self.anchorLineEdits[i][2].text())\n\n if not fileName.endswith('.yaml'):\n fileName = fileName + '.yaml'\n \n with open(fileName, 'w') as f:\n yaml.safe_dump(yaml_dump, f)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Store tag data and return a placeholder. | def store_tag(self, tag, attrs, left_index, right_index):
self.tag_data.append({'tag': tag, 'attrs': attrs,
'left_index': left_index,
'right_index': right_index})
placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
self.tag_counter += 1 # equal to the tag's index in self.tag_data
return placeholder | [
"def add_new(self, tag, VR, value):\n data_element = DataElement(tag, VR, value)\n self[data_element.tag] = data_element # use data_element.tag since DataElement verified it",
"def _save(self, data, etag):\n cache.set(self.ETAG_KEY, etag, self._cache_timeout)\n cache.set(self.DATA_KEY, data, self._cache_timeout)",
"def save_tag(self):\n self.save()",
"def set(self, tag: str, value: str) -> None:",
"def store_one(self, data: dict):\n raise NotImplementedError(\"`store` not implemented in sub-handler: {}\".format(self))",
"def store_tweet_tag(tweet_id, hashtag):\n dab = db.db_conection()\n cursor = dab.cursor()\n try:\n cursor = dab.cursor()\n insert_query = \"INSERT INTO `tweet_tags` (`tweet_id`, `tag`) VALUES (%s, %s)\"\n cursor.execute(insert_query, (tweet_id, str(hashtag)))\n dab.commit()\n cursor.close()\n dab.close()\n except pymysql.Error as mysql_err:\n logging.info(\"osint-tw - \", mysql_err)\n dab.close()\n return",
"def register_data(self):\n raise NotImplementedError",
"def PlaceAtTag(self, tag, newText):\n \n index = self.text.find(\"<!--tag:{}-->\".format(tag))\n if index > -1:\n newStr = self.text[:index]\n newStr += newText\n newStr += self.text[index:]\n self.text = newStr\n logging.debug(\"Succesfully placed string in file.\")\n else:\n logging.debug(\"Could not find tag {0} in {1}\".format(tag, \n self.template))",
"def _save_to_buffer(self, data):\n \n # We're not interested in data that isn't in a tag.\n if not self.xml_buffer:\n return\n \n self.xml_buffer[-1] += data",
"def get_or_create_tag(cls, session, tagname):\n tag = cls.get_tag(tagname)\n if tag:\n return tag\n tag = cls(tag_string=tagname)\n session.add(tag)\n session.commit()\n return tag",
"def add_tag(self, tag: str, content):\n if tag in self:\n if type(self[tag]) == type(content):\n self[tag] = [self[tag], content]\n elif type(self[tag]) is list:\n self[tag].append(content)\n else:\n print(\"Wrong tag <{}> content\".format(tag))\n exit(-1)\n else:\n self[tag] = content",
"def set_data(self, data):\n if data is None:\n gbp.log.debug(\"BUG: trying to store 'None', not allowed\")\n data = \"\"\n self._data = data",
"def store_skill(skill):\n # Retrieve the tag for the skill, or fall back to 'unnamed'.\n skill_tag = skill.get(\"tag\", \"unnamed\")\n\n if skill_tag in skills and skills[skill_tag][\"path\"] != skill[\"path\"]:\n if \"-\" in skill_tag:\n prefix, _ = skill_tag.split(\"-\")\n skill_tag = f\"{prefix}-{duplicate_suffix(prefix)}\"\n else:\n skill_tag = f\"{skill_tag}-{duplicate_suffix(skill_tag)}\"\n\n # Set the unique tag:\n skill[\"tag\"] = skill_tag\n\n # Store the skill\n skills[skill_tag] = skill\n\n # Return this (now definitely unique) tag.\n return skill_tag",
"def setTag(self, t):\r\n self.tag = t",
"def add_placeholder(self, name, dtype, shape=None):\r\n\r\n overwrite_shape = self.get_input_shape(name)\r\n if overwrite_shape:\r\n shape = overwrite_shape\r\n placerholder_tensor = self._network.add_input(name=name, dtype=TRTNetworkBuilder._to_dtype(dtype), shape=shape)\r\n self._remember_op_output(placerholder_tensor, name)\r\n return placerholder_tensor",
"def create_tag(self, key=None, category=None, data=None):\n global _Tag\n if not _Tag:\n from src.typeclasses.models import Tag as _Tag\n return _Tag.objects.create_tag(key=key, category=category, data=data)",
"def do_placeholder(parser, token):\n name, params = parse_placeholder(parser, token)\n return PlaceholderNode(name, **params)",
"def append_data(self, key: str, data):\n self.__storage[key] = data",
"def get_or_create_tag_value(self, project_id, key, value, **kwargs):\n raise NotImplementedError",
"def create_tag(self, key=None, category=None, data=None):\r\n global _Tag\r\n if not _Tag:\r\n from src.typeclasses.models import Tag as _Tag\r\n return _Tag.objects.create_tag(key=key, category=category, data=data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run Markdown from the command line. | def run():
# Parse options and adjust logging level if necessary
options, logging_level = parse_options()
if not options: sys.exit(2)
logger.setLevel(logging_level)
logger.addHandler(logging.StreamHandler())
# Run
markdown.markdownFromFile(**options) | [
"def run_markdown(cpp_filename):\n basename, ext = os.path.splitext(cpp_filename)\n # https://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output\n # https://stackoverflow.com/questions/35160256/how-do-i-output-lists-as-a-table-in-jupyter-notebook\n\n # Run executable while capturing output\n result = subprocess.run(\n [os.path.join(os.curdir, basename)],\n stdout=subprocess.PIPE,\n check=True,\n )\n # present output as a markdown\n disp.display(disp.Markdown(result.stdout.decode()))",
"def test_cli_md_nb():\n nb_s = _podoc('--no-pandoc -f markdown -t notebook',\n stdin='hello *world*')\n # From dict string to notebook.\n nb = Podoc(with_pandoc=False).loads(nb_s, 'notebook')\n assert nb.cells[0].cell_type == 'markdown'\n assert nb.cells[0].source == 'hello *world*'",
"def jupyter_to_markdown(path):\n\n args = ['jupyter', 'nbconvert', '--to', 'html', path]\n child = subprocess.call(args)",
"def main():\n\n src_dir = Path(\"src\")\n build_dir = Path(\"build\")\n # Get the pweave files from the source directory as strings\n files = [f.name for f in src_dir.glob(\"*.pmd\")]\n\n for f in files:\n # Turn python markdown into HTML\n weave(\n src_dir.joinpath(f),\n output=build_dir.joinpath(f.replace(\"pmd\", \"html\")),\n cache=True,\n )",
"def main():\n\n parser = OptionParser(\n description=\"Embiggen embiggens your HTML generation\")\n parser.add_option('--indent-string', default='\\t',\n help='the string to prepend at each indentation level',\n metavar='INDENT_STRING')\n parser.add_option('--close-tag-guides', default=False, action='store_true',\n help='add comments at the end of divs')\n (options, _) = parser.parse_args()\n\n for line in sys.stdin:\n print embiggen(line, options.indent_string, '\\n',\n options.close_tag_guides)",
"def test_markup_markdown(self):\r\n\r\n a = self.new_article('Demo', '''A First Level Header\r\n====================\r\n\r\nA Second Level Header\r\n---------------------\r\n\r\nNow is the time for all good men to come to\r\nthe aid of their country. This is just a\r\nregular paragraph.''', markup=MARKUP_MARKDOWN)\r\n a.do_render_markup()\r\n\r\n print a.rendered_content",
"def parse(text):\n md_extensions = getattr(settings, \"DOCCOMMENT_MARKDOWN_EXTENSIONS\", DEFAULT_EXTENSIONS)\n md_safemode = getattr(settings, \"DOCCOMMENT_MARKDOWN_SAFEMODE\", DEFAULT_SAFEMODE)\n return markdown(text, md_extensions, safe_mode=md_safemode)",
"def markdown(text):\n\n def pygments(m):\n return highlight(m.group(2), get_lexer_by_name(m.group(1)), HtmlFormatter())\n\n # add newline after a begin code tag\n text = text.replace(\"<code>\", \"<code>\\n\")\n # Pygments where <code> tags have a class\n text = re.sub(re.compile('<code class=\"([^\"]*)\">((.|\\n)*?)</code>'), pygments, text)\n # apply markdown\n return markup.markdown(text)",
"def markdown_to_docx(path):\n\n \"\"\"pandoc titanic_transform.html -s -o titanic.docx\"\"\"\n\n \"\"\"titanic_transform.html -f markdown -t html | pandoc -f html -t docx -o titanic.docx\"\"\"\n args = ['pandoc', path, '-s', '-o', './jupyter/titanic.docx']\n child = subprocess.call(args)",
"def test_markdown(self):\n self.session.post.return_value = unittest.mock.Mock(ok=True)\n text = \"##Hello\"\n mode = \"markdown\"\n self.instance.markdown(text=text, mode=mode)\n self.post_called_with(\n url_for(\"markdown\"), data={\"text\": text, \"mode\": mode}, headers={}\n )",
"def parse_markdown_readme():\n # Attempt to run pandoc on markdown file\n import subprocess\n try:\n subprocess.call(\n ['pandoc', '-t', 'rst', '-o', 'README.rst', 'README.md']\n )\n except OSError:\n return LONG_DESCRIPTION\n\n # Attempt to load output\n try:\n readme = open(join(dirname(__file__), 'README.rst'))\n except IOError:\n return LONG_DESCRIPTION\n return readme.read()",
"def apply_markdown( request ):\r\n markup = markdown( request.POST['data'], extensions=['codehilite'] )\r\n return render_to_response( 'utils/markup/markdown/preview.html',\r\n {'preview':markup},\r\n context_instance=RequestContext(request))",
"def render_readme(dir_name):\n print(\"Rendering new README.md file in {} ...\".format(dir_name))\n cmd = 'makeDoc.py {}'.format(dir_name)\n os.system(cmd)",
"def markdown_html(text):\n return markdown.markdown(text)",
"def publish_markdown(filename):\n # suppose post `title` is just `filename` without prefix-dirpath and suffix-extension\n title = os.path.splitext(os.path.basename(filename))[0]\n # suppose markdown-file and images are in the same folder\n img_base_path = os.path.dirname(filename)\n\n markdown_file = codecs.open(filename, 'r', encoding='utf-8')\n text = markdown_file.read()\n description = markdown.markdown(text, extensions=['extra', 'toc'])\n\n # wrapping `description` in div#markdown is just my favor, it's not necessary\n description = '<div id=\"markdown\">\\n\\n%s\\n\\n</div>' % description\n\n post.new(title, description, img_base_path)",
"def test_generate_terminal_markdown():\n assert gen_term.main() is True",
"def launch_dream_latex():\n cmd = \"/usr/local/bin/mvim /Users/Fandekasp/Documents/testdream.tex --cmd\"\n logger.info(\"Vim called\")\n logger.debug(cmd.split() + [':se lines=40 columns=80'])\n subprocess.call(cmd.split() + [':se lines=40 columns=80'])",
"def run(self, md_file, save_path='', save=True):\n self.MD_FILE = md_file\n if save_path == '':\n # use the same dir as md_file\n self.SAVE_PATH = os.path.dirname(os.path.abspath(self.MD_FILE))\n else:\n self.SAVE_PATH = save_path\n # read markdown file into str\n try:\n with open(self.MD_FILE, 'r') as f:\n md = f.read()\n except:\n # for \"utf-8 with dom\" format\n with open(self.MD_FILE, 'r', encoding='utf-8-sig') as f:\n md = f.read()\n # findall all  commands\n url_commands = re.findall(r\"!\\[[\\s\\S]*?\\]\\(.+?\\)\", md)\n # download each img and change the  commands to new REPLACE_MODE format\n for id, each in enumerate(url_commands):\n id = str(id)\n url = re.findall(r\"!\\[[\\s\\S]*?\\]\\((.+?)\\)\", each)\n if url:\n url = url[0]\n print(\"Downloading: {}\".format(url))\n\n else:\n raise ValueError('Err in matching url in {}'.format(each))\n save_name, file_type = self._pic_download(\n url, self.SAVE_PATH, id)\n print('Done')\n replace_str = self.REPLACE_MODE.format(id+file_type)\n print('Replacing {} to {}'.format(each, replace_str))\n md = md.replace(each, replace_str)\n\n # save the new markdown file\n print('#'*80)\n print('Saving modifieded markdown file into {}'.format(\n os.path.join(self.SAVE_PATH, '_'+self.MD_FILE)))\n if save:\n with codecs.open(os.path.join(self.SAVE_PATH, '_'+self.MD_FILE), \"w\", \"utf-8\") as f:\n f.write(md)\n print('Done!')\n return md",
"async def markdown(self):\n return await self.wiki.http.get_markdown(self.title)",
"def main():\n\n # file-specific constants\n section_header = 'Python Scikit-learn Models'\n table_header_list = ['Model Name', 'Model Description', 'Data Name',\n 'Data Description', 'Performance Metric 1',\n 'Performance Metric 2']\n\n # determine output markdown filename from current filename\n current_path = re.split(r'[\\\\/]', inspect.getfile(inspect.currentframe()))\n current_fname_prefix = current_path[-1].split('.')[0]\n out_txt_fname = current_fname_prefix + '.txt'\n\n # run benchmark models\n models = run_models()\n\n # generate markdown\n gen_table_md(models, section_header, table_header_list, out_txt_fname)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encode codec that converts Unicode characters into named entities (where the names are known), or failing that, numerical entities. | def named_entities_codec(text):
if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):
s = []
for c in text.object[text.start:text.end]:
if ord(c) in codepoint2name:
s.append(u'&%s;' % codepoint2name[ord(c)])
else:
s.append(u'&#%s;' % ord(c))
return ''.join(s), text.end
else:
raise TypeError("Can't handle %s" % text.__name__) | [
"def named_entities_codec(text):\r\n \r\n if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):\r\n s = []\r\n for c in text.object[text.start:text.end]:\r\n if ord(c) in codepoint2name:\r\n s.append('&{};'.format(codepoint2name[ord(c)]))\r\n else:\r\n s.append('&#{};'.format(ord(c)))\r\n return ''.join(s), text.end\r\n else:\r\n raise TypeError(\"Can't handle {}\".format(text.__name__))",
"def _unicode_encode(self, value):\n splits = self.high_codepoints_re.split(value)\n enc_value = b''\n str_len = 0\n for s in splits:\n if self.high_codepoints_re.match(s):\n str_len += 2\n enc_value += self._encode_to_surrogate_pair(s)\n else:\n str_len += len(s)\n enc_value += s.encode('utf-8')\n return str_len, enc_value",
"def encodeString():\n pass",
"def encode_character(self, immune, char):\r\n \r\n # Check for immune\r\n if char in immune:\r\n return char\r\n \r\n ord_char = ord(char)\r\n \r\n # Only look at 8-bit \r\n if not codec.is_8bit(ord_char):\r\n return char\r\n \r\n # Pass alphanumerics\r\n if char.isalnum(): \r\n return char\r\n \r\n # Check for illegal characters\r\n if (codec.is_control_char(ord_char) and \r\n char != \"\\t\" and\r\n char != \"\\n\" and\r\n char != \"\\r\"):\r\n return \" \"\r\n \r\n # Check if there's a defined entity\r\n entity_name = self.entity_values_to_names.get(ord_char, None)\r\n if entity_name is not None:\r\n return \"&\" + entity_name + \";\"\r\n \r\n # Return the hex entity as suggested in the spec\r\n hex_str = codec.get_hex_for_char(ord_char).lower()\r\n return \"&#x\" + hex_str + \";\"",
"def unicode_from_unknown(s) :\r\n try :\r\n return unicode(s)\r\n except :\r\n pass\r\n\r\n return coerce_to_ascii(s)",
"def convert_special_chars_to_eng(name):\n return name.translate(SPECIAL_TO_ENG)",
"def unicode_encode_error():\n try:\n '€'.encode('ascii')\n except UnicodeEncodeError:\n return \"can't encode this character to ascii\"",
"def norwegian_ascii(unicode_str):\n unicode_str = re.sub(r\"ø\", \"oe\", unicode_str, flags=re.IGNORECASE)\n unicode_str = re.sub(r\"æ\", \"ae\", unicode_str, flags=re.IGNORECASE)\n unicode_str = re.sub(r\"å\", \"aa\", unicode_str, flags=re.IGNORECASE)\n return unicode_str.encode(\"ascii\", \"ignore\").decode()",
"def make_unicode(text):\n return text.encode( encoding=\"utf-8\")",
"def sanitize(w):\n\n # print w\n\n map = {'æ': 'ae',\n 'ø': 'o',\n '¨': 'o',\n 'ß': 'ss',\n 'Ø': 'o',\n '\\xef\\xac\\x80': 'ff',\n '\\xef\\xac\\x81': 'fi',\n '\\xef\\xac\\x82': 'fl'}\n\n # This replaces funny chars in map\n for char, replace_char in map.items():\n w = re.sub(char, replace_char, w)\n\n # w = unicode(w, encoding='latin-1')\n # w = str(w, encoding=\"utf-8\")\n\n # This gets rite of accents\n w = ''.join((c for c in unicodedata.normalize('NFD', w) if unicodedata.category(c) != 'Mn'))\n\n return w",
"def name_that_character(c, encoding):\n try:\n uchr = decode(c, encoding)\n except UnicodeDecodeError:\n return None\n return unicodedata.name(uchr, None)",
"def encode_filename(filename):\n if isinstance(filename, unicode):\n if os.path.supports_unicode_filenames:\n return filename\n else:\n return filename.encode(_io_encoding, 'replace')\n else:\n return filename",
"def unicode_error():\n try:\n 'é'.encode('latin-1').decode('ascii')\n except UnicodeError:\n return \"can't encode or decode\"",
"def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ':\n self.trans[char] = 'A'\n for char in 'ȀǞ':\n self.trans[char] = 'Ä'\n self.trans['Ǻ'] = 'Å'\n self.trans['Ä'] = 'Ae'\n self.trans['Å'] = 'Aa'\n for char in 'àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ':\n self.trans[char] = 'a'\n for char in 'ȁǟ':\n self.trans[char] = 'ä'\n self.trans['ǻ'] = 'å'\n self.trans['ä'] = 'ae'\n self.trans['å'] = 'aa'\n for char in 'ḂḄḆƁƂ':\n self.trans[char] = 'B'\n for char in 'ḃḅḇƀɓƃ':\n self.trans[char] = 'b'\n for char in 'ĆĈĊÇČƇ':\n self.trans[char] = 'C'\n for char in 'ćĉċçčƈȼ':\n self.trans[char] = 'c'\n self.trans['Ḉ'] = 'Ç'\n self.trans['ḉ'] = 'ç'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ĎḊḌḎḐḒĐƉƊƋ':\n self.trans[char] = 'D'\n for char in 'ďḋḍḏḑḓđɖɗƌ':\n self.trans[char] = 'd'\n for char in 'ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ':\n self.trans[char] = 'E'\n for char in 'ỀẾỄỆỂ':\n self.trans[char] = 'Ê'\n for char in 'èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ':\n self.trans[char] = 'e'\n for char in 'ềếễệể':\n self.trans[char] = 'ê'\n for char in 'ḞƑ':\n self.trans[char] = 'F'\n for char in 'ḟƒ':\n self.trans[char] = 'f'\n for char in 'ǴḠĞĠĢǦǤƓ':\n self.trans[char] = 'G'\n for char in 'ǵḡğġģǧǥɠ':\n self.trans[char] = 'g'\n self.trans['Ĝ'] = 'Gx'\n self.trans['ĝ'] = 'gx'\n for char in 'ḢḤḦȞḨḪH̱ĦǶ':\n self.trans[char] = 'H'\n for char in 'ḣḥḧȟḩḫ̱ẖħƕ':\n self.trans[char] = 'h'\n for char in 'IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ':\n self.trans[char] = 'I'\n for char in 'ıìȉíîĩḭïḯīĭȋįǐiịỉɨ':\n self.trans[char] = 'i'\n for char in 'ĴJ':\n self.trans[char] = 'J'\n for char in 'ɟĵ̌ǰ':\n self.trans[char] = 'j'\n for char in 'ḰǨĶḲḴƘ':\n self.trans[char] = 'K'\n for char in 'ḱǩķḳḵƙ':\n self.trans[char] = 'k'\n for char in 'ĹĻĽḶḸḺḼȽŁ':\n self.trans[char] = 'L'\n for char in 'ĺļľḷḹḻḽƚłɫ':\n self.trans[char] = 'l'\n for char in 'ḾṀṂ':\n self.trans[char] = 'M'\n for char in 'ḿṁṃɱ':\n self.trans[char] = 'm'\n for char in 'ǸŃÑŅŇṄṆṈṊŊƝɲȠ':\n self.trans[char] = 'N'\n for char in 'ǹńñņňṅṇṉṋŋɲƞ':\n self.trans[char] = 'n'\n for char in 'ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ':\n self.trans[char] = 'O'\n for char in 'òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ':\n self.trans[char] = 'o'\n for char in 'ȌŐȪ':\n self.trans[char] = 'Ö'\n for char in 'ȍőȫ':\n self.trans[char] = 'ö'\n for char in 'ỒỐỖỘỔȎ':\n self.trans[char] = 'Ô'\n for char in 'ồốỗộổȏ':\n self.trans[char] = 'ô'\n for char in 'ṔṖƤ':\n self.trans[char] = 'P'\n for char in 'ṕṗƥ':\n self.trans[char] = 'p'\n self.trans['ᵽ'] = 'q'\n for char in 'ȐŔŖŘȒṘṚṜṞ':\n self.trans[char] = 'R'\n for char in 'ȑŕŗřȓṙṛṝṟɽ':\n self.trans[char] = 'r'\n for char in 'ŚṤŞȘŠṦṠṢṨ':\n self.trans[char] = 'S'\n for char in 'śṥşșšṧṡṣṩȿ':\n self.trans[char] = 's'\n self.trans['Ŝ'] = 'Sx'\n self.trans['ŝ'] = 'sx'\n for char in 'ŢȚŤṪṬṮṰŦƬƮ':\n self.trans[char] = 'T'\n for char in 'ţțťṫṭṯṱŧȾƭʈ':\n self.trans[char] = 't'\n for char in 'ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ':\n self.trans[char] = 'U'\n for char in 'ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ':\n self.trans[char] = 'u'\n for char in 'ȔŰǛǗǕǙ':\n self.trans[char] = 'Ü'\n for char in 'ȕűǜǘǖǚ':\n self.trans[char] = 'ü'\n self.trans['Û'] = 'Ux'\n self.trans['û'] = 'ux'\n self.trans['Ȗ'] = 'Û'\n self.trans['ȗ'] = 'û'\n self.trans['Ừ'] = 'Ù'\n self.trans['ừ'] = 'ù'\n self.trans['Ứ'] = 'Ú'\n self.trans['ứ'] = 'ú'\n for char in 'ṼṾ':\n self.trans[char] = 'V'\n for char in 'ṽṿ':\n self.trans[char] = 'v'\n for char in 'ẀẂŴẄẆẈ':\n self.trans[char] = 'W'\n for char in 'ẁẃŵẅẇẉ':\n self.trans[char] = 'w'\n for char in 'ẊẌ':\n self.trans[char] = 'X'\n for char in 'ẋẍ':\n self.trans[char] = 'x'\n for char in 
'ỲÝŶŸỸȲẎỴỶƳ':\n self.trans[char] = 'Y'\n for char in 'ỳýŷÿỹȳẏỵỷƴ':\n self.trans[char] = 'y'\n for char in 'ŹẐŻẒŽẔƵȤ':\n self.trans[char] = 'Z'\n for char in 'źẑżẓžẕƶȥ':\n self.trans[char] = 'z'\n self.trans['ɀ'] = 'zv'\n\n # Latin: extended Latin alphabet\n self.trans['ɑ'] = 'a'\n for char in 'ÆǼǢ':\n self.trans[char] = 'AE'\n for char in 'æǽǣ':\n self.trans[char] = 'ae'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ƎƏƐ':\n self.trans[char] = 'E'\n for char in 'ǝəɛ':\n self.trans[char] = 'e'\n for char in 'ƔƢ':\n self.trans[char] = 'G'\n for char in 'ᵷɣƣᵹ':\n self.trans[char] = 'g'\n self.trans['Ƅ'] = 'H'\n self.trans['ƅ'] = 'h'\n self.trans['Ƕ'] = 'Wh'\n self.trans['ƕ'] = 'wh'\n self.trans['Ɩ'] = 'I'\n self.trans['ɩ'] = 'i'\n self.trans['Ŋ'] = 'Ng'\n self.trans['ŋ'] = 'ng'\n self.trans['Œ'] = 'OE'\n self.trans['œ'] = 'oe'\n self.trans['Ɔ'] = 'O'\n self.trans['ɔ'] = 'o'\n self.trans['Ȣ'] = 'Ou'\n self.trans['ȣ'] = 'ou'\n self.trans['Ƽ'] = 'Q'\n for char in 'ĸƽ':\n self.trans[char] = 'q'\n self.trans['ȹ'] = 'qp'\n self.trans[''] = 'r'\n self.trans['ſ'] = 's'\n self.trans['ß'] = 'ss'\n self.trans['Ʃ'] = 'Sh'\n for char in 'ʃᶋ':\n self.trans[char] = 'sh'\n self.trans['Ʉ'] = 'U'\n self.trans['ʉ'] = 'u'\n self.trans['Ʌ'] = 'V'\n self.trans['ʌ'] = 'v'\n for char in 'ƜǷ':\n self.trans[char] = 'W'\n for char in 'ɯƿ':\n self.trans[char] = 'w'\n self.trans['Ȝ'] = 'Y'\n self.trans['ȝ'] = 'y'\n self.trans['IJ'] = 'IJ'\n self.trans['ij'] = 'ij'\n self.trans['Ƨ'] = 'Z'\n for char in 'ʮƨ':\n self.trans[char] = 'z'\n self.trans['Ʒ'] = 'Zh'\n self.trans['ʒ'] = 'zh'\n self.trans['Ǯ'] = 'Dzh'\n self.trans['ǯ'] = 'dzh'\n for char in 'ƸƹʔˀɁɂ':\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in 'Cʗǃ':\n self.trans[char] = '!'\n\n # Punctuation and typography\n for char in '«»“”„¨':\n self.trans[char] = u'\"'\n for char in '‘’′':\n self.trans[char] = u\"'\"\n self.trans['•'] = '*'\n self.trans['@'] = '(at)'\n self.trans['¤'] = '$'\n self.trans['¢'] = 'c'\n self.trans['€'] = 'E'\n self.trans['£'] = 'L'\n self.trans['¥'] = 'yen'\n self.trans['†'] = '+'\n self.trans['‡'] = '++'\n self.trans['°'] = ':'\n self.trans['¡'] = '!'\n self.trans['¿'] = '?'\n self.trans['‰'] = 'o/oo'\n self.trans['‱'] = 'o/ooo'\n for char in '¶§':\n self.trans[char] = '>'\n self.trans['…'] = '...'\n for char in '‒–—―':\n self.trans[char] = '-'\n self.trans['·'] = ' '\n self.trans['¦'] = '|'\n self.trans['⁂'] = '***'\n self.trans['◊'] = '<>'\n self.trans['‽'] = '?!'\n self.trans['؟'] = ';-)'\n self.trans['¹'] = '1'\n self.trans['²'] = '2'\n self.trans['³'] = '3'\n\n # Cyrillic\n self.trans.update({'А': 'A', 'а': 'a', 'Б': 'B', 'б': 'b',\n 'В': 'V', 'в': 'v', 'Г': 'G', 'г': 'g',\n 'Д': 'D', 'д': 'd', 'Е': 'E', 'е': 'e',\n 'Ж': 'Zh', 'ж': 'zh', 'З': 'Z', 'з': 'z',\n 'И': 'I', 'и': 'i', 'Й': 'J', 'й': 'j',\n 'К': 'K', 'к': 'k', 'Л': 'L', 'л': 'l',\n 'М': 'M', 'м': 'm', 'Н': 'N', 'н': 'n',\n 'О': 'O', 'о': 'o', 'П': 'P', 'п': 'p',\n 'Р': 'R', 'р': 'r', 'С': 'S', 'с': 's',\n 'Т': 'T', 'т': 't', 'У': 'U', 'у': 'u',\n 'Ф': 'F', 'ф': 'f', 'х': 'kh', 'Ц': 'C',\n 'ц': 'c', 'Ч': 'Ch', 'ч': 'ch', 'Ш': 'Sh',\n 'ш': 'sh', 'Щ': 'Shch', 'щ': 'shch', 'Ь': \"'\",\n 'ь': \"'\", 'Ъ': '\"', 'ъ': '\"', 'Ю': 'Yu',\n 'ю': 'yu', 'Я': 'Ya', 'я': 'ya', 'Х': 'Kh',\n 'Χ': 'Kh'})\n\n # Additional Cyrillic letters, most occuring in only a few languages\n self.trans.update({\n 'Ы': 'Y', 'ы': 'y', 'Ё': 'Ë', 'ё': 'ë',\n 'Э': 'È', 'Ѐ': 'È', 'э': 'è', 'ѐ': 'è',\n 'І': 'I', 'і': 'i', 'Ї': 'Ji', 'ї': 'ji',\n 'Є': 'Je', 
'є': 'je', 'Ґ': 'G', 'Ҝ': 'G',\n 'ґ': 'g', 'ҝ': 'g', 'Ђ': 'Dj', 'ђ': 'dj',\n 'Љ': 'Lj', 'љ': 'lj',\n 'Њ': 'Nj', 'њ': 'nj', 'Ћ': 'Cj', 'ћ': 'cj',\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n 'Ќ': 'Kj', 'ќ': 'kj', 'Ӣ': 'Ii', 'ӣ': 'ii',\n 'Ҳ': 'H', 'ҳ': 'h',\n 'Ҷ': 'Dz', 'ҷ': 'dz', 'Ө': 'Ô', 'Ӫ': 'Ô',\n 'ө': 'ô', 'ӫ': 'ô', 'Ү': 'Y', 'ү': 'y', 'Һ': 'H',\n 'һ': 'h', 'Ә': 'AE', 'Ӕ': 'AE', 'ә': 'ae',\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n 'ѝ': 'ì', 'Ѝ': 'Ì', 'Ӑ': 'A', 'ă': 'a', 'Ӓ': 'Ä',\n 'Ҽ': 'Ts', 'Ҿ': 'Ts', 'ҽ': 'ts', 'ҿ': 'ts',\n 'Ҙ': 'Dh', 'ҙ': 'dh', 'Ӏ': '', 'ӏ': '', 'Ӆ': 'L',\n 'ӆ': 'l', 'Ӎ': 'M', 'ӎ': 'm', 'Ӧ': 'Ö', 'ӧ': 'ö',\n 'Ҩ': 'u', 'ҩ': 'u', 'Ҧ': 'Ph', 'ҧ': 'ph', 'Ҏ': 'R',\n 'ҏ': 'r', 'Ҫ': 'Th', 'ҫ': 'th', 'Ҭ': 'T', 'ҭ': 't',\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n 'ӹ': 'u', 'Ҵ': 'Tts', 'ҵ': 'tts', 'Ӵ': 'Ch', 'ӵ': 'ch'})\n\n for char in 'ЈӤҊ':\n self.trans[char] = 'J'\n for char in 'јӥҋ':\n self.trans[char] = 'j'\n for char in 'ЏӁӜҶ':\n self.trans[char] = 'Dzh'\n for char in 'џӂӝҷ':\n self.trans[char] = 'dzh'\n for char in 'ЅӞӠӋҸ':\n self.trans[char] = 'Dz'\n for char in 'ѕӟӡӌҹ':\n self.trans[char] = 'dz'\n for char in 'ҒӶҔ':\n self.trans[char] = 'G'\n for char in 'ғӷҕ':\n self.trans[char] = 'g'\n for char in 'ҚҞҠӃ':\n self.trans[char] = 'Q'\n for char in 'қҟҡӄ':\n self.trans[char] = 'q'\n for char in 'ҢҤӉӇ':\n self.trans[char] = 'Ng'\n for char in 'ңҥӊӈ':\n self.trans[char] = 'ng'\n for char in 'ӖѢҌ':\n self.trans[char] = 'E'\n for char in 'ӗѣҍ':\n self.trans[char] = 'e'\n for char in 'ӲӰҮ':\n self.trans[char] = 'Ü'\n for char in 'ӳӱү':\n self.trans[char] = 'ü'\n\n # Archaic Cyrillic letters\n self.trans.update({\n 'Ѹ': 'Ou', 'ѹ': 'ou', 'Ѡ': 'O', 'Ѻ': 'O', 'ѡ': 'o',\n 'ѻ': 'o', 'Ѿ': 'Ot', 'ѿ': 'ot', 'Ѣ': 'E', 'ѣ': 'e',\n 'Ѥ': 'Ei', 'Ѧ': 'Ei', 'ѥ': 'ei', 'ѧ': 'ei', 'Ѫ': 'Ai',\n 'ѫ': 'ai', 'Ѯ': 'X', 'ѯ': 'x', 'Ѱ': 'Ps', 'ѱ': 'ps',\n 'Ѳ': 'Th', 'ѳ': 'th', 'Ѵ': 'Ü', 'Ѷ': 'Ü', 'ѵ': 'ü'})\n\n # Hebrew alphabet\n for char in 'אע':\n self.trans[char] = u\"'\"\n self.trans['ב'] = 'b'\n self.trans['ג'] = 'g'\n self.trans['ד'] = 'd'\n self.trans['ה'] = 'h'\n self.trans['ו'] = 'v'\n self.trans['ז'] = 'z'\n self.trans['ח'] = 'kh'\n self.trans['ט'] = 't'\n self.trans['י'] = 'y'\n for char in 'ךכ':\n self.trans[char] = 'k'\n self.trans['ל'] = 'l'\n for char in 'םמ':\n self.trans[char] = 'm'\n for char in 'ןנ':\n self.trans[char] = 'n'\n self.trans['ס'] = 's'\n for char in 'ףפ':\n self.trans[char] = 'ph'\n for char in 'ץצ':\n self.trans[char] = 'ts'\n self.trans['ק'] = 'q'\n self.trans['ר'] = 'r'\n self.trans['ש'] = 'sh'\n self.trans['ת'] = 'th'\n\n # Arab alphabet\n for char in 'اﺍﺎ':\n self.trans[char] = 'a'\n for char in 'بﺏﺐﺒﺑ':\n self.trans[char] = 'b'\n for char in 'تﺕﺖﺘﺗ':\n self.trans[char] = 't'\n for char in 'ثﺙﺚﺜﺛ':\n self.trans[char] = 'th'\n for char in 'جﺝﺞﺠﺟ':\n self.trans[char] = 'g'\n for char in 'حﺡﺢﺤﺣ':\n self.trans[char] = 'h'\n for char in 'خﺥﺦﺨﺧ':\n self.trans[char] = 'kh'\n for char in 'دﺩﺪ':\n self.trans[char] = 'd'\n for char in 'ذﺫﺬ':\n self.trans[char] = 'dh'\n for char in 'رﺭﺮ':\n self.trans[char] = 'r'\n for char in 'زﺯﺰ':\n self.trans[char] = 'z'\n for char in 'سﺱﺲﺴﺳ':\n self.trans[char] = 's'\n for char in 'شﺵﺶﺸﺷ':\n self.trans[char] = 'sh'\n for char in 'صﺹﺺﺼﺻ':\n self.trans[char] = 's'\n for char in 'ضﺽﺾﻀﺿ':\n self.trans[char] = 'd'\n for char in 'طﻁﻂﻄﻃ':\n self.trans[char] = 't'\n for char in 'ظﻅﻆﻈﻇ':\n self.trans[char] = 'z'\n for char in 'عﻉﻊﻌﻋ':\n self.trans[char] = u\"'\"\n for char in 'غﻍﻎﻐﻏ':\n self.trans[char] 
= 'gh'\n for char in 'فﻑﻒﻔﻓ':\n self.trans[char] = 'f'\n for char in 'قﻕﻖﻘﻗ':\n self.trans[char] = 'q'\n for char in 'كﻙﻚﻜﻛک':\n self.trans[char] = 'k'\n for char in 'لﻝﻞﻠﻟ':\n self.trans[char] = 'l'\n for char in 'مﻡﻢﻤﻣ':\n self.trans[char] = 'm'\n for char in 'نﻥﻦﻨﻧ':\n self.trans[char] = 'n'\n for char in 'هﻩﻪﻬﻫ':\n self.trans[char] = 'h'\n for char in 'وﻭﻮ':\n self.trans[char] = 'w'\n for char in 'یيﻱﻲﻴﻳ':\n self.trans[char] = 'y'\n # Arabic - additional letters, modified letters and ligatures\n self.trans['ﺀ'] = \"'\"\n for char in 'آﺁﺂ':\n self.trans[char] = u\"'a\"\n for char in 'ةﺓﺔ':\n self.trans[char] = 'th'\n for char in 'ىﻯﻰ':\n self.trans[char] = 'á'\n for char in 'یﯼﯽﯿﯾ':\n self.trans[char] = 'y'\n self.trans['؟'] = '?'\n # Arabic - ligatures\n for char in 'ﻻﻼ':\n self.trans[char] = 'la'\n self.trans['ﷲ'] = 'llah'\n for char in 'إأ':\n self.trans[char] = u\"a'\"\n self.trans['ؤ'] = \"w'\"\n self.trans['ئ'] = \"y'\"\n for char in '◌◌':\n self.trans[char] = \"\" # indicates absence of vowels\n # Arabic vowels\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'i'\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'iy'\n # Arab numerals\n for char in '٠۰':\n self.trans[char] = '0'\n for char in '١۱':\n self.trans[char] = '1'\n for char in '٢۲':\n self.trans[char] = '2'\n for char in '٣۳':\n self.trans[char] = '3'\n for char in '٤۴':\n self.trans[char] = '4'\n for char in '٥۵':\n self.trans[char] = '5'\n for char in '٦۶':\n self.trans[char] = '6'\n for char in '٧۷':\n self.trans[char] = '7'\n for char in '٨۸':\n self.trans[char] = '8'\n for char in '٩۹':\n self.trans[char] = '9'\n # Perso-Arabic\n for char in 'پﭙﭙپ':\n self.trans[char] = 'p'\n for char in 'چچچچ':\n self.trans[char] = 'ch'\n for char in 'ژژ':\n self.trans[char] = 'zh'\n for char in 'گﮔﮕﮓ':\n self.trans[char] = 'g'\n\n # Greek\n self.trans.update({\n 'Α': 'A', 'α': 'a', 'Β': 'B', 'β': 'b', 'Γ': 'G',\n 'γ': 'g', 'Δ': 'D', 'δ': 'd', 'Ε': 'E', 'ε': 'e',\n 'Ζ': 'Z', 'ζ': 'z', 'Η': 'I', 'η': 'i', 'θ': 'th',\n 'Θ': 'Th', 'Ι': 'I', 'ι': 'i', 'Κ': 'K', 'κ': 'k',\n 'Λ': 'L', 'λ': 'l', 'Μ': 'M', 'μ': 'm', 'Ν': 'N',\n 'ν': 'n', 'Ξ': 'X', 'ξ': 'x', 'Ο': 'O', 'ο': 'o',\n 'Π': 'P', 'π': 'p', 'Ρ': 'R', 'ρ': 'r', 'Σ': 'S',\n 'σ': 's', 'ς': 's', 'Τ': 'T', 'τ': 't', 'Υ': 'Y',\n 'υ': 'y', 'Φ': 'F', 'φ': 'f', 'Ψ': 'Ps', 'ψ': 'ps',\n 'Ω': 'O', 'ω': 'o', 'ϗ': '&', 'Ϛ': 'St', 'ϛ': 'st',\n 'Ϙ': 'Q', 'Ϟ': 'Q', 'ϙ': 'q', 'ϟ': 'q', 'Ϻ': 'S',\n 'ϻ': 's', 'Ϡ': 'Ss', 'ϡ': 'ss', 'Ϸ': 'Sh', 'ϸ': 'sh',\n '·': ':', 'Ά': 'Á', 'ά': 'á', 'Έ': 'É', 'Ή': 'É',\n 'έ': 'é', 'ή': 'é', 'Ί': 'Í', 'ί': 'í', 'Ϊ': 'Ï',\n 'ϊ': 'ï', 'ΐ': 'ï', 'Ό': 'Ó', 'ό': 'ó', 'Ύ': 'Ý',\n 'ύ': 'ý', 'Ϋ': 'Y', 'ϋ': 'ÿ', 'ΰ': 'ÿ', 'Ώ': 'Ó',\n 'ώ': 'ó'})\n\n # Japanese (katakana and hiragana)\n for char in 'アァあ':\n self.trans[char] = 'a'\n for char in 'イィい':\n self.trans[char] = 'i'\n for char in 'ウう':\n self.trans[char] = 'u'\n for char in 'エェえ':\n self.trans[char] = 'e'\n for char in 'オォお':\n self.trans[char] = 'o'\n for char in 'ャや':\n self.trans[char] = 'ya'\n for char in 'ュゆ':\n self.trans[char] = 'yu'\n for char in 'ョよ':\n self.trans[char] = 'yo'\n for char in 'カか':\n self.trans[char] = 'ka'\n for char in 'キき':\n self.trans[char] = 'ki'\n for char in 'クく':\n self.trans[char] = 'ku'\n for char in 'ケけ':\n self.trans[char] = 'ke'\n for char in 'コこ':\n self.trans[char] = 'ko'\n for char in 'サさ':\n self.trans[char] = 'sa'\n for char in 'シし':\n self.trans[char] = 'shi'\n for char in 'スす':\n 
self.trans[char] = 'su'\n for char in 'セせ':\n self.trans[char] = 'se'\n for char in 'ソそ':\n self.trans[char] = 'so'\n for char in 'タた':\n self.trans[char] = 'ta'\n for char in 'チち':\n self.trans[char] = 'chi'\n for char in 'ツつ':\n self.trans[char] = 'tsu'\n for char in 'テて':\n self.trans[char] = 'te'\n for char in 'トと':\n self.trans[char] = 'to'\n for char in 'ナな':\n self.trans[char] = 'na'\n for char in 'ニに':\n self.trans[char] = 'ni'\n for char in 'ヌぬ':\n self.trans[char] = 'nu'\n for char in 'ネね':\n self.trans[char] = 'ne'\n for char in 'ノの':\n self.trans[char] = 'no'\n for char in 'ハは':\n self.trans[char] = 'ha'\n for char in 'ヒひ':\n self.trans[char] = 'hi'\n for char in 'フふ':\n self.trans[char] = 'fu'\n for char in 'ヘへ':\n self.trans[char] = 'he'\n for char in 'ホほ':\n self.trans[char] = 'ho'\n for char in 'マま':\n self.trans[char] = 'ma'\n for char in 'ミみ':\n self.trans[char] = 'mi'\n for char in 'ムむ':\n self.trans[char] = 'mu'\n for char in 'メめ':\n self.trans[char] = 'me'\n for char in 'モも':\n self.trans[char] = 'mo'\n for char in 'ラら':\n self.trans[char] = 'ra'\n for char in 'リり':\n self.trans[char] = 'ri'\n for char in 'ルる':\n self.trans[char] = 'ru'\n for char in 'レれ':\n self.trans[char] = 're'\n for char in 'ロろ':\n self.trans[char] = 'ro'\n for char in 'ワわ':\n self.trans[char] = 'wa'\n for char in 'ヰゐ':\n self.trans[char] = 'wi'\n for char in 'ヱゑ':\n self.trans[char] = 'we'\n for char in 'ヲを':\n self.trans[char] = 'wo'\n for char in 'ンん':\n self.trans[char] = 'n'\n for char in 'ガが':\n self.trans[char] = 'ga'\n for char in 'ギぎ':\n self.trans[char] = 'gi'\n for char in 'グぐ':\n self.trans[char] = 'gu'\n for char in 'ゲげ':\n self.trans[char] = 'ge'\n for char in 'ゴご':\n self.trans[char] = 'go'\n for char in 'ザざ':\n self.trans[char] = 'za'\n for char in 'ジじ':\n self.trans[char] = 'ji'\n for char in 'ズず':\n self.trans[char] = 'zu'\n for char in 'ゼぜ':\n self.trans[char] = 'ze'\n for char in 'ゾぞ':\n self.trans[char] = 'zo'\n for char in 'ダだ':\n self.trans[char] = 'da'\n for char in 'ヂぢ':\n self.trans[char] = 'dji'\n for char in 'ヅづ':\n self.trans[char] = 'dzu'\n for char in 'デで':\n self.trans[char] = 'de'\n for char in 'ドど':\n self.trans[char] = 'do'\n for char in 'バば':\n self.trans[char] = 'ba'\n for char in 'ビび':\n self.trans[char] = 'bi'\n for char in 'ブぶ':\n self.trans[char] = 'bu'\n for char in 'ベべ':\n self.trans[char] = 'be'\n for char in 'ボぼ':\n self.trans[char] = 'bo'\n for char in 'パぱ':\n self.trans[char] = 'pa'\n for char in 'ピぴ':\n self.trans[char] = 'pi'\n for char in 'プぷ':\n self.trans[char] = 'pu'\n for char in 'ペぺ':\n self.trans[char] = 'pe'\n for char in 'ポぽ':\n self.trans[char] = 'po'\n for char in 'ヴゔ':\n self.trans[char] = 'vu'\n self.trans['ヷ'] = 'va'\n self.trans['ヸ'] = 'vi'\n self.trans['ヹ'] = 've'\n self.trans['ヺ'] = 'vo'\n\n # Japanese and Chinese punctuation and typography\n for char in '・·':\n self.trans[char] = ' '\n for char in '〃『』《》':\n self.trans[char] = u'\"'\n for char in '「」〈〉〘〙〚〛':\n self.trans[char] = u\"'\"\n for char in '(〔':\n self.trans[char] = '('\n for char in ')〕':\n self.trans[char] = ')'\n for char in '[【〖':\n self.trans[char] = '['\n for char in ']】〗':\n self.trans[char] = ']'\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. 
'\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in '•◦':\n self.trans[char] = '_'\n for char in '※*':\n self.trans[char] = '*'\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in ',、':\n self.trans[char] = ','\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in 'ეჱ':\n self.trans[char] = 'e'\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in 'ყ':\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in 'წ':\n self.trans[char] = u\"ts'\"\n for char in 'ჭ':\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in 'पप':\n self.trans[char] = 'p'\n self.trans['अ'] = 'a'\n for char in 'आा':\n self.trans[char] = 'aa'\n self.trans['प'] = 'pa'\n for char in 'इि':\n self.trans[char] = 'i'\n for char in 'ईी':\n self.trans[char] = 'ii'\n for char in 'उु':\n self.trans[char] = 'u'\n for char in 'ऊू':\n self.trans[char] = 'uu'\n for char in 'एे':\n self.trans[char] = 'e'\n for char in 'ऐै':\n self.trans[char] = 'ai'\n for char in 'ओो':\n self.trans[char] = 'o'\n for char in 'औौ':\n self.trans[char] = 'au'\n for char in 'ऋृर':\n self.trans[char] = 'r'\n for char in 'ॠॄ':\n self.trans[char] = 'rr'\n for char in 'ऌॢल':\n self.trans[char] = 'l'\n for char in 'ॡॣ':\n self.trans[char] = 'll'\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in 'टत':\n self.trans[char] = 't'\n for char in 'ठथ':\n self.trans[char] = 'th'\n for char in 'डद':\n self.trans[char] = 'd'\n for char in 'ढध':\n self.trans[char] = 'dh'\n for char in 'णन':\n self.trans[char] = 'n'\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in 'षस':\n self.trans[char] = 's'\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in 'क़':\n self.trans[char] = 'q'\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in 'डढ':\n self.trans[char] = 'r'\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in 'ख्':\n self.trans[char] = 'khn'\n self.trans['त'] = 'tn'\n for char in 'द्':\n self.trans[char] = 'dn'\n self.trans['श'] = 'cn'\n for char in 'ह्':\n self.trans[char] = 'fn'\n for char in 'अँ':\n self.trans[char] = 'm'\n for char in '॒॑':\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n 
self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in 'Տ':\n self.trans[char] = u\"T'\"\n for char in 'տ':\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in 'க்':\n self.trans[char] = 'k'\n for char in 'ஙண்ந்ன்':\n self.trans[char] = 'n'\n self.trans['ச'] = 'c'\n for char in 'ஞ்':\n self.trans[char] = 'ñ'\n for char in 'ட்':\n self.trans[char] = 'th'\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in 'ம்':\n self.trans[char] = 'm'\n for char in 'ய்':\n self.trans[char] = 'y'\n for char in 'ர்ழ்ற':\n self.trans[char] = 'r'\n for char in 'ல்ள':\n self.trans[char] = 'l'\n for char in 'வ்':\n self.trans[char] = 'v'\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in 'க்ஷ':\n self.trans[char] = 'x'\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in 'আা':\n self.trans[char] = 'a'\n for char in 'ইিঈী':\n self.trans[char] = 'i'\n for char in 'উুঊূ':\n self.trans[char] = 'u'\n for char in 'ঋৃ':\n self.trans[char] = 'ri'\n for char in 'এেয়':\n self.trans[char] = 'e'\n for char in 'ঐৈ':\n self.trans[char] = 'oi'\n for char in 'ওো':\n self.trans[char] = 'o'\n for 
char in 'ঔৌ':\n self.trans[char] = 'ou'\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in 'টত':\n self.trans[char] = 't'\n for char in 'ঠথ':\n self.trans[char] = 'th'\n for char in 'ডদ':\n self.trans[char] = 'd'\n for char in 'ঢধ':\n self.trans[char] = 'dh'\n for char in 'ণন':\n self.trans[char] = 'n'\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in 'য়':\n self.trans[char] = '-'\n for char in 'ড়':\n self.trans[char] = 'r'\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in 'ขฃคฅฆ':\n self.trans[char] = 'kh'\n self.trans['ง'] = 'ng'\n for char in 'จฉชฌ':\n self.trans[char] = 'ch'\n for char in 'ซศษส':\n self.trans[char] = 's'\n for char in 'ญย':\n self.trans[char] = 'y'\n for char in 'ฎด':\n self.trans[char] = 'd'\n for char in 'ฏต':\n self.trans[char] = 't'\n for char in 'ฐฑฒถทธ':\n self.trans[char] = 'th'\n for char in 'ณน':\n self.trans[char] = 'n'\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in 'ผพภ':\n self.trans[char] = 'ph'\n for char in 'ฝฟ':\n self.trans[char] = 'f'\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in 'ลฬ':\n self.trans[char] = 'l'\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in 'หฮ':\n self.trans[char] = 'h'\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in 'อวโิ':\n self.trans[char] = 'o'\n for char in 'ะัา':\n self.trans[char] = 'a'\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in 'เ็':\n self.trans[char] = 'e'\n self.trans['แ'] = 'ae'\n for char in 'ใไ':\n self.trans[char] = 'ai'\n for char in '่้๊๋็์':\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 
'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans['ಅ'] = 'a'\n for char in 'ಆಾ':\n self.trans[char] = 'aa'\n for char in 'ಇಿ':\n self.trans[char] = 'i'\n for char in 'ಈೀ':\n self.trans[char] = 'ii'\n for char in 'ಉು':\n self.trans[char] = 'u'\n for char in 'ಊೂ':\n self.trans[char] = 'uu'\n for char in 'ಋೂ':\n self.trans[char] = u\"r'\"\n for char in 'ಎೆ':\n self.trans[char] = 'e'\n for char in 'ಏೇ':\n self.trans[char] = 'ee'\n for char in 'ಐೈ':\n self.trans[char] = 'ai'\n for char in 'ಒೊ':\n self.trans[char] = 'o'\n for char in 'ಓೋ':\n self.trans[char] = 'oo'\n for char in 'ಔೌ':\n self.trans[char] = 'au'\n self.trans['ಂ'] = \"m'\"\n self.trans['ಃ'] = \"h'\"\n self.trans['ಕ'] = 'k'\n self.trans['ಖ'] = 'kh'\n self.trans['ಗ'] = 'g'\n self.trans['ಘ'] = 'gh'\n self.trans['ಙ'] = 'ng'\n self.trans['ಚ'] = 'c'\n self.trans['ಛ'] = 'ch'\n self.trans['ಜ'] = 'j'\n self.trans['ಝ'] = 'ny'\n self.trans['ಟ'] = 'tt'\n self.trans['ಠ'] = 'tth'\n self.trans['ಡ'] = 'dd'\n self.trans['ಢ'] = 'ddh'\n self.trans['ಣ'] = 'nn'\n self.trans['ತ'] = 't'\n self.trans['ಥ'] = 'th'\n self.trans['ದ'] = 'd'\n self.trans['ಧ'] = 'dh'\n self.trans['ನ'] = 'n'\n self.trans['ಪ'] = 'p'\n self.trans['ಫ'] = 'ph'\n self.trans['ಬ'] = 'b'\n self.trans['ಭ'] = 'bh'\n self.trans['ಮ'] = 'm'\n self.trans['ಯ'] = 'y'\n self.trans['ರ'] = 'r'\n self.trans['ಲ'] = 'l'\n self.trans['ವ'] = 'v'\n self.trans['ಶ'] = 'sh'\n self.trans['ಷ'] = 'ss'\n self.trans['ಸ'] = 's'\n self.trans['ಹ'] = 'h'\n self.trans['ಳ'] = 'll'\n self.trans['೦'] = '0'\n self.trans['೧'] = '1'\n self.trans['೨'] = '2'\n self.trans['೩'] = '3'\n self.trans['೪'] = '4'\n self.trans['೫'] = '5'\n self.trans['೬'] = '6'\n self.trans['೭'] = '7'\n self.trans['೮'] = '8'\n self.trans['೯'] = '9'\n # Telugu\n self.trans['అ'] = 'a'\n for char in 'ఆా':\n self.trans[char] = 'aa'\n for char in 'ఇి':\n self.trans[char] = 'i'\n for char in 'ఈీ':\n self.trans[char] = 'ii'\n for char in 'ఉు':\n self.trans[char] = 'u'\n for char in 'ఊూ':\n self.trans[char] = 'uu'\n for char in 'ఋృ':\n self.trans[char] = \"r'\"\n for char in 'ౠౄ':\n self.trans[char] = 'r\"'\n self.trans['ఌ'] = \"l'\"\n self.trans['ౡ'] = 'l\"'\n for char in 'ఎె':\n self.trans[char] = 'e'\n for char in 'ఏే':\n self.trans[char] = 'ee'\n for char in 'ఐై':\n self.trans[char] = 'ai'\n for char in 'ఒొ':\n self.trans[char] = 'o'\n for char in 'ఓో':\n self.trans[char] = 'oo'\n for char in 'ఔౌ':\n self.trans[char] = 'au'\n self.trans['ం'] = \"'\"\n self.trans['ః'] = '\"'\n self.trans['క'] = 'k'\n self.trans['ఖ'] = 'kh'\n self.trans['గ'] = 'g'\n self.trans['ఘ'] = 'gh'\n self.trans['ఙ'] = 'ng'\n self.trans['చ'] = 'ts'\n self.trans['ఛ'] = 'tsh'\n self.trans['జ'] = 'j'\n self.trans['ఝ'] = 'jh'\n self.trans['ఞ'] = 'ñ'\n for char in 'టత':\n self.trans[char] = 't'\n for char in 'ఠథ':\n self.trans[char] = 'th'\n for char in 'డద':\n self.trans[char] = 'd'\n for char in 'ఢధ':\n self.trans[char] = 'dh'\n for char in 'ణన':\n self.trans[char] = 'n'\n self.trans['ప'] = 'p'\n self.trans['ఫ'] = 'ph'\n self.trans['బ'] = 'b'\n self.trans['భ'] = 'bh'\n self.trans['మ'] = 'm'\n self.trans['య'] = 'y'\n for char in 'రఱ':\n self.trans[char] = 'r'\n for char in 'లళ':\n self.trans[char] = 'l'\n self.trans['వ'] = 'v'\n self.trans['శ'] = 'sh'\n for char in 'షస':\n 
self.trans[char] = 's'\n self.trans['హ'] = 'h'\n self.trans['్'] = \"\"\n for char in 'ంఁ':\n self.trans[char] = '^'\n self.trans['ః'] = '-'\n self.trans['౦'] = '0'\n self.trans['౧'] = '1'\n self.trans['౨'] = '2'\n self.trans['౩'] = '3'\n self.trans['౪'] = '4'\n self.trans['౫'] = '5'\n self.trans['౬'] = '6'\n self.trans['౭'] = '7'\n self.trans['౮'] = '8'\n self.trans['౯'] = '9'\n self.trans['౹'] = '1/4'\n self.trans['౺'] = '1/2'\n self.trans['౻'] = '3/4'\n self.trans['౼'] = '1/16'\n self.trans['౽'] = '1/8'\n self.trans['౾'] = '3/16'\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans['ກ'] = 'k'\n for char in 'ຂຄ':\n self.trans[char] = 'kh'\n self.trans['ງ'] = 'ng'\n self.trans['ຈ'] = 'ch'\n for char in 'ສຊ':\n self.trans[char] = 's'\n self.trans['ຍ'] = 'ny'\n self.trans['ດ'] = 'd'\n self.trans['ຕ'] = 't'\n for char in 'ຖທ':\n self.trans[char] = 'th'\n self.trans['ນ'] = 'n'\n self.trans['ບ'] = 'b'\n self.trans['ປ'] = 'p'\n for char in 'ຜພ':\n self.trans[char] = 'ph'\n for char in 'ຝຟ':\n self.trans[char] = 'f'\n for char in 'ມໝ':\n self.trans[char] = 'm'\n self.trans['ຢ'] = 'y'\n for char in 'ຣຼ':\n self.trans[char] = 'r'\n for char in 'ລຼ':\n self.trans[char] = 'l'\n self.trans['ວ'] = 'v'\n self.trans['ຮ'] = 'h'\n self.trans['ອ'] = \"'\"\n for char in 'ະັ':\n self.trans[char] = 'a'\n self.trans['ິ'] = 'i'\n self.trans['ຶ'] = 'ue'\n self.trans['ຸ'] = 'u'\n self.trans['ເ'] = 'é'\n self.trans['ແ'] = 'è'\n for char in 'ໂົາໍ':\n self.trans[char] = 'o'\n self.trans['ຽ'] = 'ia'\n self.trans['ເຶ'] = 'uea'\n self.trans['ຍ'] = 'i'\n for char in 'ໄໃ':\n self.trans[char] = 'ai'\n self.trans['ຳ'] = 'am'\n self.trans['າ'] = 'aa'\n self.trans['ີ'] = 'ii'\n self.trans['ື'] = 'yy'\n self.trans['ູ'] = 'uu'\n self.trans['ເ'] = 'e'\n self.trans['ແ'] = 'ei'\n self.trans['໐'] = '0'\n self.trans['໑'] = '1'\n self.trans['໒'] = '2'\n self.trans['໓'] = '3'\n self.trans['໔'] = '4'\n self.trans['໕'] = '5'\n self.trans['໖'] = '6'\n self.trans['໗'] = '7'\n self.trans['໘'] = '8'\n self.trans['໙'] = '9'\n # Chinese -- note: incomplete\n for char in '埃挨哎唉哀皑癌蔼矮艾碍爱隘':\n self.trans[char] = 'ai'\n for char in '鞍氨安俺按暗岸胺案':\n self.trans[char] = 'an'\n for char in '肮昂盎':\n self.trans[char] = 'ang'\n for char in '凹敖熬翱袄傲奥懊澳':\n self.trans[char] = 'ao'\n for char in '芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸':\n self.trans[char] = 'ba'\n for char in '白柏百摆佰败拜稗':\n self.trans[char] = 'bai'\n for char in '斑班搬扳般颁板版扮拌伴瓣半办绊':\n self.trans[char] = 'ban'\n for char in '邦帮梆榜膀绑棒磅蚌镑傍谤':\n self.trans[char] = 'bang'\n for char in '苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆':\n self.trans[char] = 'bao'\n for char in '杯碑悲卑北辈背贝钡倍狈备惫焙被':\n self.trans[char] = 'bei'\n for char in '奔苯本笨':\n self.trans[char] = 'ben'\n for char in '崩绷甭泵蹦迸':\n self.trans[char] = 'beng'\n for char in '逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛':\n self.trans[char] = 'bi'\n for char in '鞭边编贬扁便变卞辨辩辫遍':\n self.trans[char] = 'bian'\n for char in '标彪膘表':\n self.trans[char] = 'biao'\n for char in '鳖憋别瘪':\n self.trans[char] = 'bie'\n for char in '彬斌濒滨宾摈':\n self.trans[char] = 'bin'\n for char in '兵冰柄丙秉饼炳病并':\n self.trans[char] = 'bing'\n for char in '玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳':\n self.trans[char] = 'bo'\n for char in '哺补埠不布步簿部怖':\n self.trans[char] = 'bu'\n for char in '猜裁材才财睬踩采彩菜蔡':\n self.trans[char] = 'cai'\n for char in '餐参蚕残惭惨灿':\n self.trans[char] = 'can'\n for char in '苍舱仓沧藏':\n self.trans[char] = 'cang'\n for char in '操糙槽曹草':\n self.trans[char] = 'cao'\n for char in '厕策侧册测':\n self.trans[char] = 'ce'\n for char in '层蹭':\n self.trans[char] = 'ceng'\n for char in 
'插叉茬茶查碴搽察岔差诧':\n self.trans[char] = 'cha'\n for char in '拆柴豺':\n self.trans[char] = 'chai'\n for char in '搀掺蝉馋谗缠铲产阐颤':\n self.trans[char] = 'chan'\n for char in '昌猖场尝常长偿肠厂敞畅唱倡':\n self.trans[char] = 'chang'\n for char in '超抄钞朝嘲潮巢吵炒':\n self.trans[char] = 'chao'\n for char in '车扯撤掣彻澈':\n self.trans[char] = 'che'\n for char in '郴臣辰尘晨忱沉陈趁衬':\n self.trans[char] = 'chen'\n for char in '撑称城橙成呈乘程惩澄诚承逞骋秤':\n self.trans[char] = 'cheng'\n for char in '吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽':\n self.trans[char] = 'chi'\n for char in '充冲虫崇宠':\n self.trans[char] = 'chong'\n for char in '抽酬畴踌稠愁筹仇绸瞅丑臭':\n self.trans[char] = 'chou'\n for char in '初出橱厨躇锄雏滁除楚储矗搐触处':\n self.trans[char] = 'chu'\n self.trans['揣'] = 'chuai'\n for char in '川穿椽传船喘串':\n self.trans[char] = 'chuan'\n for char in '疮窗幢床闯创':\n self.trans[char] = 'chuang'\n for char in '吹炊捶锤垂':\n self.trans[char] = 'chui'\n for char in '春椿醇唇淳纯蠢':\n self.trans[char] = 'chun'\n for char in '戳绰':\n self.trans[char] = 'chuo'\n for char in '疵茨磁雌辞慈瓷词此刺赐次':\n self.trans[char] = 'ci'\n for char in '聪葱囱匆从丛':\n self.trans[char] = 'cong'\n self.trans['凑'] = 'cou'\n for char in '粗醋簇促':\n self.trans[char] = 'cu'\n for char in '蹿篡窜':\n self.trans[char] = 'cuan'\n for char in '摧崔催脆瘁粹淬翠':\n self.trans[char] = 'cui'\n for char in '村存寸':\n self.trans[char] = 'cun'\n for char in '磋撮搓措挫错':\n self.trans[char] = 'cuo'\n for char in '搭达答瘩打大':\n self.trans[char] = 'da'\n for char in '呆歹傣戴带殆代贷袋待逮怠':\n self.trans[char] = 'dai'\n for char in '耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋':\n self.trans[char] = 'dan'\n for char in '当挡党荡档':\n self.trans[char] = 'dang'\n for char in '刀捣蹈倒岛祷导到稻悼道盗':\n self.trans[char] = 'dao'\n for char in '德得的':\n self.trans[char] = 'de'\n for char in '蹬灯登等瞪凳邓':\n self.trans[char] = 'deng'\n for char in '堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔':\n self.trans[char] = 'di'\n for char in '颠掂滇碘点典靛垫电佃甸店惦奠淀殿':\n self.trans[char] = 'dian'\n for char in '碉叼雕凋刁掉吊钓调':\n self.trans[char] = 'diao'\n for char in '跌爹碟蝶迭谍叠':\n self.trans[char] = 'die'\n for char in '丁盯叮钉顶鼎锭定订':\n self.trans[char] = 'ding'\n self.trans['丢'] = 'diu'\n for char in '东冬董懂动栋侗恫冻洞':\n self.trans[char] = 'dong'\n for char in '兜抖斗陡豆逗痘':\n self.trans[char] = 'dou'\n for char in '都督毒犊独读堵睹赌杜镀肚度渡妒':\n self.trans[char] = 'du'\n for char in '端短锻段断缎':\n self.trans[char] = 'duan'\n for char in '堆兑队对':\n self.trans[char] = 'dui'\n for char in '墩吨蹲敦顿囤钝盾遁':\n self.trans[char] = 'dun'\n for char in '掇哆多夺垛躲朵跺舵剁惰堕':\n self.trans[char] = 'duo'\n for char in '蛾峨鹅俄额讹娥恶厄扼遏鄂饿':\n self.trans[char] = 'e'\n for char in '恩嗯':\n self.trans[char] = 'en'\n for char in '而儿耳尔饵洱二贰':\n self.trans[char] = 'er'\n for char in '发罚筏伐乏阀法珐':\n self.trans[char] = 'fa'\n for char in '藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛':\n self.trans[char] = 'fan'\n for char in '坊芳方肪房防妨仿访纺放':\n self.trans[char] = 'fang'\n for char in '菲非啡飞肥匪诽吠肺废沸费':\n self.trans[char] = 'fei'\n for char in '芬酚吩氛分纷坟焚汾粉奋份忿愤粪':\n self.trans[char] = 'fen'\n for char in '丰封枫蜂峰锋风疯烽逢冯缝讽奉凤':\n self.trans[char] = 'feng'\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in ('夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋'\n '复傅付阜父腹负富讣附妇缚咐'):\n self.trans[char] = 'fu'\n for char in '噶嘎':\n self.trans[char] = 'ga'\n for char in '该改概钙盖溉':\n self.trans[char] = 'gai'\n for char in '干甘杆柑竿肝赶感秆敢赣':\n self.trans[char] = 'gan'\n for char in '冈刚钢缸肛纲岗港杠':\n self.trans[char] = 'gang'\n for char in '篙皋高膏羔糕搞镐稿告':\n self.trans[char] = 'gao'\n for char in '哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各':\n self.trans[char] = 'ge'\n self.trans['给'] = 'gei'\n for char in '根跟':\n self.trans[char] = 'gen'\n for char in '耕更庚羹埂耿梗':\n self.trans[char] = 'geng'\n for char in '工攻功恭龚供躬公宫弓巩汞拱贡共':\n self.trans[char] = 
'gong'\n for char in '钩勾沟苟狗垢构购够':\n self.trans[char] = 'gou'\n for char in '辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇':\n self.trans[char] = 'gu'\n for char in '刮瓜剐寡挂褂':\n self.trans[char] = 'gua'\n for char in '乖拐怪':\n self.trans[char] = 'guai'\n for char in '棺关官冠观管馆罐惯灌贯':\n self.trans[char] = 'guan'\n for char in '光广逛':\n self.trans[char] = 'guang'\n for char in '瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽':\n self.trans[char] = 'gui'\n for char in '辊滚棍':\n self.trans[char] = 'gun'\n for char in '锅郭国果裹过':\n self.trans[char] = 'guo'\n self.trans['哈'] = 'ha'\n for char in '骸孩海氦亥害骇':\n self.trans[char] = 'hai'\n for char in '酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉':\n self.trans[char] = 'han'\n for char in '夯杭航':\n self.trans[char] = 'hang'\n for char in '壕嚎豪毫郝好耗号浩':\n self.trans[char] = 'hao'\n for char in '呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺':\n self.trans[char] = 'he'\n for char in '嘿黑':\n self.trans[char] = 'hei'\n for char in '痕很狠恨':\n self.trans[char] = 'hen'\n for char in '哼亨横衡恒':\n self.trans[char] = 'heng'\n for char in '轰哄烘虹鸿洪宏弘红':\n self.trans[char] = 'hong'\n for char in '喉侯猴吼厚候后':\n self.trans[char] = 'hou'\n for char in '呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户':\n self.trans[char] = 'hu'\n for char in '花哗华猾滑画划化话':\n self.trans[char] = 'hua'\n for char in '槐徊怀淮坏':\n self.trans[char] = 'huai'\n for char in '欢环桓还缓换患唤痪豢焕涣宦幻':\n self.trans[char] = 'huan'\n for char in '荒慌黄磺蝗簧皇凰惶煌晃幌恍谎':\n self.trans[char] = 'huang'\n for char in '灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘':\n self.trans[char] = 'hui'\n for char in '荤昏婚魂浑混':\n self.trans[char] = 'hun'\n for char in '豁活伙火获或惑霍货祸':\n self.trans[char] = 'huo'\n for char in ('击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几'\n '脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪'):\n self.trans[char] = 'ji'\n for char in '嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁':\n self.trans[char] = 'jia'\n for char in ('歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健'\n '舰剑饯渐溅涧建'):\n self.trans[char] = 'jian'\n for char in '僵姜将浆江疆蒋桨奖讲匠酱降':\n self.trans[char] = 'jiang'\n for char in '蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖':\n self.trans[char] = 'jiao'\n for char in '揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届':\n self.trans[char] = 'jie'\n for char in '巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲':\n self.trans[char] = 'jin'\n for char in '荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净':\n self.trans[char] = 'jing'\n for char in '囧炯窘':\n self.trans[char] = 'jiong'\n for char in '揪究纠玖韭久灸九酒厩救旧臼舅咎就疚':\n self.trans[char] = 'jiu'\n for char in '鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧':\n self.trans[char] = 'ju'\n for char in '捐鹃娟倦眷卷绢':\n self.trans[char] = 'juan'\n for char in '撅攫抉掘倔爵觉决诀绝':\n self.trans[char] = 'jue'\n for char in '均菌钧军君峻俊竣浚郡骏':\n self.trans[char] = 'jun'\n for char in '喀咖卡咯':\n self.trans[char] = 'ka'\n for char in '开揩楷凯慨':\n self.trans[char] = 'kai'\n for char in '刊堪勘坎砍看':\n self.trans[char] = 'kan'\n for char in '康慷糠扛抗亢炕':\n self.trans[char] = 'kang'\n for char in '考拷烤靠':\n self.trans[char] = 'kao'\n for char in '坷苛柯棵磕颗科壳咳可渴克刻客课':\n self.trans[char] = 'ke'\n for char in '肯啃垦恳':\n self.trans[char] = 'ken'\n for char in '坑吭':\n self.trans[char] = 'keng'\n for char in '空恐孔控':\n self.trans[char] = 'kong'\n for char in '抠口扣寇':\n self.trans[char] = 'kou'\n for char in '枯哭窟苦酷库裤':\n self.trans[char] = 'ku'\n for char in '夸垮挎跨胯':\n self.trans[char] = 'kua'\n for char in '块筷侩快':\n self.trans[char] = 'kuai'\n for char in '宽款':\n self.trans[char] = 'kuan'\n for char in '匡筐狂框矿眶旷况':\n self.trans[char] = 'kuang'\n for char in '亏盔岿窥葵奎魁傀馈愧溃':\n self.trans[char] = 'kui'\n for char in '坤昆捆困':\n self.trans[char] = 'kun'\n for char in '括扩廓阔':\n self.trans[char] = 'kuo'\n for char in '垃拉喇蜡腊辣啦':\n self.trans[char] = 'la'\n for char in '莱来赖':\n self.trans[char] = 'lai'\n for char in '蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥':\n self.trans[char] = 'lan'\n for char in 
'琅榔狼廊郎朗浪':\n self.trans[char] = 'lang'\n for char in '捞劳牢老佬姥酪烙涝':\n self.trans[char] = 'lao'\n for char in '勒乐':\n self.trans[char] = 'le'\n for char in '雷镭蕾磊累儡垒擂肋类泪':\n self.trans[char] = 'lei'\n for char in '棱楞冷':\n self.trans[char] = 'leng'\n for char in ('厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力'\n '璃哩'):\n self.trans[char] = 'li'\n self.trans['俩'] = 'lia'\n for char in '联莲连镰廉怜涟帘敛脸链恋炼练':\n self.trans[char] = 'lian'\n for char in '粮凉梁粱良两辆量晾亮谅':\n self.trans[char] = 'liang'\n for char in '撩聊僚疗燎寥辽潦了撂镣廖料':\n self.trans[char] = 'liao'\n for char in '列裂烈劣猎':\n self.trans[char] = 'lie'\n for char in '琳林磷霖临邻鳞淋凛赁吝拎':\n self.trans[char] = 'lin'\n for char in '玲菱零龄铃伶羚凌灵陵岭领另令':\n self.trans[char] = 'ling'\n for char in '溜琉榴硫馏留刘瘤流柳六':\n self.trans[char] = 'liu'\n for char in '龙聋咙笼窿隆垄拢陇':\n self.trans[char] = 'long'\n for char in '楼娄搂篓漏陋':\n self.trans[char] = 'lou'\n for char in '芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸':\n self.trans[char] = 'lu'\n for char in '峦挛孪滦卵乱':\n self.trans[char] = 'luan'\n for char in '掠略':\n self.trans[char] = 'lue'\n for char in '抡轮伦仑沦纶论':\n self.trans[char] = 'lun'\n for char in '萝螺罗逻锣箩骡裸落洛骆络漯':\n self.trans[char] = 'luo'\n for char in '驴吕铝侣旅履屡缕虑氯律率滤绿':\n self.trans[char] = 'lv'\n for char in '妈麻玛码蚂马骂嘛吗':\n self.trans[char] = 'ma'\n for char in '埋买麦卖迈脉':\n self.trans[char] = 'mai'\n for char in '瞒馒蛮满蔓曼慢漫谩':\n self.trans[char] = 'man'\n for char in '芒茫盲氓忙莽':\n self.trans[char] = 'mang'\n for char in '猫茅锚毛矛铆卯茂冒帽貌贸':\n self.trans[char] = 'mao'\n self.trans['么'] = 'me'\n for char in '玫枚梅酶霉煤没眉媒镁每美昧寐妹媚':\n self.trans[char] = 'mei'\n for char in '门闷们':\n self.trans[char] = 'men'\n for char in '萌蒙檬盟锰猛梦孟':\n self.trans[char] = 'meng'\n for char in '眯醚靡糜迷谜弥米秘觅泌蜜密幂':\n self.trans[char] = 'mi'\n for char in '棉眠绵冕免勉娩缅面':\n self.trans[char] = 'mian'\n for char in '苗描瞄藐秒渺庙妙':\n self.trans[char] = 'miao'\n for char in '蔑灭':\n self.trans[char] = 'mie'\n for char in '民抿皿敏悯闽':\n self.trans[char] = 'min'\n for char in '明螟鸣铭名命':\n self.trans[char] = 'ming'\n self.trans['谬'] = 'miu'\n for char in '摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌':\n self.trans[char] = 'mo'\n for char in '谋牟某':\n self.trans[char] = 'mou'\n for char in '拇牡亩姆母墓暮幕募慕木目睦牧穆':\n self.trans[char] = 'mu'\n for char in '拿哪呐钠那娜纳':\n self.trans[char] = 'na'\n for char in '氖乃奶耐奈':\n self.trans[char] = 'nai'\n for char in '南男难':\n self.trans[char] = 'nan'\n self.trans['囊'] = 'nang'\n for char in '挠脑恼闹淖':\n self.trans[char] = 'nao'\n self.trans['呢'] = 'ne'\n for char in '馁内':\n self.trans[char] = 'nei'\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in '妮霓倪泥尼拟你匿腻逆溺':\n self.trans[char] = 'ni'\n for char in '蔫拈年碾撵捻念':\n self.trans[char] = 'nian'\n for char in '娘酿':\n self.trans[char] = 'niang'\n for char in '鸟尿':\n self.trans[char] = 'niao'\n for char in '捏聂孽啮镊镍涅':\n self.trans[char] = 'nie'\n self.trans['您'] = 'nin'\n for char in '柠狞凝宁拧泞':\n self.trans[char] = 'ning'\n for char in '牛扭钮纽':\n self.trans[char] = 'niu'\n for char in '脓浓农弄':\n self.trans[char] = 'nong'\n for char in '奴努怒':\n self.trans[char] = 'nu'\n self.trans['暖'] = 'nuan'\n for char in '虐疟':\n self.trans[char] = 'nue'\n for char in '挪懦糯诺':\n self.trans[char] = 'nuo'\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in '欧鸥殴藕呕偶沤':\n self.trans[char] = 'ou'\n for char in '啪趴爬帕怕琶':\n self.trans[char] = 'pa'\n for char in '拍排牌徘湃派':\n self.trans[char] = 'pai'\n for char in '攀潘盘磐盼畔判叛':\n self.trans[char] = 'pan'\n for char in '乓庞旁耪胖':\n self.trans[char] = 'pang'\n for char in '抛咆刨炮袍跑泡':\n self.trans[char] = 'pao'\n for char in '呸胚培裴赔陪配佩沛':\n self.trans[char] = 'pei'\n for char in '喷盆':\n 
self.trans[char] = 'pen'\n for char in '砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰':\n self.trans[char] = 'peng'\n for char in '坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬':\n self.trans[char] = 'pi'\n for char in '篇偏片骗':\n self.trans[char] = 'pian'\n for char in '飘漂瓢票':\n self.trans[char] = 'piao'\n for char in '撇瞥':\n self.trans[char] = 'pie'\n for char in '拼频贫品聘':\n self.trans[char] = 'pin'\n for char in '乒坪苹萍平凭瓶评屏':\n self.trans[char] = 'ping'\n for char in '坡泼颇婆破魄迫粕剖':\n self.trans[char] = 'po'\n for char in '扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮':\n self.trans[char] = 'pu'\n for char in ('期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄'\n '弃汽泣讫'):\n self.trans[char] = 'qi'\n for char in '掐恰洽':\n self.trans[char] = 'qia'\n for char in '牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉':\n self.trans[char] = 'qian'\n for char in '枪呛腔羌墙蔷强抢':\n self.trans[char] = 'qiang'\n for char in '橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍':\n self.trans[char] = 'qiao'\n for char in '切茄且怯窃':\n self.trans[char] = 'qie'\n for char in '钦侵亲秦琴勤芹擒禽寝沁':\n self.trans[char] = 'qin'\n for char in '青轻氢倾卿清擎晴氰情顷请庆':\n self.trans[char] = 'qing'\n for char in '琼穷':\n self.trans[char] = 'qiong'\n for char in '秋丘邱球求囚酋泅':\n self.trans[char] = 'qiu'\n for char in '趋区蛆曲躯屈驱渠取娶龋趣去':\n self.trans[char] = 'qu'\n for char in '圈颧权醛泉全痊拳犬券劝':\n self.trans[char] = 'quan'\n for char in '缺炔瘸却鹊榷确雀':\n self.trans[char] = 'que'\n for char in '裙群':\n self.trans[char] = 'qun'\n for char in '然燃冉染':\n self.trans[char] = 'ran'\n for char in '瓤壤攘嚷让':\n self.trans[char] = 'rang'\n for char in '饶扰绕':\n self.trans[char] = 'rao'\n for char in '惹热':\n self.trans[char] = 're'\n for char in '壬仁人忍韧任认刃妊纫':\n self.trans[char] = 'ren'\n for char in '扔仍':\n self.trans[char] = 'reng'\n self.trans['日'] = 'ri'\n for char in '戎茸蓉荣融熔溶容绒冗':\n self.trans[char] = 'rong'\n for char in '揉柔肉':\n self.trans[char] = 'rou'\n for char in '茹蠕儒孺如辱乳汝入褥':\n self.trans[char] = 'ru'\n for char in '软阮':\n self.trans[char] = 'ruan'\n for char in '蕊瑞锐':\n self.trans[char] = 'rui'\n for char in '闰润':\n self.trans[char] = 'run'\n for char in '若弱':\n self.trans[char] = 'ruo'\n for char in '撒洒萨':\n self.trans[char] = 'sa'\n for char in '腮鳃塞赛':\n self.trans[char] = 'sai'\n for char in '三叁伞散':\n self.trans[char] = 'san'\n for char in '桑嗓丧':\n self.trans[char] = 'sang'\n for char in '搔骚扫嫂':\n self.trans[char] = 'sao'\n for char in '瑟色涩':\n self.trans[char] = 'se'\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in '莎砂杀刹沙纱傻啥煞':\n self.trans[char] = 'sha'\n for char in '筛晒':\n self.trans[char] = 'shai'\n for char in '珊苫杉山删煽衫闪陕擅赡膳善汕扇缮':\n self.trans[char] = 'shan'\n for char in '墒伤商赏晌上尚裳':\n self.trans[char] = 'shang'\n for char in '梢捎稍烧芍勺韶少哨邵绍':\n self.trans[char] = 'shao'\n for char in '奢赊蛇舌舍赦摄射慑涉社设':\n self.trans[char] = 'she'\n for char in '砷申呻伸身深娠绅神沈审婶甚肾慎渗':\n self.trans[char] = 'shen'\n for char in '声生甥牲升绳省盛剩胜圣':\n self.trans[char] = 'sheng'\n for char in ('师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝'\n '势是嗜噬适仕侍释饰氏市恃室视试'):\n self.trans[char] = 'shi'\n for char in '收手首守寿授售受瘦兽':\n self.trans[char] = 'shou'\n for char in (\n '蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕'):\n self.trans[char] = 'shu'\n for char in '刷耍':\n self.trans[char] = 'shua'\n for char in '摔衰甩帅':\n self.trans[char] = 'shuai'\n for char in '栓拴':\n self.trans[char] = 'shuan'\n for char in '霜双爽':\n self.trans[char] = 'shuang'\n for char in '谁水睡税':\n self.trans[char] = 'shui'\n for char in '吮瞬顺舜':\n self.trans[char] = 'shun'\n for char in '说硕朔烁':\n self.trans[char] = 'shuo'\n for char in '斯撕嘶思私司丝死肆寺嗣四伺似饲巳':\n self.trans[char] = 'si'\n for char in '松耸怂颂送宋讼诵':\n self.trans[char] = 'song'\n for char in '搜艘擞':\n self.trans[char] = 'sou'\n for char in 
'嗽苏酥俗素速粟僳塑溯宿诉肃':\n self.trans[char] = 'su'\n for char in '酸蒜算':\n self.trans[char] = 'suan'\n for char in '虽隋随绥髓碎岁穗遂隧祟':\n self.trans[char] = 'sui'\n for char in '孙损笋':\n self.trans[char] = 'sun'\n for char in '蓑梭唆缩琐索锁所':\n self.trans[char] = 'suo'\n for char in '塌他它她塔獭挞蹋踏':\n self.trans[char] = 'ta'\n for char in '胎苔抬台泰酞太态汰':\n self.trans[char] = 'tai'\n for char in '坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭':\n self.trans[char] = 'tan'\n for char in '汤塘搪堂棠膛唐糖倘躺淌趟烫':\n self.trans[char] = 'tang'\n for char in '掏涛滔绦萄桃逃淘陶讨套':\n self.trans[char] = 'tao'\n self.trans['特'] = 'te'\n for char in '藤腾疼誊':\n self.trans[char] = 'teng'\n for char in '梯剔踢锑提题蹄啼体替嚏惕涕剃屉':\n self.trans[char] = 'ti'\n for char in '兲天添填田甜恬舔腆':\n self.trans[char] = 'tian'\n for char in '挑条迢眺跳':\n self.trans[char] = 'tiao'\n for char in '贴铁帖':\n self.trans[char] = 'tie'\n for char in '厅听烃汀廷停亭庭挺艇':\n self.trans[char] = 'ting'\n for char in '通桐酮瞳同铜彤童桶捅筒统痛':\n self.trans[char] = 'tong'\n for char in '偷投头透':\n self.trans[char] = 'tou'\n for char in '凸秃突图徒途涂屠土吐兔':\n self.trans[char] = 'tu'\n for char in '湍团':\n self.trans[char] = 'tuan'\n for char in '推颓腿蜕褪退':\n self.trans[char] = 'tui'\n for char in '吞屯臀':\n self.trans[char] = 'tun'\n for char in '拖托脱鸵陀驮驼椭妥拓唾':\n self.trans[char] = 'tuo'\n for char in '挖哇蛙洼娃瓦袜':\n self.trans[char] = 'wa'\n for char in '歪外':\n self.trans[char] = 'wai'\n for char in '豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞':\n self.trans[char] = 'wan'\n for char in '汪王亡枉网往旺望忘妄':\n self.trans[char] = 'wang'\n for char in '威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫':\n self.trans[char] = 'wei'\n for char in '瘟温蚊文闻纹吻稳紊问':\n self.trans[char] = 'wen'\n for char in '嗡翁瓮':\n self.trans[char] = 'weng'\n for char in '挝蜗涡窝我斡卧握沃':\n self.trans[char] = 'wo'\n for char in '巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误':\n self.trans[char] = 'wu'\n for char in ('昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系'\n '隙戏细'):\n self.trans[char] = 'xi'\n for char in '瞎虾匣霞辖暇峡侠狭下厦夏吓':\n self.trans[char] = 'xia'\n for char in '掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线':\n self.trans[char] = 'xian'\n for char in '相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象':\n self.trans[char] = 'xiang'\n for char in '萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效':\n self.trans[char] = 'xiao'\n for char in '楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑':\n self.trans[char] = 'xie'\n for char in '薪芯锌欣辛新忻心信衅':\n self.trans[char] = 'xin'\n for char in '星腥猩惺兴刑型形邢行醒幸杏性姓':\n self.trans[char] = 'xing'\n for char in '兄凶胸匈汹雄熊':\n self.trans[char] = 'xiong'\n for char in '休修羞朽嗅锈秀袖绣':\n self.trans[char] = 'xiu'\n for char in '墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续':\n self.trans[char] = 'xu'\n for char in '轩喧宣悬旋玄选癣眩绚':\n self.trans[char] = 'xuan'\n for char in '靴薛学穴雪血':\n self.trans[char] = 'xue'\n for char in '勋熏循旬询寻驯巡殉汛训讯逊迅':\n self.trans[char] = 'xun'\n for char in '压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶':\n self.trans[char] = 'ya'\n for char in '焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验':\n self.trans[char] = 'yan'\n for char in '殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾':\n self.trans[char] = 'yang'\n for char in '邀腰妖瑶摇尧遥窑谣姚咬舀药要耀':\n self.trans[char] = 'yao'\n for char in '椰噎耶爷野冶也页掖业叶曳腋夜液':\n self.trans[char] = 'ye'\n for char in ('一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿'\n '役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎'):\n self.trans[char] = 'yi'\n for char in '茵荫因殷音阴姻吟银淫寅饮尹引隐印':\n self.trans[char] = 'yin'\n for char in '英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映':\n self.trans[char] = 'ying'\n self.trans['哟'] = 'yo'\n for char in '拥佣臃痈庸雍踊蛹咏泳涌永恿勇用':\n self.trans[char] = 'yong'\n for char in '幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂':\n self.trans[char] = 'you'\n for char in ('淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻'\n '峪御愈欲狱育誉浴寓裕预豫驭'):\n self.trans[char] = 'yu'\n for char in '鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院':\n self.trans[char] = 'yuan'\n for char in '曰约越跃钥岳粤月悦阅':\n self.trans[char] = 'yue'\n 
for char in '耘云郧匀陨允运蕴酝晕韵孕':\n self.trans[char] = 'yun'\n for char in '匝砸杂':\n self.trans[char] = 'za'\n for char in '栽哉灾宰载再在':\n self.trans[char] = 'zai'\n for char in '咱攒暂赞':\n self.trans[char] = 'zan'\n for char in '赃脏葬':\n self.trans[char] = 'zang'\n for char in '遭糟凿藻枣早澡蚤躁噪造皂灶燥':\n self.trans[char] = 'zao'\n for char in '责择则泽':\n self.trans[char] = 'ze'\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in '增憎曾赠':\n self.trans[char] = 'zeng'\n for char in '扎喳渣札轧铡闸眨栅榨咋乍炸诈':\n self.trans[char] = 'zha'\n for char in '摘斋宅窄债寨':\n self.trans[char] = 'zhai'\n for char in '瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽':\n self.trans[char] = 'zhan'\n for char in '樟章彰漳张掌涨杖丈帐账仗胀瘴障':\n self.trans[char] = 'zhang'\n for char in '招昭找沼赵照罩兆肇召':\n self.trans[char] = 'zhao'\n for char in '遮折哲蛰辙者锗蔗这浙':\n self.trans[char] = 'zhe'\n for char in '珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳':\n self.trans[char] = 'zhen'\n for char in '蒸挣睁征狰争怔整拯正政帧症郑证':\n self.trans[char] = 'zheng'\n for char in ('芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置'\n '帜峙制智秩稚质炙痔滞治窒'):\n self.trans[char] = 'zhi'\n for char in '中盅忠钟衷终种肿重仲众':\n self.trans[char] = 'zhong'\n for char in '舟周州洲诌粥轴肘帚咒皱宙昼骤':\n self.trans[char] = 'zhou'\n for char in '珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻':\n self.trans[char] = 'zhu'\n for char in '抓爪':\n self.trans[char] = 'zhua'\n self.trans['拽'] = 'zhuai'\n for char in '专砖转撰赚篆':\n self.trans[char] = 'zhuan'\n for char in '桩庄装妆撞壮状':\n self.trans[char] = 'zhuang'\n for char in '椎锥追赘坠缀':\n self.trans[char] = 'zhui'\n for char in '谆准':\n self.trans[char] = 'zhun'\n for char in '捉拙卓桌琢茁酌啄着灼浊':\n self.trans[char] = 'zhuo'\n for char in '兹咨资姿滋淄孜紫仔籽滓子自渍字':\n self.trans[char] = 'zi'\n for char in '鬃棕踪宗综总纵':\n self.trans[char] = 'zong'\n for char in '邹走奏揍':\n self.trans[char] = 'zou'\n for char in '租足卒族祖诅阻组':\n self.trans[char] = 'zu'\n for char in '钻纂':\n self.trans[char] = 'zuan'\n for char in '嘴醉最罪':\n self.trans[char] = 'zui'\n for char in '尊遵':\n self.trans[char] = 'zun'\n for char in '昨左佐柞做作坐座':\n self.trans[char] = 'zuo'\n # from:\n # https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans['ଂ'] = 'anusvara'\n self.trans['ઇ'] = 'i'\n self.trans['എ'] = 'e'\n self.trans['ગ'] = 'ga'\n self.trans['ਜ'] = 'ja'\n self.trans['ഞ'] = 'nya'\n self.trans['ଢ'] = 'ddha'\n self.trans['ધ'] = 'dha'\n self.trans['ਬ'] = 'ba'\n self.trans['മ'] = 'ma'\n self.trans['ଲ'] = 'la'\n self.trans['ષ'] = 'ssa'\n self.trans['਼'] = 'nukta'\n self.trans['ാ'] = 'aa'\n self.trans['ୂ'] = 'uu'\n self.trans['ે'] = 'e'\n self.trans['ੌ'] = 'au'\n self.trans['ൎ'] = 'reph'\n self.trans['ੜ'] = 'rra'\n self.trans['՞'] = '?'\n self.trans['ୢ'] = 'l'\n self.trans['૧'] = '1'\n self.trans['੬'] = '6'\n self.trans['൮'] = '8'\n self.trans['୲'] = 'quarter'\n self.trans['ൾ'] = 'll'\n self.trans['ਇ'] = 'i'\n self.trans['ഉ'] = 'u'\n self.trans['ઌ'] = 'l'\n self.trans['ਗ'] = 'ga'\n self.trans['ങ'] = 'nga'\n self.trans['ଝ'] = 'jha'\n self.trans['જ'] = 'ja'\n self.trans['؟'] = '?'\n self.trans['ਧ'] = 'dha'\n self.trans['ഩ'] = 'nnna'\n self.trans['ଭ'] = 'bha'\n self.trans['બ'] = 'ba'\n self.trans['ഹ'] = 'ha'\n self.trans['ଽ'] = 'avagraha'\n self.trans['઼'] = 'nukta'\n self.trans['ੇ'] = 'ee'\n self.trans['୍'] = 'virama'\n self.trans['ૌ'] = 'au'\n self.trans['੧'] = '1'\n self.trans['൩'] = '3'\n self.trans['୭'] = '7'\n self.trans['૬'] = '6'\n self.trans['൹'] = 'mark'\n self.trans['ਖ਼'] = 'khha'\n self.trans['ਂ'] = 'bindi'\n self.trans['ഈ'] = 'ii'\n self.trans['ઍ'] = 'e'\n self.trans['ଌ'] = 'l'\n self.trans['ഘ'] = 'gha'\n self.trans['ઝ'] = 'jha'\n self.trans['ଡ଼'] = 'rra'\n self.trans['ਢ'] = 'ddha'\n 
self.trans['ന'] = 'na'\n self.trans['ભ'] = 'bha'\n self.trans['ବ'] = 'ba'\n self.trans['ਲ'] = 'la'\n self.trans['സ'] = 'sa'\n self.trans['ઽ'] = 'avagraha'\n self.trans['଼'] = 'nukta'\n self.trans['ੂ'] = 'uu'\n self.trans['ൈ'] = 'ai'\n self.trans['્'] = 'virama'\n self.trans['ୌ'] = 'au'\n self.trans['൨'] = '2'\n self.trans['૭'] = '7'\n self.trans['୬'] = '6'\n self.trans['ੲ'] = 'iri'\n self.trans['ഃ'] = 'visarga'\n self.trans['ં'] = 'anusvara'\n self.trans['ଇ'] = 'i'\n self.trans['ഓ'] = 'oo'\n self.trans['ଗ'] = 'ga'\n self.trans['ਝ'] = 'jha'\n self.trans['?'] = '?'\n self.trans['ണ'] = 'nna'\n self.trans['ઢ'] = 'ddha'\n self.trans['ଧ'] = 'dha'\n self.trans['ਭ'] = 'bha'\n self.trans['ള'] = 'lla'\n self.trans['લ'] = 'la'\n self.trans['ଷ'] = 'ssa'\n self.trans['ൃ'] = 'r'\n self.trans['ૂ'] = 'uu'\n self.trans['େ'] = 'e'\n self.trans['੍'] = 'virama'\n self.trans['ୗ'] = 'mark'\n self.trans['ൣ'] = 'll'\n self.trans['ૢ'] = 'l'\n self.trans['୧'] = '1'\n self.trans['੭'] = '7'\n self.trans['൳'] = '1/4'\n self.trans['୷'] = 'sixteenths'\n self.trans['ଆ'] = 'aa'\n self.trans['ઋ'] = 'r'\n self.trans['ഊ'] = 'uu'\n self.trans['ਐ'] = 'ai'\n self.trans['ଖ'] = 'kha'\n self.trans['છ'] = 'cha'\n self.trans['ച'] = 'ca'\n self.trans['ਠ'] = 'ttha'\n self.trans['ଦ'] = 'da'\n self.trans['ફ'] = 'pha'\n self.trans['പ'] = 'pa'\n self.trans['ਰ'] = 'ra'\n self.trans['ଶ'] = 'sha'\n self.trans['ഺ'] = 'ttta'\n self.trans['ੀ'] = 'ii'\n self.trans['ો'] = 'o'\n self.trans['ൊ'] = 'o'\n self.trans['ୖ'] = 'mark'\n self.trans['୦'] = '0'\n self.trans['૫'] = '5'\n self.trans['൪'] = '4'\n self.trans['ੰ'] = 'tippi'\n self.trans['୶'] = 'eighth'\n self.trans['ൺ'] = 'nn'\n self.trans['ଁ'] = 'candrabindu'\n self.trans['അ'] = 'a'\n self.trans['ઐ'] = 'ai'\n self.trans['ക'] = 'ka'\n self.trans['ਸ਼'] = 'sha'\n self.trans['ਛ'] = 'cha'\n self.trans['ଡ'] = 'dda'\n self.trans['ઠ'] = 'ttha'\n self.trans['ഥ'] = 'tha'\n self.trans['ਫ'] = 'pha'\n self.trans['ર'] = 'ra'\n self.trans['വ'] = 'va'\n self.trans['ୁ'] = 'u'\n self.trans['ી'] = 'ii'\n self.trans['ੋ'] = 'oo'\n self.trans['ૐ'] = 'om'\n self.trans['ୡ'] = 'll'\n self.trans['ૠ'] = 'rr'\n self.trans['੫'] = '5'\n self.trans['ୱ'] = 'wa'\n self.trans['૰'] = 'sign'\n self.trans['൵'] = 'quarters'\n self.trans['ਫ਼'] = 'fa'\n self.trans['ઁ'] = 'candrabindu'\n self.trans['ਆ'] = 'aa'\n self.trans['ઑ'] = 'o'\n self.trans['ଐ'] = 'ai'\n self.trans['ഔ'] = 'au'\n self.trans['ਖ'] = 'kha'\n self.trans['ડ'] = 'dda'\n self.trans['ଠ'] = 'ttha'\n self.trans['ത'] = 'ta'\n self.trans['ਦ'] = 'da'\n self.trans['ର'] = 'ra'\n self.trans['ഴ'] = 'llla'\n self.trans['ુ'] = 'u'\n self.trans['ୀ'] = 'ii'\n self.trans['ൄ'] = 'rr'\n self.trans['ૡ'] = 'll'\n self.trans['ୠ'] = 'rr'\n self.trans['੦'] = '0'\n self.trans['૱'] = 'sign'\n self.trans['୰'] = 'isshar'\n self.trans['൴'] = '1/2'\n self.trans['ਁ'] = 'bindi'\n self.trans['આ'] = 'aa'\n self.trans['ଋ'] = 'r'\n self.trans['ഏ'] = 'ee'\n self.trans['ખ'] = 'kha'\n self.trans['ଛ'] = 'cha'\n self.trans['ട'] = 'tta'\n self.trans['ਡ'] = 'dda'\n self.trans['દ'] = 'da'\n self.trans['ଫ'] = 'pha'\n self.trans['യ'] = 'ya'\n self.trans['શ'] = 'sha'\n self.trans['ി'] = 'i'\n self.trans['ੁ'] = 'u'\n self.trans['ୋ'] = 'o'\n self.trans['ੑ'] = 'udaat'\n self.trans['૦'] = '0'\n self.trans['୫'] = '5'\n self.trans['൯'] = '9'\n self.trans['ੱ'] = 'addak'\n self.trans['ൿ'] = 'k'\n self.trans['ആ'] = 'aa'\n self.trans['ଊ'] = 'uu'\n self.trans['એ'] = 'e'\n self.trans['ਔ'] = 'au'\n self.trans['ഖ'] = 'kha'\n self.trans['ଚ'] = 'ca'\n self.trans['ટ'] = 'tta'\n self.trans['ਤ'] = 'ta'\n self.trans['ദ'] = 'da'\n 
self.trans['ପ'] = 'pa'\n self.trans['ય'] = 'ya'\n self.trans['ശ'] = 'sha'\n self.trans['િ'] = 'i'\n self.trans['െ'] = 'e'\n self.trans['൦'] = '0'\n self.trans['୪'] = '4'\n self.trans['૯'] = '9'\n self.trans['ੴ'] = 'onkar'\n self.trans['ଅ'] = 'a'\n self.trans['ਏ'] = 'ee'\n self.trans['କ'] = 'ka'\n self.trans['ઔ'] = 'au'\n self.trans['ਟ'] = 'tta'\n self.trans['ഡ'] = 'dda'\n self.trans['ଥ'] = 'tha'\n self.trans['ત'] = 'ta'\n self.trans['ਯ'] = 'ya'\n self.trans['റ'] = 'rra'\n self.trans['ଵ'] = 'va'\n self.trans['ਿ'] = 'i'\n self.trans['ു'] = 'u'\n self.trans['ૄ'] = 'rr'\n self.trans['ൡ'] = 'll'\n self.trans['੯'] = '9'\n self.trans['൱'] = '100'\n self.trans['୵'] = 'sixteenth'\n self.trans['અ'] = 'a'\n self.trans['ਊ'] = 'uu'\n self.trans['ഐ'] = 'ai'\n self.trans['ક'] = 'ka'\n self.trans['ଔ'] = 'au'\n self.trans['ਚ'] = 'ca'\n self.trans['ഠ'] = 'ttha'\n self.trans['થ'] = 'tha'\n self.trans['ତ'] = 'ta'\n self.trans['ਪ'] = 'pa'\n self.trans['ര'] = 'ra'\n self.trans['વ'] = 'va'\n self.trans['ീ'] = 'ii'\n self.trans['ૅ'] = 'e'\n self.trans['ୄ'] = 'rr'\n self.trans['ൠ'] = 'rr'\n self.trans['ਜ਼'] = 'za'\n self.trans['੪'] = '4'\n self.trans['൰'] = '10'\n self.trans['୴'] = 'quarters'\n self.trans['ਅ'] = 'a'\n self.trans['ഋ'] = 'r'\n self.trans['ઊ'] = 'uu'\n self.trans['ଏ'] = 'e'\n self.trans['ਕ'] = 'ka'\n self.trans['ഛ'] = 'cha'\n self.trans['ચ'] = 'ca'\n self.trans['ଟ'] = 'tta'\n self.trans['ਥ'] = 'tha'\n self.trans['ഫ'] = 'pha'\n self.trans['પ'] = 'pa'\n self.trans['ଯ'] = 'ya'\n self.trans['ਵ'] = 'va'\n self.trans['ି'] = 'i'\n self.trans['ോ'] = 'oo'\n self.trans['ୟ'] = 'yya'\n self.trans['൫'] = '5'\n self.trans['૪'] = '4'\n self.trans['୯'] = '9'\n self.trans['ੵ'] = 'yakash'\n self.trans['ൻ'] = 'n'\n self.trans['ઃ'] = 'visarga'\n self.trans['ം'] = 'anusvara'\n self.trans['ਈ'] = 'ii'\n self.trans['ઓ'] = 'o'\n self.trans['ഒ'] = 'o'\n self.trans['ਘ'] = 'gha'\n self.trans['ଞ'] = 'nya'\n self.trans['ણ'] = 'nna'\n self.trans['ഢ'] = 'ddha'\n self.trans['ਲ਼'] = 'lla'\n self.trans['ਨ'] = 'na'\n self.trans['ମ'] = 'ma'\n self.trans['ળ'] = 'lla'\n self.trans['ല'] = 'la'\n self.trans['ਸ'] = 'sa'\n self.trans['¿'] = '?'\n self.trans['ା'] = 'aa'\n self.trans['ૃ'] = 'r'\n self.trans['ൂ'] = 'uu'\n self.trans['ੈ'] = 'ai'\n self.trans['ૣ'] = 'll'\n self.trans['ൢ'] = 'l'\n self.trans['੨'] = '2'\n self.trans['୮'] = '8'\n self.trans['൲'] = '1000'\n self.trans['ਃ'] = 'visarga'\n self.trans['ଉ'] = 'u'\n self.trans['ઈ'] = 'ii'\n self.trans['ਓ'] = 'oo'\n self.trans['ଙ'] = 'nga'\n self.trans['ઘ'] = 'gha'\n self.trans['ഝ'] = 'jha'\n self.trans['ਣ'] = 'nna'\n self.trans['ન'] = 'na'\n self.trans['ഭ'] = 'bha'\n self.trans['ଜ'] = 'ja'\n self.trans['ହ'] = 'ha'\n self.trans['સ'] = 'sa'\n self.trans['ഽ'] = 'avagraha'\n self.trans['ૈ'] = 'ai'\n self.trans['്'] = 'virama'\n self.trans['୩'] = '3'\n self.trans['૨'] = '2'\n self.trans['൭'] = '7'\n self.trans['ੳ'] = 'ura'\n self.trans['ൽ'] = 'l'\n self.trans['ઉ'] = 'u'\n self.trans['ଈ'] = 'ii'\n self.trans['ഌ'] = 'l'\n self.trans['ઙ'] = 'nga'\n self.trans['ଘ'] = 'gha'\n self.trans['ജ'] = 'ja'\n self.trans['ਞ'] = 'nya'\n self.trans['ନ'] = 'na'\n self.trans['ബ'] = 'ba'\n self.trans['ਮ'] = 'ma'\n self.trans['હ'] = 'ha'\n self.trans['ସ'] = 'sa'\n self.trans['ਾ'] = 'aa'\n self.trans['ૉ'] = 'o'\n self.trans['ୈ'] = 'ai'\n self.trans['ൌ'] = 'au'\n self.trans['૩'] = '3'\n self.trans['୨'] = '2'\n self.trans['൬'] = '6'\n self.trans['੮'] = '8'\n self.trans['ർ'] = 'rr'\n self.trans['ଃ'] = 'visarga'\n self.trans['ഇ'] = 'i'\n self.trans['ਉ'] = 'u'\n self.trans['ଓ'] = 'o'\n self.trans['ഗ'] = 'ga'\n 
self.trans['ਙ'] = 'nga'\n self.trans['ઞ'] = 'nya'\n self.trans['ଣ'] = 'nna'\n self.trans['ധ'] = 'dha'\n self.trans['મ'] = 'ma'\n self.trans['ଳ'] = 'lla'\n self.trans['ഷ'] = 'ssa'\n self.trans['ਹ'] = 'ha'\n self.trans['ਗ਼'] = 'ghha'\n self.trans['ા'] = 'aa'\n self.trans['ୃ'] = 'r'\n self.trans['േ'] = 'ee'\n self.trans['ൗ'] = 'mark'\n self.trans['ଢ଼'] = 'rha'\n self.trans['ୣ'] = 'll'\n self.trans['൧'] = '1'\n self.trans['੩'] = '3'\n self.trans['૮'] = '8'\n self.trans['୳'] = 'half'\n for char in self.trans:\n value = self.trans[char]\n if value == '?':\n continue\n while (value.encode(encoding, 'replace').decode(encoding) == '?'\n and value in self.trans):\n assert value != self.trans[value], \\\n '{!r} == self.trans[{!r}]!'.format(value, value)\n value = self.trans[value]\n self.trans[char] = value",
"def encode_str(encoding = 'utf-8'):\n return function(lambda value: value.encode(encoding) if isinstance(value, unicode) else value)",
"def reencode(s):\n return s.encode('ascii', 'xmlcharrefreplace').decode()",
"def convert(self, s):\r\n if self.input_codec <> self.output_codec:\r\n return unicode(s, self.input_codec).encode(self.output_codec)\r\n else:\r\n return s",
"def cencode(text):\n return _encode(text)[0]",
"def test_encoding_error(self):\n try:\n mark_safe(\"abcdefghijkl<p>mnὀp</p>qrstuwxyz\").encode(\"ascii\")\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n reporter = ExceptionReporter(None, exc_type, exc_value, tb)\n html = reporter.get_traceback_html()\n self.assertIn(\"<h2>Unicode error hint</h2>\", html)\n self.assertIn(\"The string that could not be encoded/decoded was: \", html)\n self.assertIn(\"<strong><p>mnὀp</p></strong>\", html)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encode ampersands into &amp; | def encode_ampersands(text):
    text = re.sub('&(?!([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+);)', '&amp;', text)
return text | [
"def fix_ampersands(qs):\n parts = []\n for p in qs.split('='):\n if p.count('&') > 1:\n l = p.split('&')\n last = l.pop()\n p = '%26'.join(l) + '&' + last\n parts.append(p)\n\n # an & in the last part definitely needs encoding\n parts[-1] = parts[-1].replace('&', '%26')\n\n return '='.join(parts)",
"def RefAmpersand(final_soup_href):\n if final_soup_href != None:\n final_soup_href = re.sub( r'\\&', r'&', final_soup_href)\n return final_soup_href",
"def encode_data(self, data):\r\n if data:\r\n data = urlencode(data)\r\n\r\n return data",
"def url_encode(text):\n return urllib.quote(text)",
"def escapeOnce(data):\n data = data.replace(\"&\", \"&\")\n\n #...but if it was already escaped, make sure it\n # is not done twice....this will turn any tags\n # back to how they were at the start.\n data = data.replace(\"&amp;\", \"&\")\n data = data.replace(\"&gt;\", \">\")\n data = data.replace(\"&lt;\", \"<\")\n data = data.replace(\"&#\", \"&#\")\n\n #..and just in case someone had double-escaped it, do it again\n data = data.replace(\"&amp;\", \"&\")\n data = data.replace(\"&gt;\", \">\")\n data = data.replace(\"&lt;\", \"<\")\n return data",
"def escapeForContent(data):\n if isinstance(data, unicode):\n data = data.encode('utf-8')\n data = data.replace(b'&', b'&'\n ).replace(b'<', b'<'\n ).replace(b'>', b'>')\n return data",
"def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&\")\r\n data = data.replace(\"<\", \"<\")\r\n data = data.replace(\">\", \">\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data",
"def percent_encode(data, safe=None):\n if data is None:\n return None\n if isinstance(data, (tuple, list, set)):\n return \"&\".join(\n percent_encode(value, safe=safe)\n for value in data\n )\n if isinstance(data, dict):\n return \"&\".join(\n key + \"=\" + percent_encode(value, safe=safe)\n for key, value in data.items()\n )\n return quote(bstr(data), safe or b\"\")",
"def encode_html( self, text):\n\t\thtml_escape_table = {\n\t\t\t\"&\": \"&\",\n\t\t\t'\"': \""\",\n\t\t\t\"'\": \"'\",\n\t\t\t\">\": \">\",\n\t\t\t\"<\": \"<\",\n\t\t\t}\n\t\t\n\t\tdef html_escape(text):\n\t\t\t\"\"\"Produce entities within text.\"\"\"\n\t\t\tL=[]\n\t\t\tfor c in text:\n\t\t\t\tL.append(html_escape_table.get(c,c))\n\t\t\treturn \"\".join(L)\n\n\t\treturn html_escape( text )",
"def encodeString():\n pass",
"def escape(t):\n return (t\n .replace(\""\", '@quot;')\n .replace(\"&\", \"@amp;\").replace(\"<\", \"@lt;\").replace(\">\", \"@gt;\")\n\n .replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\n .replace(\"'\", \"'\").replace('\"', \""\")\n .replace(\"\\\\\", \"\\")\n\n .replace(\"@quot;\", '"')\n .replace(\"@amp;\", \"&\").replace(\"@lt;\", \"<\").replace(\"@gt;\", \">\")\n\n )",
"def _params(self, params):\r\n return urllib.urlencode(params)",
"def _encode_query(items: dict, *, mask=False) -> str:\n pairs = []\n for key in sorted(items.keys()):\n value = _MASK if mask and key in _MASKED_PARAMS else items[key]\n item = \"{}={}\".format(key, _quote(value))\n # Ensure 'url' goes last per CLI spec\n if key == \"url\":\n pairs.append(item)\n else:\n pairs.insert(0, item)\n return \"&\".join(pairs)",
"def _qs_encode(params, sep=\"&\"):\n\n pairs = []\n for (k, v) in params.items():\n pairs.append(_urlencode(k) + \"=\" + _urlencode(v))\n pairs.sort()\n return sep.join(pairs)",
"def html_encode_django_chars(txt):\n txt = txt.replace(\"{\", \"{\")\n txt = txt.replace(\"}\", \"}\")\n txt = txt.replace(\"%\", \"%\")\n return txt",
"def _unicodeurlencode(self, params):\n if isinstance(params, dict):\n params = params.items()\n return utils.web.urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params])",
"def test_URLEncode_partial_encoded_input():\n res = main({'value': 'https%3A//www.google.com/url@to@encode'})\n assert res == 'https%3A//www.google.com/url%40to%40encode'",
"def uri_encode(uri:str) -> str:\n letters = ['%' + hex(ord(c))[-2:] if c in _uri_tohex else c for c in uri]\n return ''.join(letters)",
"def urlEncode(self, data):\n # type: (Union[str, bytearray]) -> Union[str,bytearray]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encode codec that converts Unicode characters into named entities (where the names are known), or failing that, numerical entities. | def named_entities_codec(text):
if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):
s = []
for c in text.object[text.start:text.end]:
if ord(c) in codepoint2name:
s.append('&{};'.format(codepoint2name[ord(c)]))
else:
s.append('&#{};'.format(ord(c)))
return ''.join(s), text.end
else:
raise TypeError("Can't handle {}".format(text.__name__)) | [
"def named_entities_codec(text):\r\n \r\n if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):\r\n s = []\r\n for c in text.object[text.start:text.end]:\r\n if ord(c) in codepoint2name:\r\n s.append(u'&%s;' % codepoint2name[ord(c)])\r\n else:\r\n s.append(u'&#%s;' % ord(c))\r\n return ''.join(s), text.end\r\n else:\r\n raise TypeError(\"Can't handle %s\" % text.__name__)",
"def _unicode_encode(self, value):\n splits = self.high_codepoints_re.split(value)\n enc_value = b''\n str_len = 0\n for s in splits:\n if self.high_codepoints_re.match(s):\n str_len += 2\n enc_value += self._encode_to_surrogate_pair(s)\n else:\n str_len += len(s)\n enc_value += s.encode('utf-8')\n return str_len, enc_value",
"def encodeString():\n pass",
"def encode_character(self, immune, char):\r\n \r\n # Check for immune\r\n if char in immune:\r\n return char\r\n \r\n ord_char = ord(char)\r\n \r\n # Only look at 8-bit \r\n if not codec.is_8bit(ord_char):\r\n return char\r\n \r\n # Pass alphanumerics\r\n if char.isalnum(): \r\n return char\r\n \r\n # Check for illegal characters\r\n if (codec.is_control_char(ord_char) and \r\n char != \"\\t\" and\r\n char != \"\\n\" and\r\n char != \"\\r\"):\r\n return \" \"\r\n \r\n # Check if there's a defined entity\r\n entity_name = self.entity_values_to_names.get(ord_char, None)\r\n if entity_name is not None:\r\n return \"&\" + entity_name + \";\"\r\n \r\n # Return the hex entity as suggested in the spec\r\n hex_str = codec.get_hex_for_char(ord_char).lower()\r\n return \"&#x\" + hex_str + \";\"",
"def unicode_from_unknown(s) :\r\n try :\r\n return unicode(s)\r\n except :\r\n pass\r\n\r\n return coerce_to_ascii(s)",
"def convert_special_chars_to_eng(name):\n return name.translate(SPECIAL_TO_ENG)",
"def unicode_encode_error():\n try:\n '€'.encode('ascii')\n except UnicodeEncodeError:\n return \"can't encode this character to ascii\"",
"def norwegian_ascii(unicode_str):\n unicode_str = re.sub(r\"ø\", \"oe\", unicode_str, flags=re.IGNORECASE)\n unicode_str = re.sub(r\"æ\", \"ae\", unicode_str, flags=re.IGNORECASE)\n unicode_str = re.sub(r\"å\", \"aa\", unicode_str, flags=re.IGNORECASE)\n return unicode_str.encode(\"ascii\", \"ignore\").decode()",
"def make_unicode(text):\n return text.encode( encoding=\"utf-8\")",
"def sanitize(w):\n\n # print w\n\n map = {'æ': 'ae',\n 'ø': 'o',\n '¨': 'o',\n 'ß': 'ss',\n 'Ø': 'o',\n '\\xef\\xac\\x80': 'ff',\n '\\xef\\xac\\x81': 'fi',\n '\\xef\\xac\\x82': 'fl'}\n\n # This replaces funny chars in map\n for char, replace_char in map.items():\n w = re.sub(char, replace_char, w)\n\n # w = unicode(w, encoding='latin-1')\n # w = str(w, encoding=\"utf-8\")\n\n # This gets rite of accents\n w = ''.join((c for c in unicodedata.normalize('NFD', w) if unicodedata.category(c) != 'Mn'))\n\n return w",
"def name_that_character(c, encoding):\n try:\n uchr = decode(c, encoding)\n except UnicodeDecodeError:\n return None\n return unicodedata.name(uchr, None)",
"def encode_filename(filename):\n if isinstance(filename, unicode):\n if os.path.supports_unicode_filenames:\n return filename\n else:\n return filename.encode(_io_encoding, 'replace')\n else:\n return filename",
"def unicode_error():\n try:\n 'é'.encode('latin-1').decode('ascii')\n except UnicodeError:\n return \"can't encode or decode\"",
"def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ':\n self.trans[char] = 'A'\n for char in 'ȀǞ':\n self.trans[char] = 'Ä'\n self.trans['Ǻ'] = 'Å'\n self.trans['Ä'] = 'Ae'\n self.trans['Å'] = 'Aa'\n for char in 'àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ':\n self.trans[char] = 'a'\n for char in 'ȁǟ':\n self.trans[char] = 'ä'\n self.trans['ǻ'] = 'å'\n self.trans['ä'] = 'ae'\n self.trans['å'] = 'aa'\n for char in 'ḂḄḆƁƂ':\n self.trans[char] = 'B'\n for char in 'ḃḅḇƀɓƃ':\n self.trans[char] = 'b'\n for char in 'ĆĈĊÇČƇ':\n self.trans[char] = 'C'\n for char in 'ćĉċçčƈȼ':\n self.trans[char] = 'c'\n self.trans['Ḉ'] = 'Ç'\n self.trans['ḉ'] = 'ç'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ĎḊḌḎḐḒĐƉƊƋ':\n self.trans[char] = 'D'\n for char in 'ďḋḍḏḑḓđɖɗƌ':\n self.trans[char] = 'd'\n for char in 'ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ':\n self.trans[char] = 'E'\n for char in 'ỀẾỄỆỂ':\n self.trans[char] = 'Ê'\n for char in 'èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ':\n self.trans[char] = 'e'\n for char in 'ềếễệể':\n self.trans[char] = 'ê'\n for char in 'ḞƑ':\n self.trans[char] = 'F'\n for char in 'ḟƒ':\n self.trans[char] = 'f'\n for char in 'ǴḠĞĠĢǦǤƓ':\n self.trans[char] = 'G'\n for char in 'ǵḡğġģǧǥɠ':\n self.trans[char] = 'g'\n self.trans['Ĝ'] = 'Gx'\n self.trans['ĝ'] = 'gx'\n for char in 'ḢḤḦȞḨḪH̱ĦǶ':\n self.trans[char] = 'H'\n for char in 'ḣḥḧȟḩḫ̱ẖħƕ':\n self.trans[char] = 'h'\n for char in 'IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ':\n self.trans[char] = 'I'\n for char in 'ıìȉíîĩḭïḯīĭȋįǐiịỉɨ':\n self.trans[char] = 'i'\n for char in 'ĴJ':\n self.trans[char] = 'J'\n for char in 'ɟĵ̌ǰ':\n self.trans[char] = 'j'\n for char in 'ḰǨĶḲḴƘ':\n self.trans[char] = 'K'\n for char in 'ḱǩķḳḵƙ':\n self.trans[char] = 'k'\n for char in 'ĹĻĽḶḸḺḼȽŁ':\n self.trans[char] = 'L'\n for char in 'ĺļľḷḹḻḽƚłɫ':\n self.trans[char] = 'l'\n for char in 'ḾṀṂ':\n self.trans[char] = 'M'\n for char in 'ḿṁṃɱ':\n self.trans[char] = 'm'\n for char in 'ǸŃÑŅŇṄṆṈṊŊƝɲȠ':\n self.trans[char] = 'N'\n for char in 'ǹńñņňṅṇṉṋŋɲƞ':\n self.trans[char] = 'n'\n for char in 'ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ':\n self.trans[char] = 'O'\n for char in 'òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ':\n self.trans[char] = 'o'\n for char in 'ȌŐȪ':\n self.trans[char] = 'Ö'\n for char in 'ȍőȫ':\n self.trans[char] = 'ö'\n for char in 'ỒỐỖỘỔȎ':\n self.trans[char] = 'Ô'\n for char in 'ồốỗộổȏ':\n self.trans[char] = 'ô'\n for char in 'ṔṖƤ':\n self.trans[char] = 'P'\n for char in 'ṕṗƥ':\n self.trans[char] = 'p'\n self.trans['ᵽ'] = 'q'\n for char in 'ȐŔŖŘȒṘṚṜṞ':\n self.trans[char] = 'R'\n for char in 'ȑŕŗřȓṙṛṝṟɽ':\n self.trans[char] = 'r'\n for char in 'ŚṤŞȘŠṦṠṢṨ':\n self.trans[char] = 'S'\n for char in 'śṥşșšṧṡṣṩȿ':\n self.trans[char] = 's'\n self.trans['Ŝ'] = 'Sx'\n self.trans['ŝ'] = 'sx'\n for char in 'ŢȚŤṪṬṮṰŦƬƮ':\n self.trans[char] = 'T'\n for char in 'ţțťṫṭṯṱŧȾƭʈ':\n self.trans[char] = 't'\n for char in 'ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ':\n self.trans[char] = 'U'\n for char in 'ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ':\n self.trans[char] = 'u'\n for char in 'ȔŰǛǗǕǙ':\n self.trans[char] = 'Ü'\n for char in 'ȕűǜǘǖǚ':\n self.trans[char] = 'ü'\n self.trans['Û'] = 'Ux'\n self.trans['û'] = 'ux'\n self.trans['Ȗ'] = 'Û'\n self.trans['ȗ'] = 'û'\n self.trans['Ừ'] = 'Ù'\n self.trans['ừ'] = 'ù'\n self.trans['Ứ'] = 'Ú'\n self.trans['ứ'] = 'ú'\n for char in 'ṼṾ':\n self.trans[char] = 'V'\n for char in 'ṽṿ':\n self.trans[char] = 'v'\n for char in 'ẀẂŴẄẆẈ':\n self.trans[char] = 'W'\n for char in 'ẁẃŵẅẇẉ':\n self.trans[char] = 'w'\n for char in 'ẊẌ':\n self.trans[char] = 'X'\n for char in 'ẋẍ':\n self.trans[char] = 'x'\n for char in 
'ỲÝŶŸỸȲẎỴỶƳ':\n self.trans[char] = 'Y'\n for char in 'ỳýŷÿỹȳẏỵỷƴ':\n self.trans[char] = 'y'\n for char in 'ŹẐŻẒŽẔƵȤ':\n self.trans[char] = 'Z'\n for char in 'źẑżẓžẕƶȥ':\n self.trans[char] = 'z'\n self.trans['ɀ'] = 'zv'\n\n # Latin: extended Latin alphabet\n self.trans['ɑ'] = 'a'\n for char in 'ÆǼǢ':\n self.trans[char] = 'AE'\n for char in 'æǽǣ':\n self.trans[char] = 'ae'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ƎƏƐ':\n self.trans[char] = 'E'\n for char in 'ǝəɛ':\n self.trans[char] = 'e'\n for char in 'ƔƢ':\n self.trans[char] = 'G'\n for char in 'ᵷɣƣᵹ':\n self.trans[char] = 'g'\n self.trans['Ƅ'] = 'H'\n self.trans['ƅ'] = 'h'\n self.trans['Ƕ'] = 'Wh'\n self.trans['ƕ'] = 'wh'\n self.trans['Ɩ'] = 'I'\n self.trans['ɩ'] = 'i'\n self.trans['Ŋ'] = 'Ng'\n self.trans['ŋ'] = 'ng'\n self.trans['Œ'] = 'OE'\n self.trans['œ'] = 'oe'\n self.trans['Ɔ'] = 'O'\n self.trans['ɔ'] = 'o'\n self.trans['Ȣ'] = 'Ou'\n self.trans['ȣ'] = 'ou'\n self.trans['Ƽ'] = 'Q'\n for char in 'ĸƽ':\n self.trans[char] = 'q'\n self.trans['ȹ'] = 'qp'\n self.trans[''] = 'r'\n self.trans['ſ'] = 's'\n self.trans['ß'] = 'ss'\n self.trans['Ʃ'] = 'Sh'\n for char in 'ʃᶋ':\n self.trans[char] = 'sh'\n self.trans['Ʉ'] = 'U'\n self.trans['ʉ'] = 'u'\n self.trans['Ʌ'] = 'V'\n self.trans['ʌ'] = 'v'\n for char in 'ƜǷ':\n self.trans[char] = 'W'\n for char in 'ɯƿ':\n self.trans[char] = 'w'\n self.trans['Ȝ'] = 'Y'\n self.trans['ȝ'] = 'y'\n self.trans['IJ'] = 'IJ'\n self.trans['ij'] = 'ij'\n self.trans['Ƨ'] = 'Z'\n for char in 'ʮƨ':\n self.trans[char] = 'z'\n self.trans['Ʒ'] = 'Zh'\n self.trans['ʒ'] = 'zh'\n self.trans['Ǯ'] = 'Dzh'\n self.trans['ǯ'] = 'dzh'\n for char in 'ƸƹʔˀɁɂ':\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in 'Cʗǃ':\n self.trans[char] = '!'\n\n # Punctuation and typography\n for char in '«»“”„¨':\n self.trans[char] = u'\"'\n for char in '‘’′':\n self.trans[char] = u\"'\"\n self.trans['•'] = '*'\n self.trans['@'] = '(at)'\n self.trans['¤'] = '$'\n self.trans['¢'] = 'c'\n self.trans['€'] = 'E'\n self.trans['£'] = 'L'\n self.trans['¥'] = 'yen'\n self.trans['†'] = '+'\n self.trans['‡'] = '++'\n self.trans['°'] = ':'\n self.trans['¡'] = '!'\n self.trans['¿'] = '?'\n self.trans['‰'] = 'o/oo'\n self.trans['‱'] = 'o/ooo'\n for char in '¶§':\n self.trans[char] = '>'\n self.trans['…'] = '...'\n for char in '‒–—―':\n self.trans[char] = '-'\n self.trans['·'] = ' '\n self.trans['¦'] = '|'\n self.trans['⁂'] = '***'\n self.trans['◊'] = '<>'\n self.trans['‽'] = '?!'\n self.trans['؟'] = ';-)'\n self.trans['¹'] = '1'\n self.trans['²'] = '2'\n self.trans['³'] = '3'\n\n # Cyrillic\n self.trans.update({'А': 'A', 'а': 'a', 'Б': 'B', 'б': 'b',\n 'В': 'V', 'в': 'v', 'Г': 'G', 'г': 'g',\n 'Д': 'D', 'д': 'd', 'Е': 'E', 'е': 'e',\n 'Ж': 'Zh', 'ж': 'zh', 'З': 'Z', 'з': 'z',\n 'И': 'I', 'и': 'i', 'Й': 'J', 'й': 'j',\n 'К': 'K', 'к': 'k', 'Л': 'L', 'л': 'l',\n 'М': 'M', 'м': 'm', 'Н': 'N', 'н': 'n',\n 'О': 'O', 'о': 'o', 'П': 'P', 'п': 'p',\n 'Р': 'R', 'р': 'r', 'С': 'S', 'с': 's',\n 'Т': 'T', 'т': 't', 'У': 'U', 'у': 'u',\n 'Ф': 'F', 'ф': 'f', 'х': 'kh', 'Ц': 'C',\n 'ц': 'c', 'Ч': 'Ch', 'ч': 'ch', 'Ш': 'Sh',\n 'ш': 'sh', 'Щ': 'Shch', 'щ': 'shch', 'Ь': \"'\",\n 'ь': \"'\", 'Ъ': '\"', 'ъ': '\"', 'Ю': 'Yu',\n 'ю': 'yu', 'Я': 'Ya', 'я': 'ya', 'Х': 'Kh',\n 'Χ': 'Kh'})\n\n # Additional Cyrillic letters, most occuring in only a few languages\n self.trans.update({\n 'Ы': 'Y', 'ы': 'y', 'Ё': 'Ë', 'ё': 'ë',\n 'Э': 'È', 'Ѐ': 'È', 'э': 'è', 'ѐ': 'è',\n 'І': 'I', 'і': 'i', 'Ї': 'Ji', 'ї': 'ji',\n 'Є': 'Je', 
'є': 'je', 'Ґ': 'G', 'Ҝ': 'G',\n 'ґ': 'g', 'ҝ': 'g', 'Ђ': 'Dj', 'ђ': 'dj',\n 'Љ': 'Lj', 'љ': 'lj',\n 'Њ': 'Nj', 'њ': 'nj', 'Ћ': 'Cj', 'ћ': 'cj',\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n 'Ќ': 'Kj', 'ќ': 'kj', 'Ӣ': 'Ii', 'ӣ': 'ii',\n 'Ҳ': 'H', 'ҳ': 'h',\n 'Ҷ': 'Dz', 'ҷ': 'dz', 'Ө': 'Ô', 'Ӫ': 'Ô',\n 'ө': 'ô', 'ӫ': 'ô', 'Ү': 'Y', 'ү': 'y', 'Һ': 'H',\n 'һ': 'h', 'Ә': 'AE', 'Ӕ': 'AE', 'ә': 'ae',\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n 'ѝ': 'ì', 'Ѝ': 'Ì', 'Ӑ': 'A', 'ă': 'a', 'Ӓ': 'Ä',\n 'Ҽ': 'Ts', 'Ҿ': 'Ts', 'ҽ': 'ts', 'ҿ': 'ts',\n 'Ҙ': 'Dh', 'ҙ': 'dh', 'Ӏ': '', 'ӏ': '', 'Ӆ': 'L',\n 'ӆ': 'l', 'Ӎ': 'M', 'ӎ': 'm', 'Ӧ': 'Ö', 'ӧ': 'ö',\n 'Ҩ': 'u', 'ҩ': 'u', 'Ҧ': 'Ph', 'ҧ': 'ph', 'Ҏ': 'R',\n 'ҏ': 'r', 'Ҫ': 'Th', 'ҫ': 'th', 'Ҭ': 'T', 'ҭ': 't',\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n 'ӹ': 'u', 'Ҵ': 'Tts', 'ҵ': 'tts', 'Ӵ': 'Ch', 'ӵ': 'ch'})\n\n for char in 'ЈӤҊ':\n self.trans[char] = 'J'\n for char in 'јӥҋ':\n self.trans[char] = 'j'\n for char in 'ЏӁӜҶ':\n self.trans[char] = 'Dzh'\n for char in 'џӂӝҷ':\n self.trans[char] = 'dzh'\n for char in 'ЅӞӠӋҸ':\n self.trans[char] = 'Dz'\n for char in 'ѕӟӡӌҹ':\n self.trans[char] = 'dz'\n for char in 'ҒӶҔ':\n self.trans[char] = 'G'\n for char in 'ғӷҕ':\n self.trans[char] = 'g'\n for char in 'ҚҞҠӃ':\n self.trans[char] = 'Q'\n for char in 'қҟҡӄ':\n self.trans[char] = 'q'\n for char in 'ҢҤӉӇ':\n self.trans[char] = 'Ng'\n for char in 'ңҥӊӈ':\n self.trans[char] = 'ng'\n for char in 'ӖѢҌ':\n self.trans[char] = 'E'\n for char in 'ӗѣҍ':\n self.trans[char] = 'e'\n for char in 'ӲӰҮ':\n self.trans[char] = 'Ü'\n for char in 'ӳӱү':\n self.trans[char] = 'ü'\n\n # Archaic Cyrillic letters\n self.trans.update({\n 'Ѹ': 'Ou', 'ѹ': 'ou', 'Ѡ': 'O', 'Ѻ': 'O', 'ѡ': 'o',\n 'ѻ': 'o', 'Ѿ': 'Ot', 'ѿ': 'ot', 'Ѣ': 'E', 'ѣ': 'e',\n 'Ѥ': 'Ei', 'Ѧ': 'Ei', 'ѥ': 'ei', 'ѧ': 'ei', 'Ѫ': 'Ai',\n 'ѫ': 'ai', 'Ѯ': 'X', 'ѯ': 'x', 'Ѱ': 'Ps', 'ѱ': 'ps',\n 'Ѳ': 'Th', 'ѳ': 'th', 'Ѵ': 'Ü', 'Ѷ': 'Ü', 'ѵ': 'ü'})\n\n # Hebrew alphabet\n for char in 'אע':\n self.trans[char] = u\"'\"\n self.trans['ב'] = 'b'\n self.trans['ג'] = 'g'\n self.trans['ד'] = 'd'\n self.trans['ה'] = 'h'\n self.trans['ו'] = 'v'\n self.trans['ז'] = 'z'\n self.trans['ח'] = 'kh'\n self.trans['ט'] = 't'\n self.trans['י'] = 'y'\n for char in 'ךכ':\n self.trans[char] = 'k'\n self.trans['ל'] = 'l'\n for char in 'םמ':\n self.trans[char] = 'm'\n for char in 'ןנ':\n self.trans[char] = 'n'\n self.trans['ס'] = 's'\n for char in 'ףפ':\n self.trans[char] = 'ph'\n for char in 'ץצ':\n self.trans[char] = 'ts'\n self.trans['ק'] = 'q'\n self.trans['ר'] = 'r'\n self.trans['ש'] = 'sh'\n self.trans['ת'] = 'th'\n\n # Arab alphabet\n for char in 'اﺍﺎ':\n self.trans[char] = 'a'\n for char in 'بﺏﺐﺒﺑ':\n self.trans[char] = 'b'\n for char in 'تﺕﺖﺘﺗ':\n self.trans[char] = 't'\n for char in 'ثﺙﺚﺜﺛ':\n self.trans[char] = 'th'\n for char in 'جﺝﺞﺠﺟ':\n self.trans[char] = 'g'\n for char in 'حﺡﺢﺤﺣ':\n self.trans[char] = 'h'\n for char in 'خﺥﺦﺨﺧ':\n self.trans[char] = 'kh'\n for char in 'دﺩﺪ':\n self.trans[char] = 'd'\n for char in 'ذﺫﺬ':\n self.trans[char] = 'dh'\n for char in 'رﺭﺮ':\n self.trans[char] = 'r'\n for char in 'زﺯﺰ':\n self.trans[char] = 'z'\n for char in 'سﺱﺲﺴﺳ':\n self.trans[char] = 's'\n for char in 'شﺵﺶﺸﺷ':\n self.trans[char] = 'sh'\n for char in 'صﺹﺺﺼﺻ':\n self.trans[char] = 's'\n for char in 'ضﺽﺾﻀﺿ':\n self.trans[char] = 'd'\n for char in 'طﻁﻂﻄﻃ':\n self.trans[char] = 't'\n for char in 'ظﻅﻆﻈﻇ':\n self.trans[char] = 'z'\n for char in 'عﻉﻊﻌﻋ':\n self.trans[char] = u\"'\"\n for char in 'غﻍﻎﻐﻏ':\n self.trans[char] 
= 'gh'\n for char in 'فﻑﻒﻔﻓ':\n self.trans[char] = 'f'\n for char in 'قﻕﻖﻘﻗ':\n self.trans[char] = 'q'\n for char in 'كﻙﻚﻜﻛک':\n self.trans[char] = 'k'\n for char in 'لﻝﻞﻠﻟ':\n self.trans[char] = 'l'\n for char in 'مﻡﻢﻤﻣ':\n self.trans[char] = 'm'\n for char in 'نﻥﻦﻨﻧ':\n self.trans[char] = 'n'\n for char in 'هﻩﻪﻬﻫ':\n self.trans[char] = 'h'\n for char in 'وﻭﻮ':\n self.trans[char] = 'w'\n for char in 'یيﻱﻲﻴﻳ':\n self.trans[char] = 'y'\n # Arabic - additional letters, modified letters and ligatures\n self.trans['ﺀ'] = \"'\"\n for char in 'آﺁﺂ':\n self.trans[char] = u\"'a\"\n for char in 'ةﺓﺔ':\n self.trans[char] = 'th'\n for char in 'ىﻯﻰ':\n self.trans[char] = 'á'\n for char in 'یﯼﯽﯿﯾ':\n self.trans[char] = 'y'\n self.trans['؟'] = '?'\n # Arabic - ligatures\n for char in 'ﻻﻼ':\n self.trans[char] = 'la'\n self.trans['ﷲ'] = 'llah'\n for char in 'إأ':\n self.trans[char] = u\"a'\"\n self.trans['ؤ'] = \"w'\"\n self.trans['ئ'] = \"y'\"\n for char in '◌◌':\n self.trans[char] = \"\" # indicates absence of vowels\n # Arabic vowels\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'i'\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'iy'\n # Arab numerals\n for char in '٠۰':\n self.trans[char] = '0'\n for char in '١۱':\n self.trans[char] = '1'\n for char in '٢۲':\n self.trans[char] = '2'\n for char in '٣۳':\n self.trans[char] = '3'\n for char in '٤۴':\n self.trans[char] = '4'\n for char in '٥۵':\n self.trans[char] = '5'\n for char in '٦۶':\n self.trans[char] = '6'\n for char in '٧۷':\n self.trans[char] = '7'\n for char in '٨۸':\n self.trans[char] = '8'\n for char in '٩۹':\n self.trans[char] = '9'\n # Perso-Arabic\n for char in 'پﭙﭙپ':\n self.trans[char] = 'p'\n for char in 'چچچچ':\n self.trans[char] = 'ch'\n for char in 'ژژ':\n self.trans[char] = 'zh'\n for char in 'گﮔﮕﮓ':\n self.trans[char] = 'g'\n\n # Greek\n self.trans.update({\n 'Α': 'A', 'α': 'a', 'Β': 'B', 'β': 'b', 'Γ': 'G',\n 'γ': 'g', 'Δ': 'D', 'δ': 'd', 'Ε': 'E', 'ε': 'e',\n 'Ζ': 'Z', 'ζ': 'z', 'Η': 'I', 'η': 'i', 'θ': 'th',\n 'Θ': 'Th', 'Ι': 'I', 'ι': 'i', 'Κ': 'K', 'κ': 'k',\n 'Λ': 'L', 'λ': 'l', 'Μ': 'M', 'μ': 'm', 'Ν': 'N',\n 'ν': 'n', 'Ξ': 'X', 'ξ': 'x', 'Ο': 'O', 'ο': 'o',\n 'Π': 'P', 'π': 'p', 'Ρ': 'R', 'ρ': 'r', 'Σ': 'S',\n 'σ': 's', 'ς': 's', 'Τ': 'T', 'τ': 't', 'Υ': 'Y',\n 'υ': 'y', 'Φ': 'F', 'φ': 'f', 'Ψ': 'Ps', 'ψ': 'ps',\n 'Ω': 'O', 'ω': 'o', 'ϗ': '&', 'Ϛ': 'St', 'ϛ': 'st',\n 'Ϙ': 'Q', 'Ϟ': 'Q', 'ϙ': 'q', 'ϟ': 'q', 'Ϻ': 'S',\n 'ϻ': 's', 'Ϡ': 'Ss', 'ϡ': 'ss', 'Ϸ': 'Sh', 'ϸ': 'sh',\n '·': ':', 'Ά': 'Á', 'ά': 'á', 'Έ': 'É', 'Ή': 'É',\n 'έ': 'é', 'ή': 'é', 'Ί': 'Í', 'ί': 'í', 'Ϊ': 'Ï',\n 'ϊ': 'ï', 'ΐ': 'ï', 'Ό': 'Ó', 'ό': 'ó', 'Ύ': 'Ý',\n 'ύ': 'ý', 'Ϋ': 'Y', 'ϋ': 'ÿ', 'ΰ': 'ÿ', 'Ώ': 'Ó',\n 'ώ': 'ó'})\n\n # Japanese (katakana and hiragana)\n for char in 'アァあ':\n self.trans[char] = 'a'\n for char in 'イィい':\n self.trans[char] = 'i'\n for char in 'ウう':\n self.trans[char] = 'u'\n for char in 'エェえ':\n self.trans[char] = 'e'\n for char in 'オォお':\n self.trans[char] = 'o'\n for char in 'ャや':\n self.trans[char] = 'ya'\n for char in 'ュゆ':\n self.trans[char] = 'yu'\n for char in 'ョよ':\n self.trans[char] = 'yo'\n for char in 'カか':\n self.trans[char] = 'ka'\n for char in 'キき':\n self.trans[char] = 'ki'\n for char in 'クく':\n self.trans[char] = 'ku'\n for char in 'ケけ':\n self.trans[char] = 'ke'\n for char in 'コこ':\n self.trans[char] = 'ko'\n for char in 'サさ':\n self.trans[char] = 'sa'\n for char in 'シし':\n self.trans[char] = 'shi'\n for char in 'スす':\n 
self.trans[char] = 'su'\n for char in 'セせ':\n self.trans[char] = 'se'\n for char in 'ソそ':\n self.trans[char] = 'so'\n for char in 'タた':\n self.trans[char] = 'ta'\n for char in 'チち':\n self.trans[char] = 'chi'\n for char in 'ツつ':\n self.trans[char] = 'tsu'\n for char in 'テて':\n self.trans[char] = 'te'\n for char in 'トと':\n self.trans[char] = 'to'\n for char in 'ナな':\n self.trans[char] = 'na'\n for char in 'ニに':\n self.trans[char] = 'ni'\n for char in 'ヌぬ':\n self.trans[char] = 'nu'\n for char in 'ネね':\n self.trans[char] = 'ne'\n for char in 'ノの':\n self.trans[char] = 'no'\n for char in 'ハは':\n self.trans[char] = 'ha'\n for char in 'ヒひ':\n self.trans[char] = 'hi'\n for char in 'フふ':\n self.trans[char] = 'fu'\n for char in 'ヘへ':\n self.trans[char] = 'he'\n for char in 'ホほ':\n self.trans[char] = 'ho'\n for char in 'マま':\n self.trans[char] = 'ma'\n for char in 'ミみ':\n self.trans[char] = 'mi'\n for char in 'ムむ':\n self.trans[char] = 'mu'\n for char in 'メめ':\n self.trans[char] = 'me'\n for char in 'モも':\n self.trans[char] = 'mo'\n for char in 'ラら':\n self.trans[char] = 'ra'\n for char in 'リり':\n self.trans[char] = 'ri'\n for char in 'ルる':\n self.trans[char] = 'ru'\n for char in 'レれ':\n self.trans[char] = 're'\n for char in 'ロろ':\n self.trans[char] = 'ro'\n for char in 'ワわ':\n self.trans[char] = 'wa'\n for char in 'ヰゐ':\n self.trans[char] = 'wi'\n for char in 'ヱゑ':\n self.trans[char] = 'we'\n for char in 'ヲを':\n self.trans[char] = 'wo'\n for char in 'ンん':\n self.trans[char] = 'n'\n for char in 'ガが':\n self.trans[char] = 'ga'\n for char in 'ギぎ':\n self.trans[char] = 'gi'\n for char in 'グぐ':\n self.trans[char] = 'gu'\n for char in 'ゲげ':\n self.trans[char] = 'ge'\n for char in 'ゴご':\n self.trans[char] = 'go'\n for char in 'ザざ':\n self.trans[char] = 'za'\n for char in 'ジじ':\n self.trans[char] = 'ji'\n for char in 'ズず':\n self.trans[char] = 'zu'\n for char in 'ゼぜ':\n self.trans[char] = 'ze'\n for char in 'ゾぞ':\n self.trans[char] = 'zo'\n for char in 'ダだ':\n self.trans[char] = 'da'\n for char in 'ヂぢ':\n self.trans[char] = 'dji'\n for char in 'ヅづ':\n self.trans[char] = 'dzu'\n for char in 'デで':\n self.trans[char] = 'de'\n for char in 'ドど':\n self.trans[char] = 'do'\n for char in 'バば':\n self.trans[char] = 'ba'\n for char in 'ビび':\n self.trans[char] = 'bi'\n for char in 'ブぶ':\n self.trans[char] = 'bu'\n for char in 'ベべ':\n self.trans[char] = 'be'\n for char in 'ボぼ':\n self.trans[char] = 'bo'\n for char in 'パぱ':\n self.trans[char] = 'pa'\n for char in 'ピぴ':\n self.trans[char] = 'pi'\n for char in 'プぷ':\n self.trans[char] = 'pu'\n for char in 'ペぺ':\n self.trans[char] = 'pe'\n for char in 'ポぽ':\n self.trans[char] = 'po'\n for char in 'ヴゔ':\n self.trans[char] = 'vu'\n self.trans['ヷ'] = 'va'\n self.trans['ヸ'] = 'vi'\n self.trans['ヹ'] = 've'\n self.trans['ヺ'] = 'vo'\n\n # Japanese and Chinese punctuation and typography\n for char in '・·':\n self.trans[char] = ' '\n for char in '〃『』《》':\n self.trans[char] = u'\"'\n for char in '「」〈〉〘〙〚〛':\n self.trans[char] = u\"'\"\n for char in '(〔':\n self.trans[char] = '('\n for char in ')〕':\n self.trans[char] = ')'\n for char in '[【〖':\n self.trans[char] = '['\n for char in ']】〗':\n self.trans[char] = ']'\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. 
'\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in '•◦':\n self.trans[char] = '_'\n for char in '※*':\n self.trans[char] = '*'\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in ',、':\n self.trans[char] = ','\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in 'ეჱ':\n self.trans[char] = 'e'\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in 'ყ':\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in 'წ':\n self.trans[char] = u\"ts'\"\n for char in 'ჭ':\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in 'पप':\n self.trans[char] = 'p'\n self.trans['अ'] = 'a'\n for char in 'आा':\n self.trans[char] = 'aa'\n self.trans['प'] = 'pa'\n for char in 'इि':\n self.trans[char] = 'i'\n for char in 'ईी':\n self.trans[char] = 'ii'\n for char in 'उु':\n self.trans[char] = 'u'\n for char in 'ऊू':\n self.trans[char] = 'uu'\n for char in 'एे':\n self.trans[char] = 'e'\n for char in 'ऐै':\n self.trans[char] = 'ai'\n for char in 'ओो':\n self.trans[char] = 'o'\n for char in 'औौ':\n self.trans[char] = 'au'\n for char in 'ऋृर':\n self.trans[char] = 'r'\n for char in 'ॠॄ':\n self.trans[char] = 'rr'\n for char in 'ऌॢल':\n self.trans[char] = 'l'\n for char in 'ॡॣ':\n self.trans[char] = 'll'\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in 'टत':\n self.trans[char] = 't'\n for char in 'ठथ':\n self.trans[char] = 'th'\n for char in 'डद':\n self.trans[char] = 'd'\n for char in 'ढध':\n self.trans[char] = 'dh'\n for char in 'णन':\n self.trans[char] = 'n'\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in 'षस':\n self.trans[char] = 's'\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in 'क़':\n self.trans[char] = 'q'\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in 'डढ':\n self.trans[char] = 'r'\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in 'ख्':\n self.trans[char] = 'khn'\n self.trans['त'] = 'tn'\n for char in 'द्':\n self.trans[char] = 'dn'\n self.trans['श'] = 'cn'\n for char in 'ह्':\n self.trans[char] = 'fn'\n for char in 'अँ':\n self.trans[char] = 'm'\n for char in '॒॑':\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n 
self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in 'Տ':\n self.trans[char] = u\"T'\"\n for char in 'տ':\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in 'க்':\n self.trans[char] = 'k'\n for char in 'ஙண்ந்ன்':\n self.trans[char] = 'n'\n self.trans['ச'] = 'c'\n for char in 'ஞ்':\n self.trans[char] = 'ñ'\n for char in 'ட்':\n self.trans[char] = 'th'\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in 'ம்':\n self.trans[char] = 'm'\n for char in 'ய்':\n self.trans[char] = 'y'\n for char in 'ர்ழ்ற':\n self.trans[char] = 'r'\n for char in 'ல்ள':\n self.trans[char] = 'l'\n for char in 'வ்':\n self.trans[char] = 'v'\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in 'க்ஷ':\n self.trans[char] = 'x'\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in 'আা':\n self.trans[char] = 'a'\n for char in 'ইিঈী':\n self.trans[char] = 'i'\n for char in 'উুঊূ':\n self.trans[char] = 'u'\n for char in 'ঋৃ':\n self.trans[char] = 'ri'\n for char in 'এেয়':\n self.trans[char] = 'e'\n for char in 'ঐৈ':\n self.trans[char] = 'oi'\n for char in 'ওো':\n self.trans[char] = 'o'\n for 
char in 'ঔৌ':\n self.trans[char] = 'ou'\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in 'টত':\n self.trans[char] = 't'\n for char in 'ঠথ':\n self.trans[char] = 'th'\n for char in 'ডদ':\n self.trans[char] = 'd'\n for char in 'ঢধ':\n self.trans[char] = 'dh'\n for char in 'ণন':\n self.trans[char] = 'n'\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in 'য়':\n self.trans[char] = '-'\n for char in 'ড়':\n self.trans[char] = 'r'\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in 'ขฃคฅฆ':\n self.trans[char] = 'kh'\n self.trans['ง'] = 'ng'\n for char in 'จฉชฌ':\n self.trans[char] = 'ch'\n for char in 'ซศษส':\n self.trans[char] = 's'\n for char in 'ญย':\n self.trans[char] = 'y'\n for char in 'ฎด':\n self.trans[char] = 'd'\n for char in 'ฏต':\n self.trans[char] = 't'\n for char in 'ฐฑฒถทธ':\n self.trans[char] = 'th'\n for char in 'ณน':\n self.trans[char] = 'n'\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in 'ผพภ':\n self.trans[char] = 'ph'\n for char in 'ฝฟ':\n self.trans[char] = 'f'\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in 'ลฬ':\n self.trans[char] = 'l'\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in 'หฮ':\n self.trans[char] = 'h'\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in 'อวโิ':\n self.trans[char] = 'o'\n for char in 'ะัา':\n self.trans[char] = 'a'\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in 'เ็':\n self.trans[char] = 'e'\n self.trans['แ'] = 'ae'\n for char in 'ใไ':\n self.trans[char] = 'ai'\n for char in '่้๊๋็์':\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 
'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans['ಅ'] = 'a'\n for char in 'ಆಾ':\n self.trans[char] = 'aa'\n for char in 'ಇಿ':\n self.trans[char] = 'i'\n for char in 'ಈೀ':\n self.trans[char] = 'ii'\n for char in 'ಉು':\n self.trans[char] = 'u'\n for char in 'ಊೂ':\n self.trans[char] = 'uu'\n for char in 'ಋೂ':\n self.trans[char] = u\"r'\"\n for char in 'ಎೆ':\n self.trans[char] = 'e'\n for char in 'ಏೇ':\n self.trans[char] = 'ee'\n for char in 'ಐೈ':\n self.trans[char] = 'ai'\n for char in 'ಒೊ':\n self.trans[char] = 'o'\n for char in 'ಓೋ':\n self.trans[char] = 'oo'\n for char in 'ಔೌ':\n self.trans[char] = 'au'\n self.trans['ಂ'] = \"m'\"\n self.trans['ಃ'] = \"h'\"\n self.trans['ಕ'] = 'k'\n self.trans['ಖ'] = 'kh'\n self.trans['ಗ'] = 'g'\n self.trans['ಘ'] = 'gh'\n self.trans['ಙ'] = 'ng'\n self.trans['ಚ'] = 'c'\n self.trans['ಛ'] = 'ch'\n self.trans['ಜ'] = 'j'\n self.trans['ಝ'] = 'ny'\n self.trans['ಟ'] = 'tt'\n self.trans['ಠ'] = 'tth'\n self.trans['ಡ'] = 'dd'\n self.trans['ಢ'] = 'ddh'\n self.trans['ಣ'] = 'nn'\n self.trans['ತ'] = 't'\n self.trans['ಥ'] = 'th'\n self.trans['ದ'] = 'd'\n self.trans['ಧ'] = 'dh'\n self.trans['ನ'] = 'n'\n self.trans['ಪ'] = 'p'\n self.trans['ಫ'] = 'ph'\n self.trans['ಬ'] = 'b'\n self.trans['ಭ'] = 'bh'\n self.trans['ಮ'] = 'm'\n self.trans['ಯ'] = 'y'\n self.trans['ರ'] = 'r'\n self.trans['ಲ'] = 'l'\n self.trans['ವ'] = 'v'\n self.trans['ಶ'] = 'sh'\n self.trans['ಷ'] = 'ss'\n self.trans['ಸ'] = 's'\n self.trans['ಹ'] = 'h'\n self.trans['ಳ'] = 'll'\n self.trans['೦'] = '0'\n self.trans['೧'] = '1'\n self.trans['೨'] = '2'\n self.trans['೩'] = '3'\n self.trans['೪'] = '4'\n self.trans['೫'] = '5'\n self.trans['೬'] = '6'\n self.trans['೭'] = '7'\n self.trans['೮'] = '8'\n self.trans['೯'] = '9'\n # Telugu\n self.trans['అ'] = 'a'\n for char in 'ఆా':\n self.trans[char] = 'aa'\n for char in 'ఇి':\n self.trans[char] = 'i'\n for char in 'ఈీ':\n self.trans[char] = 'ii'\n for char in 'ఉు':\n self.trans[char] = 'u'\n for char in 'ఊూ':\n self.trans[char] = 'uu'\n for char in 'ఋృ':\n self.trans[char] = \"r'\"\n for char in 'ౠౄ':\n self.trans[char] = 'r\"'\n self.trans['ఌ'] = \"l'\"\n self.trans['ౡ'] = 'l\"'\n for char in 'ఎె':\n self.trans[char] = 'e'\n for char in 'ఏే':\n self.trans[char] = 'ee'\n for char in 'ఐై':\n self.trans[char] = 'ai'\n for char in 'ఒొ':\n self.trans[char] = 'o'\n for char in 'ఓో':\n self.trans[char] = 'oo'\n for char in 'ఔౌ':\n self.trans[char] = 'au'\n self.trans['ం'] = \"'\"\n self.trans['ః'] = '\"'\n self.trans['క'] = 'k'\n self.trans['ఖ'] = 'kh'\n self.trans['గ'] = 'g'\n self.trans['ఘ'] = 'gh'\n self.trans['ఙ'] = 'ng'\n self.trans['చ'] = 'ts'\n self.trans['ఛ'] = 'tsh'\n self.trans['జ'] = 'j'\n self.trans['ఝ'] = 'jh'\n self.trans['ఞ'] = 'ñ'\n for char in 'టత':\n self.trans[char] = 't'\n for char in 'ఠథ':\n self.trans[char] = 'th'\n for char in 'డద':\n self.trans[char] = 'd'\n for char in 'ఢధ':\n self.trans[char] = 'dh'\n for char in 'ణన':\n self.trans[char] = 'n'\n self.trans['ప'] = 'p'\n self.trans['ఫ'] = 'ph'\n self.trans['బ'] = 'b'\n self.trans['భ'] = 'bh'\n self.trans['మ'] = 'm'\n self.trans['య'] = 'y'\n for char in 'రఱ':\n self.trans[char] = 'r'\n for char in 'లళ':\n self.trans[char] = 'l'\n self.trans['వ'] = 'v'\n self.trans['శ'] = 'sh'\n for char in 'షస':\n 
self.trans[char] = 's'\n self.trans['హ'] = 'h'\n self.trans['్'] = \"\"\n for char in 'ంఁ':\n self.trans[char] = '^'\n self.trans['ః'] = '-'\n self.trans['౦'] = '0'\n self.trans['౧'] = '1'\n self.trans['౨'] = '2'\n self.trans['౩'] = '3'\n self.trans['౪'] = '4'\n self.trans['౫'] = '5'\n self.trans['౬'] = '6'\n self.trans['౭'] = '7'\n self.trans['౮'] = '8'\n self.trans['౯'] = '9'\n self.trans['౹'] = '1/4'\n self.trans['౺'] = '1/2'\n self.trans['౻'] = '3/4'\n self.trans['౼'] = '1/16'\n self.trans['౽'] = '1/8'\n self.trans['౾'] = '3/16'\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans['ກ'] = 'k'\n for char in 'ຂຄ':\n self.trans[char] = 'kh'\n self.trans['ງ'] = 'ng'\n self.trans['ຈ'] = 'ch'\n for char in 'ສຊ':\n self.trans[char] = 's'\n self.trans['ຍ'] = 'ny'\n self.trans['ດ'] = 'd'\n self.trans['ຕ'] = 't'\n for char in 'ຖທ':\n self.trans[char] = 'th'\n self.trans['ນ'] = 'n'\n self.trans['ບ'] = 'b'\n self.trans['ປ'] = 'p'\n for char in 'ຜພ':\n self.trans[char] = 'ph'\n for char in 'ຝຟ':\n self.trans[char] = 'f'\n for char in 'ມໝ':\n self.trans[char] = 'm'\n self.trans['ຢ'] = 'y'\n for char in 'ຣຼ':\n self.trans[char] = 'r'\n for char in 'ລຼ':\n self.trans[char] = 'l'\n self.trans['ວ'] = 'v'\n self.trans['ຮ'] = 'h'\n self.trans['ອ'] = \"'\"\n for char in 'ະັ':\n self.trans[char] = 'a'\n self.trans['ິ'] = 'i'\n self.trans['ຶ'] = 'ue'\n self.trans['ຸ'] = 'u'\n self.trans['ເ'] = 'é'\n self.trans['ແ'] = 'è'\n for char in 'ໂົາໍ':\n self.trans[char] = 'o'\n self.trans['ຽ'] = 'ia'\n self.trans['ເຶ'] = 'uea'\n self.trans['ຍ'] = 'i'\n for char in 'ໄໃ':\n self.trans[char] = 'ai'\n self.trans['ຳ'] = 'am'\n self.trans['າ'] = 'aa'\n self.trans['ີ'] = 'ii'\n self.trans['ື'] = 'yy'\n self.trans['ູ'] = 'uu'\n self.trans['ເ'] = 'e'\n self.trans['ແ'] = 'ei'\n self.trans['໐'] = '0'\n self.trans['໑'] = '1'\n self.trans['໒'] = '2'\n self.trans['໓'] = '3'\n self.trans['໔'] = '4'\n self.trans['໕'] = '5'\n self.trans['໖'] = '6'\n self.trans['໗'] = '7'\n self.trans['໘'] = '8'\n self.trans['໙'] = '9'\n # Chinese -- note: incomplete\n for char in '埃挨哎唉哀皑癌蔼矮艾碍爱隘':\n self.trans[char] = 'ai'\n for char in '鞍氨安俺按暗岸胺案':\n self.trans[char] = 'an'\n for char in '肮昂盎':\n self.trans[char] = 'ang'\n for char in '凹敖熬翱袄傲奥懊澳':\n self.trans[char] = 'ao'\n for char in '芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸':\n self.trans[char] = 'ba'\n for char in '白柏百摆佰败拜稗':\n self.trans[char] = 'bai'\n for char in '斑班搬扳般颁板版扮拌伴瓣半办绊':\n self.trans[char] = 'ban'\n for char in '邦帮梆榜膀绑棒磅蚌镑傍谤':\n self.trans[char] = 'bang'\n for char in '苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆':\n self.trans[char] = 'bao'\n for char in '杯碑悲卑北辈背贝钡倍狈备惫焙被':\n self.trans[char] = 'bei'\n for char in '奔苯本笨':\n self.trans[char] = 'ben'\n for char in '崩绷甭泵蹦迸':\n self.trans[char] = 'beng'\n for char in '逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛':\n self.trans[char] = 'bi'\n for char in '鞭边编贬扁便变卞辨辩辫遍':\n self.trans[char] = 'bian'\n for char in '标彪膘表':\n self.trans[char] = 'biao'\n for char in '鳖憋别瘪':\n self.trans[char] = 'bie'\n for char in '彬斌濒滨宾摈':\n self.trans[char] = 'bin'\n for char in '兵冰柄丙秉饼炳病并':\n self.trans[char] = 'bing'\n for char in '玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳':\n self.trans[char] = 'bo'\n for char in '哺补埠不布步簿部怖':\n self.trans[char] = 'bu'\n for char in '猜裁材才财睬踩采彩菜蔡':\n self.trans[char] = 'cai'\n for char in '餐参蚕残惭惨灿':\n self.trans[char] = 'can'\n for char in '苍舱仓沧藏':\n self.trans[char] = 'cang'\n for char in '操糙槽曹草':\n self.trans[char] = 'cao'\n for char in '厕策侧册测':\n self.trans[char] = 'ce'\n for char in '层蹭':\n self.trans[char] = 'ceng'\n for char in 
'插叉茬茶查碴搽察岔差诧':\n self.trans[char] = 'cha'\n for char in '拆柴豺':\n self.trans[char] = 'chai'\n for char in '搀掺蝉馋谗缠铲产阐颤':\n self.trans[char] = 'chan'\n for char in '昌猖场尝常长偿肠厂敞畅唱倡':\n self.trans[char] = 'chang'\n for char in '超抄钞朝嘲潮巢吵炒':\n self.trans[char] = 'chao'\n for char in '车扯撤掣彻澈':\n self.trans[char] = 'che'\n for char in '郴臣辰尘晨忱沉陈趁衬':\n self.trans[char] = 'chen'\n for char in '撑称城橙成呈乘程惩澄诚承逞骋秤':\n self.trans[char] = 'cheng'\n for char in '吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽':\n self.trans[char] = 'chi'\n for char in '充冲虫崇宠':\n self.trans[char] = 'chong'\n for char in '抽酬畴踌稠愁筹仇绸瞅丑臭':\n self.trans[char] = 'chou'\n for char in '初出橱厨躇锄雏滁除楚储矗搐触处':\n self.trans[char] = 'chu'\n self.trans['揣'] = 'chuai'\n for char in '川穿椽传船喘串':\n self.trans[char] = 'chuan'\n for char in '疮窗幢床闯创':\n self.trans[char] = 'chuang'\n for char in '吹炊捶锤垂':\n self.trans[char] = 'chui'\n for char in '春椿醇唇淳纯蠢':\n self.trans[char] = 'chun'\n for char in '戳绰':\n self.trans[char] = 'chuo'\n for char in '疵茨磁雌辞慈瓷词此刺赐次':\n self.trans[char] = 'ci'\n for char in '聪葱囱匆从丛':\n self.trans[char] = 'cong'\n self.trans['凑'] = 'cou'\n for char in '粗醋簇促':\n self.trans[char] = 'cu'\n for char in '蹿篡窜':\n self.trans[char] = 'cuan'\n for char in '摧崔催脆瘁粹淬翠':\n self.trans[char] = 'cui'\n for char in '村存寸':\n self.trans[char] = 'cun'\n for char in '磋撮搓措挫错':\n self.trans[char] = 'cuo'\n for char in '搭达答瘩打大':\n self.trans[char] = 'da'\n for char in '呆歹傣戴带殆代贷袋待逮怠':\n self.trans[char] = 'dai'\n for char in '耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋':\n self.trans[char] = 'dan'\n for char in '当挡党荡档':\n self.trans[char] = 'dang'\n for char in '刀捣蹈倒岛祷导到稻悼道盗':\n self.trans[char] = 'dao'\n for char in '德得的':\n self.trans[char] = 'de'\n for char in '蹬灯登等瞪凳邓':\n self.trans[char] = 'deng'\n for char in '堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔':\n self.trans[char] = 'di'\n for char in '颠掂滇碘点典靛垫电佃甸店惦奠淀殿':\n self.trans[char] = 'dian'\n for char in '碉叼雕凋刁掉吊钓调':\n self.trans[char] = 'diao'\n for char in '跌爹碟蝶迭谍叠':\n self.trans[char] = 'die'\n for char in '丁盯叮钉顶鼎锭定订':\n self.trans[char] = 'ding'\n self.trans['丢'] = 'diu'\n for char in '东冬董懂动栋侗恫冻洞':\n self.trans[char] = 'dong'\n for char in '兜抖斗陡豆逗痘':\n self.trans[char] = 'dou'\n for char in '都督毒犊独读堵睹赌杜镀肚度渡妒':\n self.trans[char] = 'du'\n for char in '端短锻段断缎':\n self.trans[char] = 'duan'\n for char in '堆兑队对':\n self.trans[char] = 'dui'\n for char in '墩吨蹲敦顿囤钝盾遁':\n self.trans[char] = 'dun'\n for char in '掇哆多夺垛躲朵跺舵剁惰堕':\n self.trans[char] = 'duo'\n for char in '蛾峨鹅俄额讹娥恶厄扼遏鄂饿':\n self.trans[char] = 'e'\n for char in '恩嗯':\n self.trans[char] = 'en'\n for char in '而儿耳尔饵洱二贰':\n self.trans[char] = 'er'\n for char in '发罚筏伐乏阀法珐':\n self.trans[char] = 'fa'\n for char in '藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛':\n self.trans[char] = 'fan'\n for char in '坊芳方肪房防妨仿访纺放':\n self.trans[char] = 'fang'\n for char in '菲非啡飞肥匪诽吠肺废沸费':\n self.trans[char] = 'fei'\n for char in '芬酚吩氛分纷坟焚汾粉奋份忿愤粪':\n self.trans[char] = 'fen'\n for char in '丰封枫蜂峰锋风疯烽逢冯缝讽奉凤':\n self.trans[char] = 'feng'\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in ('夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋'\n '复傅付阜父腹负富讣附妇缚咐'):\n self.trans[char] = 'fu'\n for char in '噶嘎':\n self.trans[char] = 'ga'\n for char in '该改概钙盖溉':\n self.trans[char] = 'gai'\n for char in '干甘杆柑竿肝赶感秆敢赣':\n self.trans[char] = 'gan'\n for char in '冈刚钢缸肛纲岗港杠':\n self.trans[char] = 'gang'\n for char in '篙皋高膏羔糕搞镐稿告':\n self.trans[char] = 'gao'\n for char in '哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各':\n self.trans[char] = 'ge'\n self.trans['给'] = 'gei'\n for char in '根跟':\n self.trans[char] = 'gen'\n for char in '耕更庚羹埂耿梗':\n self.trans[char] = 'geng'\n for char in '工攻功恭龚供躬公宫弓巩汞拱贡共':\n self.trans[char] = 
'gong'\n for char in '钩勾沟苟狗垢构购够':\n self.trans[char] = 'gou'\n for char in '辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇':\n self.trans[char] = 'gu'\n for char in '刮瓜剐寡挂褂':\n self.trans[char] = 'gua'\n for char in '乖拐怪':\n self.trans[char] = 'guai'\n for char in '棺关官冠观管馆罐惯灌贯':\n self.trans[char] = 'guan'\n for char in '光广逛':\n self.trans[char] = 'guang'\n for char in '瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽':\n self.trans[char] = 'gui'\n for char in '辊滚棍':\n self.trans[char] = 'gun'\n for char in '锅郭国果裹过':\n self.trans[char] = 'guo'\n self.trans['哈'] = 'ha'\n for char in '骸孩海氦亥害骇':\n self.trans[char] = 'hai'\n for char in '酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉':\n self.trans[char] = 'han'\n for char in '夯杭航':\n self.trans[char] = 'hang'\n for char in '壕嚎豪毫郝好耗号浩':\n self.trans[char] = 'hao'\n for char in '呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺':\n self.trans[char] = 'he'\n for char in '嘿黑':\n self.trans[char] = 'hei'\n for char in '痕很狠恨':\n self.trans[char] = 'hen'\n for char in '哼亨横衡恒':\n self.trans[char] = 'heng'\n for char in '轰哄烘虹鸿洪宏弘红':\n self.trans[char] = 'hong'\n for char in '喉侯猴吼厚候后':\n self.trans[char] = 'hou'\n for char in '呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户':\n self.trans[char] = 'hu'\n for char in '花哗华猾滑画划化话':\n self.trans[char] = 'hua'\n for char in '槐徊怀淮坏':\n self.trans[char] = 'huai'\n for char in '欢环桓还缓换患唤痪豢焕涣宦幻':\n self.trans[char] = 'huan'\n for char in '荒慌黄磺蝗簧皇凰惶煌晃幌恍谎':\n self.trans[char] = 'huang'\n for char in '灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘':\n self.trans[char] = 'hui'\n for char in '荤昏婚魂浑混':\n self.trans[char] = 'hun'\n for char in '豁活伙火获或惑霍货祸':\n self.trans[char] = 'huo'\n for char in ('击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几'\n '脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪'):\n self.trans[char] = 'ji'\n for char in '嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁':\n self.trans[char] = 'jia'\n for char in ('歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健'\n '舰剑饯渐溅涧建'):\n self.trans[char] = 'jian'\n for char in '僵姜将浆江疆蒋桨奖讲匠酱降':\n self.trans[char] = 'jiang'\n for char in '蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖':\n self.trans[char] = 'jiao'\n for char in '揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届':\n self.trans[char] = 'jie'\n for char in '巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲':\n self.trans[char] = 'jin'\n for char in '荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净':\n self.trans[char] = 'jing'\n for char in '囧炯窘':\n self.trans[char] = 'jiong'\n for char in '揪究纠玖韭久灸九酒厩救旧臼舅咎就疚':\n self.trans[char] = 'jiu'\n for char in '鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧':\n self.trans[char] = 'ju'\n for char in '捐鹃娟倦眷卷绢':\n self.trans[char] = 'juan'\n for char in '撅攫抉掘倔爵觉决诀绝':\n self.trans[char] = 'jue'\n for char in '均菌钧军君峻俊竣浚郡骏':\n self.trans[char] = 'jun'\n for char in '喀咖卡咯':\n self.trans[char] = 'ka'\n for char in '开揩楷凯慨':\n self.trans[char] = 'kai'\n for char in '刊堪勘坎砍看':\n self.trans[char] = 'kan'\n for char in '康慷糠扛抗亢炕':\n self.trans[char] = 'kang'\n for char in '考拷烤靠':\n self.trans[char] = 'kao'\n for char in '坷苛柯棵磕颗科壳咳可渴克刻客课':\n self.trans[char] = 'ke'\n for char in '肯啃垦恳':\n self.trans[char] = 'ken'\n for char in '坑吭':\n self.trans[char] = 'keng'\n for char in '空恐孔控':\n self.trans[char] = 'kong'\n for char in '抠口扣寇':\n self.trans[char] = 'kou'\n for char in '枯哭窟苦酷库裤':\n self.trans[char] = 'ku'\n for char in '夸垮挎跨胯':\n self.trans[char] = 'kua'\n for char in '块筷侩快':\n self.trans[char] = 'kuai'\n for char in '宽款':\n self.trans[char] = 'kuan'\n for char in '匡筐狂框矿眶旷况':\n self.trans[char] = 'kuang'\n for char in '亏盔岿窥葵奎魁傀馈愧溃':\n self.trans[char] = 'kui'\n for char in '坤昆捆困':\n self.trans[char] = 'kun'\n for char in '括扩廓阔':\n self.trans[char] = 'kuo'\n for char in '垃拉喇蜡腊辣啦':\n self.trans[char] = 'la'\n for char in '莱来赖':\n self.trans[char] = 'lai'\n for char in '蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥':\n self.trans[char] = 'lan'\n for char in 
'琅榔狼廊郎朗浪':\n self.trans[char] = 'lang'\n for char in '捞劳牢老佬姥酪烙涝':\n self.trans[char] = 'lao'\n for char in '勒乐':\n self.trans[char] = 'le'\n for char in '雷镭蕾磊累儡垒擂肋类泪':\n self.trans[char] = 'lei'\n for char in '棱楞冷':\n self.trans[char] = 'leng'\n for char in ('厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力'\n '璃哩'):\n self.trans[char] = 'li'\n self.trans['俩'] = 'lia'\n for char in '联莲连镰廉怜涟帘敛脸链恋炼练':\n self.trans[char] = 'lian'\n for char in '粮凉梁粱良两辆量晾亮谅':\n self.trans[char] = 'liang'\n for char in '撩聊僚疗燎寥辽潦了撂镣廖料':\n self.trans[char] = 'liao'\n for char in '列裂烈劣猎':\n self.trans[char] = 'lie'\n for char in '琳林磷霖临邻鳞淋凛赁吝拎':\n self.trans[char] = 'lin'\n for char in '玲菱零龄铃伶羚凌灵陵岭领另令':\n self.trans[char] = 'ling'\n for char in '溜琉榴硫馏留刘瘤流柳六':\n self.trans[char] = 'liu'\n for char in '龙聋咙笼窿隆垄拢陇':\n self.trans[char] = 'long'\n for char in '楼娄搂篓漏陋':\n self.trans[char] = 'lou'\n for char in '芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸':\n self.trans[char] = 'lu'\n for char in '峦挛孪滦卵乱':\n self.trans[char] = 'luan'\n for char in '掠略':\n self.trans[char] = 'lue'\n for char in '抡轮伦仑沦纶论':\n self.trans[char] = 'lun'\n for char in '萝螺罗逻锣箩骡裸落洛骆络漯':\n self.trans[char] = 'luo'\n for char in '驴吕铝侣旅履屡缕虑氯律率滤绿':\n self.trans[char] = 'lv'\n for char in '妈麻玛码蚂马骂嘛吗':\n self.trans[char] = 'ma'\n for char in '埋买麦卖迈脉':\n self.trans[char] = 'mai'\n for char in '瞒馒蛮满蔓曼慢漫谩':\n self.trans[char] = 'man'\n for char in '芒茫盲氓忙莽':\n self.trans[char] = 'mang'\n for char in '猫茅锚毛矛铆卯茂冒帽貌贸':\n self.trans[char] = 'mao'\n self.trans['么'] = 'me'\n for char in '玫枚梅酶霉煤没眉媒镁每美昧寐妹媚':\n self.trans[char] = 'mei'\n for char in '门闷们':\n self.trans[char] = 'men'\n for char in '萌蒙檬盟锰猛梦孟':\n self.trans[char] = 'meng'\n for char in '眯醚靡糜迷谜弥米秘觅泌蜜密幂':\n self.trans[char] = 'mi'\n for char in '棉眠绵冕免勉娩缅面':\n self.trans[char] = 'mian'\n for char in '苗描瞄藐秒渺庙妙':\n self.trans[char] = 'miao'\n for char in '蔑灭':\n self.trans[char] = 'mie'\n for char in '民抿皿敏悯闽':\n self.trans[char] = 'min'\n for char in '明螟鸣铭名命':\n self.trans[char] = 'ming'\n self.trans['谬'] = 'miu'\n for char in '摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌':\n self.trans[char] = 'mo'\n for char in '谋牟某':\n self.trans[char] = 'mou'\n for char in '拇牡亩姆母墓暮幕募慕木目睦牧穆':\n self.trans[char] = 'mu'\n for char in '拿哪呐钠那娜纳':\n self.trans[char] = 'na'\n for char in '氖乃奶耐奈':\n self.trans[char] = 'nai'\n for char in '南男难':\n self.trans[char] = 'nan'\n self.trans['囊'] = 'nang'\n for char in '挠脑恼闹淖':\n self.trans[char] = 'nao'\n self.trans['呢'] = 'ne'\n for char in '馁内':\n self.trans[char] = 'nei'\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in '妮霓倪泥尼拟你匿腻逆溺':\n self.trans[char] = 'ni'\n for char in '蔫拈年碾撵捻念':\n self.trans[char] = 'nian'\n for char in '娘酿':\n self.trans[char] = 'niang'\n for char in '鸟尿':\n self.trans[char] = 'niao'\n for char in '捏聂孽啮镊镍涅':\n self.trans[char] = 'nie'\n self.trans['您'] = 'nin'\n for char in '柠狞凝宁拧泞':\n self.trans[char] = 'ning'\n for char in '牛扭钮纽':\n self.trans[char] = 'niu'\n for char in '脓浓农弄':\n self.trans[char] = 'nong'\n for char in '奴努怒':\n self.trans[char] = 'nu'\n self.trans['暖'] = 'nuan'\n for char in '虐疟':\n self.trans[char] = 'nue'\n for char in '挪懦糯诺':\n self.trans[char] = 'nuo'\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in '欧鸥殴藕呕偶沤':\n self.trans[char] = 'ou'\n for char in '啪趴爬帕怕琶':\n self.trans[char] = 'pa'\n for char in '拍排牌徘湃派':\n self.trans[char] = 'pai'\n for char in '攀潘盘磐盼畔判叛':\n self.trans[char] = 'pan'\n for char in '乓庞旁耪胖':\n self.trans[char] = 'pang'\n for char in '抛咆刨炮袍跑泡':\n self.trans[char] = 'pao'\n for char in '呸胚培裴赔陪配佩沛':\n self.trans[char] = 'pei'\n for char in '喷盆':\n 
self.trans[char] = 'pen'\n for char in '砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰':\n self.trans[char] = 'peng'\n for char in '坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬':\n self.trans[char] = 'pi'\n for char in '篇偏片骗':\n self.trans[char] = 'pian'\n for char in '飘漂瓢票':\n self.trans[char] = 'piao'\n for char in '撇瞥':\n self.trans[char] = 'pie'\n for char in '拼频贫品聘':\n self.trans[char] = 'pin'\n for char in '乒坪苹萍平凭瓶评屏':\n self.trans[char] = 'ping'\n for char in '坡泼颇婆破魄迫粕剖':\n self.trans[char] = 'po'\n for char in '扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮':\n self.trans[char] = 'pu'\n for char in ('期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄'\n '弃汽泣讫'):\n self.trans[char] = 'qi'\n for char in '掐恰洽':\n self.trans[char] = 'qia'\n for char in '牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉':\n self.trans[char] = 'qian'\n for char in '枪呛腔羌墙蔷强抢':\n self.trans[char] = 'qiang'\n for char in '橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍':\n self.trans[char] = 'qiao'\n for char in '切茄且怯窃':\n self.trans[char] = 'qie'\n for char in '钦侵亲秦琴勤芹擒禽寝沁':\n self.trans[char] = 'qin'\n for char in '青轻氢倾卿清擎晴氰情顷请庆':\n self.trans[char] = 'qing'\n for char in '琼穷':\n self.trans[char] = 'qiong'\n for char in '秋丘邱球求囚酋泅':\n self.trans[char] = 'qiu'\n for char in '趋区蛆曲躯屈驱渠取娶龋趣去':\n self.trans[char] = 'qu'\n for char in '圈颧权醛泉全痊拳犬券劝':\n self.trans[char] = 'quan'\n for char in '缺炔瘸却鹊榷确雀':\n self.trans[char] = 'que'\n for char in '裙群':\n self.trans[char] = 'qun'\n for char in '然燃冉染':\n self.trans[char] = 'ran'\n for char in '瓤壤攘嚷让':\n self.trans[char] = 'rang'\n for char in '饶扰绕':\n self.trans[char] = 'rao'\n for char in '惹热':\n self.trans[char] = 're'\n for char in '壬仁人忍韧任认刃妊纫':\n self.trans[char] = 'ren'\n for char in '扔仍':\n self.trans[char] = 'reng'\n self.trans['日'] = 'ri'\n for char in '戎茸蓉荣融熔溶容绒冗':\n self.trans[char] = 'rong'\n for char in '揉柔肉':\n self.trans[char] = 'rou'\n for char in '茹蠕儒孺如辱乳汝入褥':\n self.trans[char] = 'ru'\n for char in '软阮':\n self.trans[char] = 'ruan'\n for char in '蕊瑞锐':\n self.trans[char] = 'rui'\n for char in '闰润':\n self.trans[char] = 'run'\n for char in '若弱':\n self.trans[char] = 'ruo'\n for char in '撒洒萨':\n self.trans[char] = 'sa'\n for char in '腮鳃塞赛':\n self.trans[char] = 'sai'\n for char in '三叁伞散':\n self.trans[char] = 'san'\n for char in '桑嗓丧':\n self.trans[char] = 'sang'\n for char in '搔骚扫嫂':\n self.trans[char] = 'sao'\n for char in '瑟色涩':\n self.trans[char] = 'se'\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in '莎砂杀刹沙纱傻啥煞':\n self.trans[char] = 'sha'\n for char in '筛晒':\n self.trans[char] = 'shai'\n for char in '珊苫杉山删煽衫闪陕擅赡膳善汕扇缮':\n self.trans[char] = 'shan'\n for char in '墒伤商赏晌上尚裳':\n self.trans[char] = 'shang'\n for char in '梢捎稍烧芍勺韶少哨邵绍':\n self.trans[char] = 'shao'\n for char in '奢赊蛇舌舍赦摄射慑涉社设':\n self.trans[char] = 'she'\n for char in '砷申呻伸身深娠绅神沈审婶甚肾慎渗':\n self.trans[char] = 'shen'\n for char in '声生甥牲升绳省盛剩胜圣':\n self.trans[char] = 'sheng'\n for char in ('师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝'\n '势是嗜噬适仕侍释饰氏市恃室视试'):\n self.trans[char] = 'shi'\n for char in '收手首守寿授售受瘦兽':\n self.trans[char] = 'shou'\n for char in (\n '蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕'):\n self.trans[char] = 'shu'\n for char in '刷耍':\n self.trans[char] = 'shua'\n for char in '摔衰甩帅':\n self.trans[char] = 'shuai'\n for char in '栓拴':\n self.trans[char] = 'shuan'\n for char in '霜双爽':\n self.trans[char] = 'shuang'\n for char in '谁水睡税':\n self.trans[char] = 'shui'\n for char in '吮瞬顺舜':\n self.trans[char] = 'shun'\n for char in '说硕朔烁':\n self.trans[char] = 'shuo'\n for char in '斯撕嘶思私司丝死肆寺嗣四伺似饲巳':\n self.trans[char] = 'si'\n for char in '松耸怂颂送宋讼诵':\n self.trans[char] = 'song'\n for char in '搜艘擞':\n self.trans[char] = 'sou'\n for char in 
'嗽苏酥俗素速粟僳塑溯宿诉肃':\n self.trans[char] = 'su'\n for char in '酸蒜算':\n self.trans[char] = 'suan'\n for char in '虽隋随绥髓碎岁穗遂隧祟':\n self.trans[char] = 'sui'\n for char in '孙损笋':\n self.trans[char] = 'sun'\n for char in '蓑梭唆缩琐索锁所':\n self.trans[char] = 'suo'\n for char in '塌他它她塔獭挞蹋踏':\n self.trans[char] = 'ta'\n for char in '胎苔抬台泰酞太态汰':\n self.trans[char] = 'tai'\n for char in '坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭':\n self.trans[char] = 'tan'\n for char in '汤塘搪堂棠膛唐糖倘躺淌趟烫':\n self.trans[char] = 'tang'\n for char in '掏涛滔绦萄桃逃淘陶讨套':\n self.trans[char] = 'tao'\n self.trans['特'] = 'te'\n for char in '藤腾疼誊':\n self.trans[char] = 'teng'\n for char in '梯剔踢锑提题蹄啼体替嚏惕涕剃屉':\n self.trans[char] = 'ti'\n for char in '兲天添填田甜恬舔腆':\n self.trans[char] = 'tian'\n for char in '挑条迢眺跳':\n self.trans[char] = 'tiao'\n for char in '贴铁帖':\n self.trans[char] = 'tie'\n for char in '厅听烃汀廷停亭庭挺艇':\n self.trans[char] = 'ting'\n for char in '通桐酮瞳同铜彤童桶捅筒统痛':\n self.trans[char] = 'tong'\n for char in '偷投头透':\n self.trans[char] = 'tou'\n for char in '凸秃突图徒途涂屠土吐兔':\n self.trans[char] = 'tu'\n for char in '湍团':\n self.trans[char] = 'tuan'\n for char in '推颓腿蜕褪退':\n self.trans[char] = 'tui'\n for char in '吞屯臀':\n self.trans[char] = 'tun'\n for char in '拖托脱鸵陀驮驼椭妥拓唾':\n self.trans[char] = 'tuo'\n for char in '挖哇蛙洼娃瓦袜':\n self.trans[char] = 'wa'\n for char in '歪外':\n self.trans[char] = 'wai'\n for char in '豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞':\n self.trans[char] = 'wan'\n for char in '汪王亡枉网往旺望忘妄':\n self.trans[char] = 'wang'\n for char in '威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫':\n self.trans[char] = 'wei'\n for char in '瘟温蚊文闻纹吻稳紊问':\n self.trans[char] = 'wen'\n for char in '嗡翁瓮':\n self.trans[char] = 'weng'\n for char in '挝蜗涡窝我斡卧握沃':\n self.trans[char] = 'wo'\n for char in '巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误':\n self.trans[char] = 'wu'\n for char in ('昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系'\n '隙戏细'):\n self.trans[char] = 'xi'\n for char in '瞎虾匣霞辖暇峡侠狭下厦夏吓':\n self.trans[char] = 'xia'\n for char in '掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线':\n self.trans[char] = 'xian'\n for char in '相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象':\n self.trans[char] = 'xiang'\n for char in '萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效':\n self.trans[char] = 'xiao'\n for char in '楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑':\n self.trans[char] = 'xie'\n for char in '薪芯锌欣辛新忻心信衅':\n self.trans[char] = 'xin'\n for char in '星腥猩惺兴刑型形邢行醒幸杏性姓':\n self.trans[char] = 'xing'\n for char in '兄凶胸匈汹雄熊':\n self.trans[char] = 'xiong'\n for char in '休修羞朽嗅锈秀袖绣':\n self.trans[char] = 'xiu'\n for char in '墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续':\n self.trans[char] = 'xu'\n for char in '轩喧宣悬旋玄选癣眩绚':\n self.trans[char] = 'xuan'\n for char in '靴薛学穴雪血':\n self.trans[char] = 'xue'\n for char in '勋熏循旬询寻驯巡殉汛训讯逊迅':\n self.trans[char] = 'xun'\n for char in '压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶':\n self.trans[char] = 'ya'\n for char in '焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验':\n self.trans[char] = 'yan'\n for char in '殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾':\n self.trans[char] = 'yang'\n for char in '邀腰妖瑶摇尧遥窑谣姚咬舀药要耀':\n self.trans[char] = 'yao'\n for char in '椰噎耶爷野冶也页掖业叶曳腋夜液':\n self.trans[char] = 'ye'\n for char in ('一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿'\n '役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎'):\n self.trans[char] = 'yi'\n for char in '茵荫因殷音阴姻吟银淫寅饮尹引隐印':\n self.trans[char] = 'yin'\n for char in '英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映':\n self.trans[char] = 'ying'\n self.trans['哟'] = 'yo'\n for char in '拥佣臃痈庸雍踊蛹咏泳涌永恿勇用':\n self.trans[char] = 'yong'\n for char in '幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂':\n self.trans[char] = 'you'\n for char in ('淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻'\n '峪御愈欲狱育誉浴寓裕预豫驭'):\n self.trans[char] = 'yu'\n for char in '鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院':\n self.trans[char] = 'yuan'\n for char in '曰约越跃钥岳粤月悦阅':\n self.trans[char] = 'yue'\n 
for char in '耘云郧匀陨允运蕴酝晕韵孕':\n self.trans[char] = 'yun'\n for char in '匝砸杂':\n self.trans[char] = 'za'\n for char in '栽哉灾宰载再在':\n self.trans[char] = 'zai'\n for char in '咱攒暂赞':\n self.trans[char] = 'zan'\n for char in '赃脏葬':\n self.trans[char] = 'zang'\n for char in '遭糟凿藻枣早澡蚤躁噪造皂灶燥':\n self.trans[char] = 'zao'\n for char in '责择则泽':\n self.trans[char] = 'ze'\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in '增憎曾赠':\n self.trans[char] = 'zeng'\n for char in '扎喳渣札轧铡闸眨栅榨咋乍炸诈':\n self.trans[char] = 'zha'\n for char in '摘斋宅窄债寨':\n self.trans[char] = 'zhai'\n for char in '瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽':\n self.trans[char] = 'zhan'\n for char in '樟章彰漳张掌涨杖丈帐账仗胀瘴障':\n self.trans[char] = 'zhang'\n for char in '招昭找沼赵照罩兆肇召':\n self.trans[char] = 'zhao'\n for char in '遮折哲蛰辙者锗蔗这浙':\n self.trans[char] = 'zhe'\n for char in '珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳':\n self.trans[char] = 'zhen'\n for char in '蒸挣睁征狰争怔整拯正政帧症郑证':\n self.trans[char] = 'zheng'\n for char in ('芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置'\n '帜峙制智秩稚质炙痔滞治窒'):\n self.trans[char] = 'zhi'\n for char in '中盅忠钟衷终种肿重仲众':\n self.trans[char] = 'zhong'\n for char in '舟周州洲诌粥轴肘帚咒皱宙昼骤':\n self.trans[char] = 'zhou'\n for char in '珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻':\n self.trans[char] = 'zhu'\n for char in '抓爪':\n self.trans[char] = 'zhua'\n self.trans['拽'] = 'zhuai'\n for char in '专砖转撰赚篆':\n self.trans[char] = 'zhuan'\n for char in '桩庄装妆撞壮状':\n self.trans[char] = 'zhuang'\n for char in '椎锥追赘坠缀':\n self.trans[char] = 'zhui'\n for char in '谆准':\n self.trans[char] = 'zhun'\n for char in '捉拙卓桌琢茁酌啄着灼浊':\n self.trans[char] = 'zhuo'\n for char in '兹咨资姿滋淄孜紫仔籽滓子自渍字':\n self.trans[char] = 'zi'\n for char in '鬃棕踪宗综总纵':\n self.trans[char] = 'zong'\n for char in '邹走奏揍':\n self.trans[char] = 'zou'\n for char in '租足卒族祖诅阻组':\n self.trans[char] = 'zu'\n for char in '钻纂':\n self.trans[char] = 'zuan'\n for char in '嘴醉最罪':\n self.trans[char] = 'zui'\n for char in '尊遵':\n self.trans[char] = 'zun'\n for char in '昨左佐柞做作坐座':\n self.trans[char] = 'zuo'\n # from:\n # https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans['ଂ'] = 'anusvara'\n self.trans['ઇ'] = 'i'\n self.trans['എ'] = 'e'\n self.trans['ગ'] = 'ga'\n self.trans['ਜ'] = 'ja'\n self.trans['ഞ'] = 'nya'\n self.trans['ଢ'] = 'ddha'\n self.trans['ધ'] = 'dha'\n self.trans['ਬ'] = 'ba'\n self.trans['മ'] = 'ma'\n self.trans['ଲ'] = 'la'\n self.trans['ષ'] = 'ssa'\n self.trans['਼'] = 'nukta'\n self.trans['ാ'] = 'aa'\n self.trans['ୂ'] = 'uu'\n self.trans['ે'] = 'e'\n self.trans['ੌ'] = 'au'\n self.trans['ൎ'] = 'reph'\n self.trans['ੜ'] = 'rra'\n self.trans['՞'] = '?'\n self.trans['ୢ'] = 'l'\n self.trans['૧'] = '1'\n self.trans['੬'] = '6'\n self.trans['൮'] = '8'\n self.trans['୲'] = 'quarter'\n self.trans['ൾ'] = 'll'\n self.trans['ਇ'] = 'i'\n self.trans['ഉ'] = 'u'\n self.trans['ઌ'] = 'l'\n self.trans['ਗ'] = 'ga'\n self.trans['ങ'] = 'nga'\n self.trans['ଝ'] = 'jha'\n self.trans['જ'] = 'ja'\n self.trans['؟'] = '?'\n self.trans['ਧ'] = 'dha'\n self.trans['ഩ'] = 'nnna'\n self.trans['ଭ'] = 'bha'\n self.trans['બ'] = 'ba'\n self.trans['ഹ'] = 'ha'\n self.trans['ଽ'] = 'avagraha'\n self.trans['઼'] = 'nukta'\n self.trans['ੇ'] = 'ee'\n self.trans['୍'] = 'virama'\n self.trans['ૌ'] = 'au'\n self.trans['੧'] = '1'\n self.trans['൩'] = '3'\n self.trans['୭'] = '7'\n self.trans['૬'] = '6'\n self.trans['൹'] = 'mark'\n self.trans['ਖ਼'] = 'khha'\n self.trans['ਂ'] = 'bindi'\n self.trans['ഈ'] = 'ii'\n self.trans['ઍ'] = 'e'\n self.trans['ଌ'] = 'l'\n self.trans['ഘ'] = 'gha'\n self.trans['ઝ'] = 'jha'\n self.trans['ଡ଼'] = 'rra'\n self.trans['ਢ'] = 'ddha'\n 
self.trans['ന'] = 'na'\n self.trans['ભ'] = 'bha'\n self.trans['ବ'] = 'ba'\n self.trans['ਲ'] = 'la'\n self.trans['സ'] = 'sa'\n self.trans['ઽ'] = 'avagraha'\n self.trans['଼'] = 'nukta'\n self.trans['ੂ'] = 'uu'\n self.trans['ൈ'] = 'ai'\n self.trans['્'] = 'virama'\n self.trans['ୌ'] = 'au'\n self.trans['൨'] = '2'\n self.trans['૭'] = '7'\n self.trans['୬'] = '6'\n self.trans['ੲ'] = 'iri'\n self.trans['ഃ'] = 'visarga'\n self.trans['ં'] = 'anusvara'\n self.trans['ଇ'] = 'i'\n self.trans['ഓ'] = 'oo'\n self.trans['ଗ'] = 'ga'\n self.trans['ਝ'] = 'jha'\n self.trans['?'] = '?'\n self.trans['ണ'] = 'nna'\n self.trans['ઢ'] = 'ddha'\n self.trans['ଧ'] = 'dha'\n self.trans['ਭ'] = 'bha'\n self.trans['ള'] = 'lla'\n self.trans['લ'] = 'la'\n self.trans['ଷ'] = 'ssa'\n self.trans['ൃ'] = 'r'\n self.trans['ૂ'] = 'uu'\n self.trans['େ'] = 'e'\n self.trans['੍'] = 'virama'\n self.trans['ୗ'] = 'mark'\n self.trans['ൣ'] = 'll'\n self.trans['ૢ'] = 'l'\n self.trans['୧'] = '1'\n self.trans['੭'] = '7'\n self.trans['൳'] = '1/4'\n self.trans['୷'] = 'sixteenths'\n self.trans['ଆ'] = 'aa'\n self.trans['ઋ'] = 'r'\n self.trans['ഊ'] = 'uu'\n self.trans['ਐ'] = 'ai'\n self.trans['ଖ'] = 'kha'\n self.trans['છ'] = 'cha'\n self.trans['ച'] = 'ca'\n self.trans['ਠ'] = 'ttha'\n self.trans['ଦ'] = 'da'\n self.trans['ફ'] = 'pha'\n self.trans['പ'] = 'pa'\n self.trans['ਰ'] = 'ra'\n self.trans['ଶ'] = 'sha'\n self.trans['ഺ'] = 'ttta'\n self.trans['ੀ'] = 'ii'\n self.trans['ો'] = 'o'\n self.trans['ൊ'] = 'o'\n self.trans['ୖ'] = 'mark'\n self.trans['୦'] = '0'\n self.trans['૫'] = '5'\n self.trans['൪'] = '4'\n self.trans['ੰ'] = 'tippi'\n self.trans['୶'] = 'eighth'\n self.trans['ൺ'] = 'nn'\n self.trans['ଁ'] = 'candrabindu'\n self.trans['അ'] = 'a'\n self.trans['ઐ'] = 'ai'\n self.trans['ക'] = 'ka'\n self.trans['ਸ਼'] = 'sha'\n self.trans['ਛ'] = 'cha'\n self.trans['ଡ'] = 'dda'\n self.trans['ઠ'] = 'ttha'\n self.trans['ഥ'] = 'tha'\n self.trans['ਫ'] = 'pha'\n self.trans['ર'] = 'ra'\n self.trans['വ'] = 'va'\n self.trans['ୁ'] = 'u'\n self.trans['ી'] = 'ii'\n self.trans['ੋ'] = 'oo'\n self.trans['ૐ'] = 'om'\n self.trans['ୡ'] = 'll'\n self.trans['ૠ'] = 'rr'\n self.trans['੫'] = '5'\n self.trans['ୱ'] = 'wa'\n self.trans['૰'] = 'sign'\n self.trans['൵'] = 'quarters'\n self.trans['ਫ਼'] = 'fa'\n self.trans['ઁ'] = 'candrabindu'\n self.trans['ਆ'] = 'aa'\n self.trans['ઑ'] = 'o'\n self.trans['ଐ'] = 'ai'\n self.trans['ഔ'] = 'au'\n self.trans['ਖ'] = 'kha'\n self.trans['ડ'] = 'dda'\n self.trans['ଠ'] = 'ttha'\n self.trans['ത'] = 'ta'\n self.trans['ਦ'] = 'da'\n self.trans['ର'] = 'ra'\n self.trans['ഴ'] = 'llla'\n self.trans['ુ'] = 'u'\n self.trans['ୀ'] = 'ii'\n self.trans['ൄ'] = 'rr'\n self.trans['ૡ'] = 'll'\n self.trans['ୠ'] = 'rr'\n self.trans['੦'] = '0'\n self.trans['૱'] = 'sign'\n self.trans['୰'] = 'isshar'\n self.trans['൴'] = '1/2'\n self.trans['ਁ'] = 'bindi'\n self.trans['આ'] = 'aa'\n self.trans['ଋ'] = 'r'\n self.trans['ഏ'] = 'ee'\n self.trans['ખ'] = 'kha'\n self.trans['ଛ'] = 'cha'\n self.trans['ട'] = 'tta'\n self.trans['ਡ'] = 'dda'\n self.trans['દ'] = 'da'\n self.trans['ଫ'] = 'pha'\n self.trans['യ'] = 'ya'\n self.trans['શ'] = 'sha'\n self.trans['ി'] = 'i'\n self.trans['ੁ'] = 'u'\n self.trans['ୋ'] = 'o'\n self.trans['ੑ'] = 'udaat'\n self.trans['૦'] = '0'\n self.trans['୫'] = '5'\n self.trans['൯'] = '9'\n self.trans['ੱ'] = 'addak'\n self.trans['ൿ'] = 'k'\n self.trans['ആ'] = 'aa'\n self.trans['ଊ'] = 'uu'\n self.trans['એ'] = 'e'\n self.trans['ਔ'] = 'au'\n self.trans['ഖ'] = 'kha'\n self.trans['ଚ'] = 'ca'\n self.trans['ટ'] = 'tta'\n self.trans['ਤ'] = 'ta'\n self.trans['ദ'] = 'da'\n 
self.trans['ପ'] = 'pa'\n self.trans['ય'] = 'ya'\n self.trans['ശ'] = 'sha'\n self.trans['િ'] = 'i'\n self.trans['െ'] = 'e'\n self.trans['൦'] = '0'\n self.trans['୪'] = '4'\n self.trans['૯'] = '9'\n self.trans['ੴ'] = 'onkar'\n self.trans['ଅ'] = 'a'\n self.trans['ਏ'] = 'ee'\n self.trans['କ'] = 'ka'\n self.trans['ઔ'] = 'au'\n self.trans['ਟ'] = 'tta'\n self.trans['ഡ'] = 'dda'\n self.trans['ଥ'] = 'tha'\n self.trans['ત'] = 'ta'\n self.trans['ਯ'] = 'ya'\n self.trans['റ'] = 'rra'\n self.trans['ଵ'] = 'va'\n self.trans['ਿ'] = 'i'\n self.trans['ു'] = 'u'\n self.trans['ૄ'] = 'rr'\n self.trans['ൡ'] = 'll'\n self.trans['੯'] = '9'\n self.trans['൱'] = '100'\n self.trans['୵'] = 'sixteenth'\n self.trans['અ'] = 'a'\n self.trans['ਊ'] = 'uu'\n self.trans['ഐ'] = 'ai'\n self.trans['ક'] = 'ka'\n self.trans['ଔ'] = 'au'\n self.trans['ਚ'] = 'ca'\n self.trans['ഠ'] = 'ttha'\n self.trans['થ'] = 'tha'\n self.trans['ତ'] = 'ta'\n self.trans['ਪ'] = 'pa'\n self.trans['ര'] = 'ra'\n self.trans['વ'] = 'va'\n self.trans['ീ'] = 'ii'\n self.trans['ૅ'] = 'e'\n self.trans['ୄ'] = 'rr'\n self.trans['ൠ'] = 'rr'\n self.trans['ਜ਼'] = 'za'\n self.trans['੪'] = '4'\n self.trans['൰'] = '10'\n self.trans['୴'] = 'quarters'\n self.trans['ਅ'] = 'a'\n self.trans['ഋ'] = 'r'\n self.trans['ઊ'] = 'uu'\n self.trans['ଏ'] = 'e'\n self.trans['ਕ'] = 'ka'\n self.trans['ഛ'] = 'cha'\n self.trans['ચ'] = 'ca'\n self.trans['ଟ'] = 'tta'\n self.trans['ਥ'] = 'tha'\n self.trans['ഫ'] = 'pha'\n self.trans['પ'] = 'pa'\n self.trans['ଯ'] = 'ya'\n self.trans['ਵ'] = 'va'\n self.trans['ି'] = 'i'\n self.trans['ോ'] = 'oo'\n self.trans['ୟ'] = 'yya'\n self.trans['൫'] = '5'\n self.trans['૪'] = '4'\n self.trans['୯'] = '9'\n self.trans['ੵ'] = 'yakash'\n self.trans['ൻ'] = 'n'\n self.trans['ઃ'] = 'visarga'\n self.trans['ം'] = 'anusvara'\n self.trans['ਈ'] = 'ii'\n self.trans['ઓ'] = 'o'\n self.trans['ഒ'] = 'o'\n self.trans['ਘ'] = 'gha'\n self.trans['ଞ'] = 'nya'\n self.trans['ણ'] = 'nna'\n self.trans['ഢ'] = 'ddha'\n self.trans['ਲ਼'] = 'lla'\n self.trans['ਨ'] = 'na'\n self.trans['ମ'] = 'ma'\n self.trans['ળ'] = 'lla'\n self.trans['ല'] = 'la'\n self.trans['ਸ'] = 'sa'\n self.trans['¿'] = '?'\n self.trans['ା'] = 'aa'\n self.trans['ૃ'] = 'r'\n self.trans['ൂ'] = 'uu'\n self.trans['ੈ'] = 'ai'\n self.trans['ૣ'] = 'll'\n self.trans['ൢ'] = 'l'\n self.trans['੨'] = '2'\n self.trans['୮'] = '8'\n self.trans['൲'] = '1000'\n self.trans['ਃ'] = 'visarga'\n self.trans['ଉ'] = 'u'\n self.trans['ઈ'] = 'ii'\n self.trans['ਓ'] = 'oo'\n self.trans['ଙ'] = 'nga'\n self.trans['ઘ'] = 'gha'\n self.trans['ഝ'] = 'jha'\n self.trans['ਣ'] = 'nna'\n self.trans['ન'] = 'na'\n self.trans['ഭ'] = 'bha'\n self.trans['ଜ'] = 'ja'\n self.trans['ହ'] = 'ha'\n self.trans['સ'] = 'sa'\n self.trans['ഽ'] = 'avagraha'\n self.trans['ૈ'] = 'ai'\n self.trans['്'] = 'virama'\n self.trans['୩'] = '3'\n self.trans['૨'] = '2'\n self.trans['൭'] = '7'\n self.trans['ੳ'] = 'ura'\n self.trans['ൽ'] = 'l'\n self.trans['ઉ'] = 'u'\n self.trans['ଈ'] = 'ii'\n self.trans['ഌ'] = 'l'\n self.trans['ઙ'] = 'nga'\n self.trans['ଘ'] = 'gha'\n self.trans['ജ'] = 'ja'\n self.trans['ਞ'] = 'nya'\n self.trans['ନ'] = 'na'\n self.trans['ബ'] = 'ba'\n self.trans['ਮ'] = 'ma'\n self.trans['હ'] = 'ha'\n self.trans['ସ'] = 'sa'\n self.trans['ਾ'] = 'aa'\n self.trans['ૉ'] = 'o'\n self.trans['ୈ'] = 'ai'\n self.trans['ൌ'] = 'au'\n self.trans['૩'] = '3'\n self.trans['୨'] = '2'\n self.trans['൬'] = '6'\n self.trans['੮'] = '8'\n self.trans['ർ'] = 'rr'\n self.trans['ଃ'] = 'visarga'\n self.trans['ഇ'] = 'i'\n self.trans['ਉ'] = 'u'\n self.trans['ଓ'] = 'o'\n self.trans['ഗ'] = 'ga'\n 
self.trans['ਙ'] = 'nga'\n self.trans['ઞ'] = 'nya'\n self.trans['ଣ'] = 'nna'\n self.trans['ധ'] = 'dha'\n self.trans['મ'] = 'ma'\n self.trans['ଳ'] = 'lla'\n self.trans['ഷ'] = 'ssa'\n self.trans['ਹ'] = 'ha'\n self.trans['ਗ਼'] = 'ghha'\n self.trans['ા'] = 'aa'\n self.trans['ୃ'] = 'r'\n self.trans['േ'] = 'ee'\n self.trans['ൗ'] = 'mark'\n self.trans['ଢ଼'] = 'rha'\n self.trans['ୣ'] = 'll'\n self.trans['൧'] = '1'\n self.trans['੩'] = '3'\n self.trans['૮'] = '8'\n self.trans['୳'] = 'half'\n for char in self.trans:\n value = self.trans[char]\n if value == '?':\n continue\n while (value.encode(encoding, 'replace').decode(encoding) == '?'\n and value in self.trans):\n assert value != self.trans[value], \\\n '{!r} == self.trans[{!r}]!'.format(value, value)\n value = self.trans[value]\n self.trans[char] = value",
"def encode_str(encoding = 'utf-8'):\n return function(lambda value: value.encode(encoding) if isinstance(value, unicode) else value)",
"def reencode(s):\n return s.encode('ascii', 'xmlcharrefreplace').decode()",
"def convert(self, s):\r\n if self.input_codec <> self.output_codec:\r\n return unicode(s, self.input_codec).encode(self.output_codec)\r\n else:\r\n return s",
"def cencode(text):\n return _encode(text)[0]",
"def test_encoding_error(self):\n try:\n mark_safe(\"abcdefghijkl<p>mnὀp</p>qrstuwxyz\").encode(\"ascii\")\n except Exception:\n exc_type, exc_value, tb = sys.exc_info()\n reporter = ExceptionReporter(None, exc_type, exc_value, tb)\n html = reporter.get_traceback_html()\n self.assertIn(\"<h2>Unicode error hint</h2>\", html)\n self.assertIn(\"The string that could not be encoded/decoded was: \", html)\n self.assertIn(\"<strong><p>mnὀp</p></strong>\", html)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encode ampersands into &amp; | def encode_ampersands(text):
    text = re.sub('&(?!([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+);)', '&amp;', text)
return text | [
"def fix_ampersands(qs):\n parts = []\n for p in qs.split('='):\n if p.count('&') > 1:\n l = p.split('&')\n last = l.pop()\n p = '%26'.join(l) + '&' + last\n parts.append(p)\n\n # an & in the last part definitely needs encoding\n parts[-1] = parts[-1].replace('&', '%26')\n\n return '='.join(parts)",
"def RefAmpersand(final_soup_href):\n if final_soup_href != None:\n final_soup_href = re.sub( r'\\&', r'&', final_soup_href)\n return final_soup_href",
"def encode_data(self, data):\r\n if data:\r\n data = urlencode(data)\r\n\r\n return data",
"def url_encode(text):\n return urllib.quote(text)",
"def escapeOnce(data):\n data = data.replace(\"&\", \"&\")\n\n #...but if it was already escaped, make sure it\n # is not done twice....this will turn any tags\n # back to how they were at the start.\n data = data.replace(\"&amp;\", \"&\")\n data = data.replace(\"&gt;\", \">\")\n data = data.replace(\"&lt;\", \"<\")\n data = data.replace(\"&#\", \"&#\")\n\n #..and just in case someone had double-escaped it, do it again\n data = data.replace(\"&amp;\", \"&\")\n data = data.replace(\"&gt;\", \">\")\n data = data.replace(\"&lt;\", \"<\")\n return data",
"def escapeForContent(data):\n if isinstance(data, unicode):\n data = data.encode('utf-8')\n data = data.replace(b'&', b'&'\n ).replace(b'<', b'<'\n ).replace(b'>', b'>')\n return data",
"def escape(data, entities={}):\r\n data = data.replace(\"&\", \"&\")\r\n data = data.replace(\"<\", \"<\")\r\n data = data.replace(\">\", \">\")\r\n if entities:\r\n data = __dict_replace(data, entities)\r\n return data",
"def percent_encode(data, safe=None):\n if data is None:\n return None\n if isinstance(data, (tuple, list, set)):\n return \"&\".join(\n percent_encode(value, safe=safe)\n for value in data\n )\n if isinstance(data, dict):\n return \"&\".join(\n key + \"=\" + percent_encode(value, safe=safe)\n for key, value in data.items()\n )\n return quote(bstr(data), safe or b\"\")",
"def encode_html( self, text):\n\t\thtml_escape_table = {\n\t\t\t\"&\": \"&\",\n\t\t\t'\"': \""\",\n\t\t\t\"'\": \"'\",\n\t\t\t\">\": \">\",\n\t\t\t\"<\": \"<\",\n\t\t\t}\n\t\t\n\t\tdef html_escape(text):\n\t\t\t\"\"\"Produce entities within text.\"\"\"\n\t\t\tL=[]\n\t\t\tfor c in text:\n\t\t\t\tL.append(html_escape_table.get(c,c))\n\t\t\treturn \"\".join(L)\n\n\t\treturn html_escape( text )",
"def encodeString():\n pass",
"def escape(t):\n return (t\n .replace(\""\", '@quot;')\n .replace(\"&\", \"@amp;\").replace(\"<\", \"@lt;\").replace(\">\", \"@gt;\")\n\n .replace(\"&\", \"&\").replace(\"<\", \"<\").replace(\">\", \">\")\n .replace(\"'\", \"'\").replace('\"', \""\")\n .replace(\"\\\\\", \"\\")\n\n .replace(\"@quot;\", '"')\n .replace(\"@amp;\", \"&\").replace(\"@lt;\", \"<\").replace(\"@gt;\", \">\")\n\n )",
"def _params(self, params):\r\n return urllib.urlencode(params)",
"def _encode_query(items: dict, *, mask=False) -> str:\n pairs = []\n for key in sorted(items.keys()):\n value = _MASK if mask and key in _MASKED_PARAMS else items[key]\n item = \"{}={}\".format(key, _quote(value))\n # Ensure 'url' goes last per CLI spec\n if key == \"url\":\n pairs.append(item)\n else:\n pairs.insert(0, item)\n return \"&\".join(pairs)",
"def _qs_encode(params, sep=\"&\"):\n\n pairs = []\n for (k, v) in params.items():\n pairs.append(_urlencode(k) + \"=\" + _urlencode(v))\n pairs.sort()\n return sep.join(pairs)",
"def html_encode_django_chars(txt):\n txt = txt.replace(\"{\", \"{\")\n txt = txt.replace(\"}\", \"}\")\n txt = txt.replace(\"%\", \"%\")\n return txt",
"def _unicodeurlencode(self, params):\n if isinstance(params, dict):\n params = params.items()\n return utils.web.urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params])",
"def test_URLEncode_partial_encoded_input():\n res = main({'value': 'https%3A//www.google.com/url@to@encode'})\n assert res == 'https%3A//www.google.com/url%40to%40encode'",
"def uri_encode(uri:str) -> str:\n letters = ['%' + hex(ord(c))[-2:] if c in _uri_tohex else c for c in uri]\n return ''.join(letters)",
"def urlEncode(self, data):\n # type: (Union[str, bytearray]) -> Union[str,bytearray]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a string of wiki markup and outputs a list of genshi Fragments (Elements and strings). This recursive function, with help from the WikiElement objects, does almost all the parsing. When no WikiElement objects are supplied, escapes are removed from ``text`` (except if remove_escapes=True) and it is returned as-is. This is the only way for recursion to stop. | def fragmentize(text,wiki_elements, element_store, environ, remove_escapes=True):
while wiki_elements:
# If the first supplied wiki_element is actually a list of elements, \
# search for all of them and match the closest one only.
if isinstance(wiki_elements[0],(list,tuple)):
x = None
mos = None
for element in wiki_elements[0]:
mo = element.regexp.search(text)
if mo:
if x is None or mo.start() < x:
x,wiki_element,mos = mo.start(),element,[mo]
else:
wiki_element = wiki_elements[0]
mos = [mo for mo in wiki_element.regexp.finditer(text)]
if mos:
frags = wiki_element._process(mos, text, wiki_elements, element_store, environ)
break
else:
wiki_elements = wiki_elements[1:]
# remove escape characters
else:
if remove_escapes:
text = esc_to_remove.sub('',text)
frags = fill_from_store(text,element_store)
return frags | [
"def parse_elements(text):\n \n \n # sanitise and split using BeautifulSoup\n soup = BeautifulSoup(parse(text))\n elements = [e for e in soup.contents if type(e) == Tag]\n \n # wrap blocks in <div>\n format = u\"<div class='doccomment-block' id='DE-%d'>\\n%s\\n</div>\"\n for seq,txt in enumerate(elements):\n elements[seq] = format % (seq, txt)\n \n return elements",
"def parseText(self, text):\n results = []\n for tag in self.iterTags(text):\n results.append(self.tagToMarkdown(tag, \n self.cards))\n return '\\n\\n'.join(results)",
"def cleanup_mediawiki(text):\n # This tag was probably setup via SyntaxHighlight GeSHi for biopython.org's wiki\n #\n # <python>\n # import antigravity\n # </python>\n #\n # Replacing it with the following makes pandoc happy,\n #\n # <source lang=python>\n # import antigravity\n # </source>\n #\n # Conversion by pandoc to GitHub Flavour Markdown gives:\n #\n # ``` python\n # import antigravity\n # ```\n #\n # Which is much nicer.\n #\n # =================================================\n #\n # I may have been misled by old links, but right now I don't\n # think there is an easy way to get a table-of-contents with\n # (GitHub Flavoured) Markdown which works on GitHub pages.\n #\n # Meanwhile the MediaWiki __TOC__ etc get left in the .md\n # so I'm just going to remove them here.\n #\n new = []\n for line in text.split(\"\\n\"):\n # line is already unicode\n line = line.replace(\"\\xe2\\x80\\x8e\".decode(\"utf-8\"), \"\") # LEFT-TO-RIGHT\n if line.rstrip() == \"<python>\":\n line = \"<source lang=python>\"\n elif line.rstrip() == \"<perl>\":\n line = \"<source lang=perl>\"\n elif line.rstrip() in [\"</python>\", \"</perl>\"]:\n line = \"</source>\"\n undiv = un_div(line)\n if undiv in [\"__TOC__\", \"__FORCETOC__\", \"__NOTOC__\"]:\n continue\n elif undiv.startswith(\"[[Image:\") and undiv.endswith(\"]]\"):\n # Markdown image wrapped in a div does not render on Github Pages,\n # remove the div and any attempt at styling it (e.g. alignment)\n line = undiv\n new.append(line)\n return \"\\n\".join(new)",
"def inline_markup_to_html(astr):\n\n markup_to_elem = [(r'\\*', '<b>', '</b>'),\n (r'\\/', '<i>', '</i>'),\n (r'`', '<code>', '</code>')]\n\n def replace(matched):\n \"\"\" Take matched, add opening & closing tags, cgi escape if code \"\"\"\n\n matched_str = matched.groups()[0]\n if match == '`':\n matched_str = cgi.escape(matched_str)\n return opener + matched_str + closer\n\n for match, opener, closer in markup_to_elem:\n astr = wrap_match(match).sub(replace, astr)\n\n return fu.pipe(astr, [convert_markup_links, convert_raw_links])",
"def parse(self, text, strip_ansi=False):\r\n # parse everything to ansi first\r\n text = parse_ansi(text, strip_ansi=strip_ansi, xterm256=False)\r\n # convert all ansi to html\r\n result = re.sub(self.re_string, self.do_sub, text)\r\n result = self.re_color(result)\r\n result = self.re_bold(result)\r\n result = self.re_underline(result)\r\n result = self.remove_bells(result)\r\n result = self.convert_linebreaks(result)\r\n result = self.remove_backspaces(result)\r\n result = self.convert_urls(result)\r\n # clean out eventual ansi that was missed\r\n #result = parse_ansi(result, strip_ansi=True)\r\n\r\n return result",
"def clean_text_with_lxml(text, **kwargs):\n ###############\n# pdb.set_trace()\n ###############\n cleaner = LXMLCleaner(**kwargs)\n text = cleaner.clean_html(text)\n return text",
"def texts(self):\n for _, element in etree.iterparse(self.wiki_dump_file):\n if 'text' in element.tag and type(element.text) == str:\n yield self.tokenize_lemmatize(element.text)\n element.clear()\n else:\n element.clear()",
"def remove_unwanted_tags(text):\n # ? is for non-greedy to not go to last tag but end the current first\n # new lines and paragraphs\n text = re.sub(r'<br.?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'<p.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</p>', '', text, flags=re.IGNORECASE)\n # links and other anchors\n text = re.sub(r'<a.*?>', '', text, flags=re.IGNORECASE)\n text = re.sub(r'</a>', '', text, flags=re.IGNORECASE)\n return text",
"def process_tag(text):\n if not isinstance(text, basestring):\n raise TypeError(\"string format required: got %r\" % type(text))\n\n try:\n text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n\n # replace `[text](http://...) or [text](https://...)`\n # with `text`\n text = re.sub(r\"\"\"\\[([^\\]]+)\\] # [text] parenthesis-captured group \\1\n \\(http\\S+\\) # (http://...)\"\"\", r\"\\1\", text, flags=re.X)\n\n # replace `http://... or https://...`\n # with ``\n text = re.sub(r\"https?://[^\\s\\\"\\']+\", \"\", text)\n\n # replace `(...) ... (...) ...`\n # with ``\n regex = r\"\"\"\\( # left (\n ([^\\(\\)]+) # captured text, parenthesis-captured group \\1\n \\) # right )\"\"\"\n text = re.sub(regex, \"\", text, flags=re.X)\n\n text = text.replace(\" \", \"\")\n text = text.replace(\">\", \"\")\n text = text.replace(\"<\", \"\")\n text = text.replace(\"&\", \"\")\n text = text.replace(\""\", \"\")\n text = text.replace(\"'\", \"\")\n text = text.replace(\"¢\", \"\")\n text = text.replace(\"£\", \"\")\n text = text.replace(\"¥\", \"\")\n text = text.replace(\"€\", \"\")\n text = text.replace(\"©\", \"\")\n text = text.replace(\"®\", \"\")\n text = re.sub(\"[*\\[\\]\\(\\)&%\\$#@\\^]\", \"\", text)\n text = re.sub(\"\\.{2,}\", \" \", text)\n except Exception as inst:\n print \"process_tag: %s\\ninput: %r\" % (inst, text)\n sys.exit(1)\n\n return text",
"def collapse_tags(self, my_etree):\n chars = []\n is_tag_start = False # True if inside tag\n tag_start_node = None # Pointer to current node. \n tag_start_char = '['\n tag_end_char = ']'\n\n # For every node with text\n for node,text in self._itertext(my_etree):\n # Go through each node's text character by character\n for i,c in enumerate(text):\n if c == tag_start_char: # Tag is starting!\n assert not is_tag_start # Better not already be inside a tag!\n is_tag_start = True \n tag_start_node = node \n chars = []\n elif c == tag_end_char: # Tag is ending\n assert is_tag_start # Better have seen a tag start!\n is_tag_start = False\n # If tag_start_node is the same as current node, then we don't need to do anything\n # But otherwise:\n if node != tag_start_node:\n # Tag started in different node, so move all the chars we've encountered since then\n # to the tag_start_node\n chars.append(c)\n tag_start_node.text += ''.join(chars)\n node.text = text[i+1:] # Remove characters from this node\n else:\n # Normal text character\n if is_tag_start and node != tag_start_node:\n # Need to save these chars to append to text in openbrac_node\n chars.append(c)\n\n # If we're here, that means we've consumed all the text in the current node.\n # Check if this node was part of a tag, yet did not start the tag\n if is_tag_start and node!= tag_start_node:\n # Need to remove this text completely as we've saved all of it inside chars for moving\n # into the start_node\n node.text = \"\"",
"def extract_xml_content_from_text(text_data):\n doc_tree = etree.fromstring(text_data, parser=parser)\n # List of strings to filter lines within the XML content by since we \n # only want usable data that can be parsed into TSV easily. This list \n # was generated based off of testing funds list for different tickers\n # and aggregating text that would likely invalidate the final TSV results.\n filter_vals = ['S REPORT SUMMARY',\n 'FORM 13F INFORMATION TABLE',\n 'SHARES/ SH/ PUT/ INVSTMT',\n 'Total ( '\n ]\n if doc_tree is not None:\n xml_results = []\n for node in doc_tree.iter():\n if str(node.tag).lower() == \"table\": # Get the table element\n vals = ''.join(node.itertext())\n # Filter out invalid lines found\n lines = vals.split('\\n')\n for line in lines:\n filter_found = False\n for each in filter_vals:\n if each in line:\n filter_found = True\n break\n if not filter_found:\n xml_results.append(line)\n return '\\n'.join(xml_results)\n else:\n return None",
"def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l",
"def fix_tags(input, removeEmptyTags = False, changeTagsNameCase = 0,\n unNestTags = None, check = False, verbose = False):\n\n if verbose:\n def assume(cond, msg):\n if not cond: print('tagsoupfixer: Parser bug:', msg)\n else:\n def assume(cond, msg): pass\n\n # Tags name comparator\n if changeTagsNameCase == 0: tagNameEqual = lambda a, b: a.lower() == b.lower()\n else: tagNameEqual = lambda a, b: a == b\n # Normalize tags to unNest\n if unNestTags:\n if changeTagsNameCase > 0: unNestTags = map(str.upper, unNestTags)\n else: unNestTags = map(str.lower, unNestTags)\n unNestTags = set(unNestTags)\n\n # Tokenize input\n tokens = _reTag.split(input)\n\n # Debugging\n #~ f = open('pat.txt', mode='w'); f.write(_patTag); f.close()\n #~ print(str(tokens).encode('cp1252'))\n\n # Initialize parser state\n # -- text output\n output = ''\n # -- tags stack; format: [(name, textBefore, markup)*]\n # example: [('div', '... blah <b>di dum</b> ...', '<div class=\"main\">'), ...]\n stack = []\n TAG_NAME = 0; TEXT_BEFORE = 1; MARKUP = 2; ATTRIBUTES = 3\n # -- contextual boolean states\n markupComplete = inTag = endTag = emptyElementTag = False\n # -- buffers for tag name and attributes\n curTagName = curTagAttributes = ''\n\n # http://www.w3.org/TR/2008/REC-xml-20081126/#sec-starttags\n for tok in tokens:\n\n # Simplistic XML parser (don't parse attributes)\n # Open StartTag / EmptyElementTag\n if tok == '<':\n assume(not inTag, 'Unexpected \"<\" inside markup.')\n inTag = True\n # Open EndTag\n elif tok == '</':\n assume(not inTag, 'Unexpected \"</\" inside markup.')\n inTag = endTag = True\n # Close StartTag / EndTag\n elif tok == '>':\n assume(inTag, 'Unexpected \">\" outside markup.')\n markupComplete = True\n # Close EmptyElementTag\n elif tok == '/>':\n assume(inTag, 'Unexpected \"/>\" outside markup.')\n markupComplete = emptyElementTag = True\n # Continue *Tag\n elif inTag:\n # Tag name\n if not curTagName:\n if changeTagsNameCase > 0: curTagName = tok.upper()\n elif changeTagsNameCase < 0: curTagName = tok.lower()\n else: curTagName = tok\n # Tag attributes\n else: curTagAttributes = tok\n # Text\n else:\n output += tok\n\n # We parsed a complete tag (StartTag, EndTag or EmptyElementTag)\n if markupComplete:\n # Quick'n'dirty hack to deal with BRs\n if tagNameEqual(curTagName, 'br'):\n emptyElementTag = True\n # Produce current tag\n curTag = \"<{}{}{}{}>\".format(\n '/' if endTag else '',\n curTagName,\n curTagAttributes,\n '/' if emptyElementTag else ''\n )\n # Process current tag\n # -- EmptyElementTag\n if emptyElementTag:\n # No text to process, output the markup\n output += curTag\n # -- StartTag\n elif not endTag:\n # Push current tag on the stack with current output as textBefore\n # and reset output.\n if unNestTags and curTagName in unNestTags:\n attrs = parse_attributes(curTagAttributes)\n # 20/01/2011: we HAVE to merge the parent's attributes if any\n if len(stack) and stack[-1][TAG_NAME] == curTagName and stack[-1][ATTRIBUTES] and attrs:\n tmp = stack[-1][ATTRIBUTES].copy()\n tmp.update(attrs)\n attrs = tmp\n tag = [curTagName, output, curTag, attrs]\n else: tag = [curTagName, output, curTag]\n output = ''\n stack.append(tag)\n # -- EndTag, try to match a StartTag\n else:\n if len(stack) == 0:\n # Drop this tag\n if verbose: print('tagsoupfixer: '+curTag+': End tag with no match, tag dropped.')\n elif tagNameEqual(stack[-1][TAG_NAME], curTagName):\n # Unnest of the poor (with the parent)\n if unNestTags and len(stack) > 1 and curTagName in unNestTags and stack[-2][TAG_NAME] == 
curTagName:\n attrs = stack[-1][ATTRIBUTES]\n # 20/01/2011: already done at StartTag\n #attrs.update(stack[-2][ATTRIBUTES])\n attrs = build_attributes(attrs)\n stack[-1][MARKUP] = '</' + curTagName + '>' + '<' + curTagName + attrs + '>'\n #if verbose: print('tagsoupfixer: '+curTag+': rewrote parent: '+stack[-1][MARKUP])\n curTag += stack[-2][MARKUP]\n # Properly nested tags\n if not removeEmptyTags or len(output.strip()) > 0:\n # Tag is not empty / We don't have to strip empty tags\n output = stack[-1][TEXT_BEFORE] + stack[-1][MARKUP] + output + curTag\n else:\n # Tag is empty and we have to strip its nasty markup\n output = stack[-1][TEXT_BEFORE] + output\n if verbose: print('tagsoupfixer: '+curTag+': Removed empty tag.')\n stack.pop()\n elif len(stack) > 1:\n # Detect improperly nested tags\n overlap = None\n for i in reversed(range(len(stack)-1)):\n # Overlapping tags !!\n if tagNameEqual(stack[i][TAG_NAME], curTagName):\n overlap = i; break\n if overlap is not None:\n if verbose:\n print('tagsoupfixer: ['+curTagName+','+stack[overlap-1][TAG_NAME]+']: Overlapping tags.')\n # Fix overlapping by properly closing the tag\n tag = stack[overlap]\n for i in range(overlap+1, len(stack)):\n stack[i][MARKUP] = '</'+tag[TAG_NAME]+'>'+stack[i][MARKUP]+tag[MARKUP]\n output += curTag\n stack[overlap+1][TEXT_BEFORE] = tag[TEXT_BEFORE] + tag[MARKUP] + stack[overlap+1][TEXT_BEFORE]\n stack.pop(overlap)\n # Reset tag parser state\n markupComplete = inTag = endTag = emptyElementTag = False\n curTagName = curTagAttributes = ''\n\n # Output remaining elements on the stack\n for i in reversed(range(len(stack))):\n output = stack[i][TEXT_BEFORE] + stack[i][MARKUP] + output\n\n # Cludgy hack to fix empty tags when unnesting\n if unNestTags and removeEmptyTags:\n output = fix_tags(output, removeEmptyTags=True)\n\n if check:\n oh = strip_tags(input)\n my = strip_tags(output)\n if oh != my:\n print('tagsoupfixer: Sorry, I stripped out some text, aaaaaaargh.\\n', oh, '\\n', my)\n\n return output",
"def render_markup(text):\n if flaskbb_config['MARKUP_TYPE'] == 'bbcode':\n return render_bbcode(text)\n elif flaskbb_config['MARKUP_TYPE'] == 'markdown':\n return render_markdown(text, extras=['tables'])\n return text",
"def iter_text(el):\n if el.text is not None:\n yield (el.text,el)\n\n for child in el:\n for part in iter_text(child):\n yield part\n\n if el.tail is not None:\n yield (el.tail,el)",
"def fragments_fromstring(html, no_leading_text=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n if parser is None:\n parser = html_parser\n\n children = parser.parseFragment(html, 'div', useChardet=guess_charset)\n if children and isinstance(children[0], _strings):\n if no_leading_text:\n if children[0].strip():\n raise etree.ParserError('There is leading text: %r' %\n children[0])\n del children[0]\n return children",
"def generate_parse_trees():\n result = []\n for s in ALL_SENTENCES:\n result.append('\"\"\"' + str(get_chunks(s)) + '\"\"\", ')\n return result",
"def _replaceNestedElementText(self, names, text, escapeAmpLtGt=False):\n openingTagsPattern = r\"\"\n closingTagsPattern = r\"\"\n firstLevel = True\n while names:\n nextName = names.pop(0)\n if not firstLevel:\n openingTagsPattern = openingTagsPattern + r\".*?\"\n closingTagsPattern = r\".*?\" + closingTagsPattern\n openingTagsPattern = openingTagsPattern + r\"<\" + nextName + r\"(?:\\s*|\\s+.*?)>\"\n closingTagsPattern = r\"</\" + nextName + r\"\\s*>\" + closingTagsPattern\n firstLevel = False\n patternString = r\"(?s)(\" + openingTagsPattern + r\")(.*?)(\" + closingTagsPattern + r\")\"\n if text is not None:\n if escapeAmpLtGt:\n text = escape(text)\n #replacementString = r\"\\g<1>\" + text + r\"\\g<3>\"\n replacementFunction = lambda match: match.group(1) + text + match.group(3)\n else:\n #replacementString = r\"\"\n replacementFunction = lambda match: r\"\"\n self._string = re.sub(patternString, replacementFunction, self._string)\n return self",
"def _parse(self, remaining_text, tree, frontier):\n\n # If the tree covers the text, and there's nothing left to\n # expand, then we've found a complete parse; return it.\n if len(remaining_text) == 0 and len(frontier) == 0:\n if self._trace:\n self._trace_succeed(tree, frontier)\n yield tree\n\n # If there's still text, but nothing left to expand, we failed.\n elif len(frontier) == 0:\n if self._trace:\n self._trace_backtrack(tree, frontier)\n\n # If the next element on the frontier is a tree, expand it.\n elif isinstance(tree[frontier[0]], Tree):\n yield from self._expand(remaining_text, tree, frontier)\n\n # If the next element on the frontier is a token, match it.\n else:\n yield from self._match(remaining_text, tree, frontier)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This re_string is for finding generic block elements like lists (ordered, unordered, and definition) that start with a single token. | def re_string(self):
leading_whitespace = r'^([ \t]*'
only_one_token = re.escape(self.token)+ '(?!' + re.escape(self.token) + ')'
rest_of_list = r'.*?(?:\n|\Z))'
only_one_stop_token = '([' + re.escape(self.stop_tokens) + r'])(?!\3)'
look_ahead = '(?=([ \t]*' + only_one_stop_token + '|$))'
return leading_whitespace + only_one_token + rest_of_list + \
look_ahead | [
"def begin_token(self) -> str:",
"def __find_block_start(self):\n try:\n return self.__find_token(self.__block_head)\n except RouteParserError:\n raise StartTokenNotFoundError(_('No match for entry block start'))",
"def test_parse_token_single_element_name(self):\n\n # Parse the token.\n list = parse_token('G')\n\n # Check the list elements.\n self.assertEqual(len(list), 1)\n self.assertEqual(list[0], 'G')",
"def find_iters(template_string):\n\n # {{% match + any number of spaces + whatever + any number of spaces + %}}\n pattern = re.compile('{{%(.*?)\\s+.*\\s+%}}')\n tags = re.findall(pattern, template_string)\n \n return tags",
"def get_definition(text, startswith):\n return [\n re.split('[ ()]', line.strip())[1]\n for line in [line.strip() for line in text.splitlines()]\n if line.startswith(startswith)\n ]",
"def word_start_finder(ignore_subword=False, is_joiner=False) -> Callable:\n if not ignore_subword:\n if is_joiner:\n return _subword_start_by_joiner\n else:\n return _subword_start_by_spacer\n else:\n return lambda tokens: [True] * len(tokens)",
"def _subword_start_by_spacer(tokens: Sequence[str]) -> Sequence[bool]:\n flag = [x.startswith(SubwordMarker.SPACER) for x in tokens]\n flag[0] = True\n return flag",
"def first_character(self, from_end: bool = False) -> \"Regex\":",
"def get_lexer(\n tag_start_string: str = r\"{%\",\n tag_end_string: str = r\"%}\",\n statement_start_string: str = r\"{{\",\n statement_end_string: str = r\"}}\",\n) -> Callable[[str], Iterator[Token]]:\n rules = compile_liquid_rules(\n tag_start_string,\n tag_end_string,\n statement_start_string,\n statement_end_string,\n )\n return partial(_tokenize_template, rules=rules)",
"def first_strings():\n pass",
"def _find_block_starts(self):\n node_headers = []\n element_headers = []\n element_set_headers = []\n for i, line in enumerate(self._abq_file):\n node_header_match = self._node_header_pattern.match(line)\n element_header_match = self._element_header_pattern.match(line)\n elementset_header_match = self._elementset_header_pattern.match(line)\n if node_header_match:\n node_headers.append(i)\n elif element_header_match:\n element_headers.append(i)\n elif elementset_header_match:\n element_set_headers.append(i)\n self._node_block_start = node_headers[0]\n self._element_block_start = element_headers[0]\n self._elementset_block_start = element_set_headers[0]",
"def match_first(cls, pattern : str) -> Optional[\"ConduitBlock\"]:\n return cls._blocks.match_first(pattern)",
"def __find_word_start(self, iterator):\n pattern = re.compile(\"[a-z|A-Z|0-9|<|>|/]\")\n symbols = ('!', '@', '#', '$', '%', '&', '*',\n '(', ')', '-' ,'+', '.', ',', '~', '^')\n iterator.backward_char()\n if iterator.get_char() in symbols:\n return\n while True:\n char = iterator.get_char()\n if not(re.match(pattern, char)):\n iterator.forward_char()\n return\n elif iterator.starts_line():\n return\n else:\n iterator.backward_char()",
"def tokenize(self):",
"def test_starttag_simple():\n inst = _encoder.TextEncoder('foo')\n\n result = inst.starttag(b'xx', iter([]), False)\n assert result == b'[xx]'\n\n result = inst.starttag(b'yy', iter([(b'aa', None), (b'bb', b'cc')]),\n False)\n assert result == b'[yy aa bb=cc]'",
"def isStartOfBlock(self, line):\n line = line.strip()\n if line.startswith(\"----\"):\n return True\n\n if line.startswith(\"=\"):\n return True\n if line.startswith(\"[[\") and line.endswith(\"]]\"):\n return True\n\n return False",
"def lexer(it):\n tokens = []\n token = \"\"\n for c in it:\n if c == \"{\":\n if token:\n tokens.append(token)\n token = \"\"\n tokens.append(c)\n elif c == \"}\":\n if token:\n tokens.append(token)\n token = \"\"\n tokens.append(c)\n else:\n token += c\n if token:\n tokens.append(token)\n return tokens",
"def test_starts_at(line):\n return TEST_START_RE.match(line)",
"def str_const_type(self):\n return bool(re.fullmatch(\"\\\".*?\\\"\", self.current_token)) # \"....\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set `self.reader` by name. | def set_reader(self, reader_name, parser, parser_name):
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser | [
"def set_reader(self, fd, on_readable):\n raise NotImplementedError",
"def setName(self, name):\n self.content = name",
"def setScanner(self, scannerName):\n self.scanner = self.sourceManager.OpenSource(scannerName)",
"def set_name(self, name):\n if self._status == \"lock\":\n raise QiitaAnalysisError(\"analysis can't be changed. It's locked\")\n self._name = name",
"def set_scene_by_name(self, name):\n id = self.extract_scene_id_by_name(name)\n if id:\n self.set_scene(id)",
"def setName(self, new_name):\n self.__NAME = new_name\n self.__file = self.deepCopy(self.__NAME, self.__DIR).__file",
"def __init__(self, file_name, encoding):\n self.reader = codecs.getreader(encoding)(file_name)",
"def setRawName(*args, **kwargs):\n \n pass",
"def set_name(self,name):\r\n if not len(name):\r\n raise Exception(\"The specified morphism name is empty\")\r\n self.name = name",
"def set_name(self, name):\n self.recipe_proto[\"name\"] = name",
"def setName( self, name ):\n if type( name ) == str:\n self.Name = name\n self.graph.graph[ 'name' ] = name",
"def set_dataset_name(self, dataset_name):\n self.name = dataset_name",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"def from_name(self, from_name):\n self._from_name = from_name",
"def read(self, read: SmartSsdReadLookahead):\n\n self._read = read",
"def initread(self, idfname):\n with open(idfname, \"r\") as _:\n # raise nonexistent file error early if idfname doesn't exist\n pass\n iddfhandle = StringIO(iddcurrent.iddtxt)\n if self.getiddname() == None:\n self.setiddname(iddfhandle)\n self.idfname = idfname\n try:\n self.idfabsname = os.path.abspath(self.idfname)\n except TypeError as e:\n pass # it is file handle. the code can handle that\n self.read()",
"def get_reader(self):\n raise NotImplementedError()",
"def set_name(self, name):\n self.pattern.name = name",
"def set_name(self, new_name):\n self.name = new_name",
"def open_file_in_reader(file_name):\n global user_configurations\n command = user_configurations['READER'].replace('%f', '\"%s\"' % file_name)\n try:\n subprocess.call([command], shell=True)\n except OSError:\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set `self.writer` by name. | def set_writer(self, writer_name):
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class() | [
"def get_writer(self, name=None):\n self._create_working_folder()\n name = self.clean_name(name)\n if name not in self.writers:\n self.writers[name] = open(os.path.join(self.working_folder, name), 'wb')\n return self.writers[name]",
"def set_writer(self, fd, on_writable):\n raise NotImplementedError",
"def set_writer_position(self, name, timestamp):\n self.cursor.execute('REPLACE INTO gauged_writer_history '\n '(id, timestamp) VALUES (?, ?)',\n (name, timestamp))",
"def setName( self, name ):\n if type( name ) == str:\n self.Name = name\n self.graph.graph[ 'name' ] = name",
"def setName(self, name):\n self.setAttribute('NAME', name)",
"def set_name(self, name):\n if self._status == \"lock\":\n raise QiitaAnalysisError(\"analysis can't be changed. It's locked\")\n self._name = name",
"def setName(self, name):\n self.content = name",
"def set_output_name(name):\n settings[\"outputName\"] = name",
"def set_name(self, room_name):\n self.name = room_name",
"def setName(self, new_name):\n self.__NAME = new_name\n self.__file = self.deepCopy(self.__NAME, self.__DIR).__file",
"def add_writer(self, writer: IEvaluatorWriter):\n\n self.writers.append(writer)\n self.is_header_written = False # re-write header",
"def set_player_name(self, player):\r\n self.__name = player",
"def change_name(self, name):\n self._player_name = name",
"def set_name(self, name):\n self.pattern.name = name",
"def set_name(self, new_name):\n self.name = new_name",
"def _setName(self,name,value):\n\n if name in SDS['COP']:\n self.COP.__dict__[name] = value\n else:\n self.__dict__[name] = value",
"def set_name(self,name):\r\n if not len(name):\r\n raise Exception(\"The specified morphism name is empty\")\r\n self.name = name",
"def set_player_name(name):\n\n player[\"player_name\"] = name",
"def setName(self, name):\r\n self._thread.setName(name)",
"def write_filename(self, record, pattern, filename):\n\n # Are we currently writing to this file? If not, open/create it.\n if not filename == self.current_filename.get(pattern, None):\n logging.info('LogfileWriter opening new file: %s', filename)\n self.current_filename[pattern] = filename\n self.writer[pattern] = FileWriter(filename=filename,\n header=self.header,\n header_file=self.header_file,\n flush=self.flush)\n # Now, if our logic is correct, should *always* have a matching_writer\n matching_writer = self.writer.get(pattern)\n matching_writer.write(record)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process command line options and arguments (if `self.settings` not already set), run `self.reader` and then `self.writer`. Return `self.writer`'s output. | def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
config_section=None, enable_exit_status=False):
exit = None
try:
if self.settings is None:
self.process_command_line(
argv, usage, description, settings_spec, config_section,
**(settings_overrides or {}))
self.set_io()
self.document = self.reader.read(self.source, self.parser,
self.settings)
self.apply_transforms()
output = self.writer.write(self.document, self.destination)
self.writer.assemble_parts()
except SystemExit, error:
exit = 1
exit_status = error.code
except Exception, error:
if not self.settings: # exception too early to report nicely
raise
if self.settings.traceback: # Propagate exceptions?
self.debugging_dumps()
raise
self.report_Exception(error)
exit = True
exit_status = 1
self.debugging_dumps()
if (enable_exit_status and self.document
and (self.document.reporter.max_level
>= self.settings.exit_status_level)):
sys.exit(self.document.reporter.max_level + 10)
elif exit:
sys.exit(exit_status)
return output | [
"def main(cmdlineargs=None, trimmed_outfile=sys.stdout):\n\tparser = get_option_parser()\n\tif cmdlineargs is None:\n\t\tcmdlineargs = sys.argv[1:]\n\toptions, args = parser.parse_args(args=cmdlineargs)\n\n\tif len(args) == 0:\n\t\tparser.error(\"At least one parameter needed: name of a FASTA or FASTQ file.\")\n\telif len(args) > 2:\n\t\tparser.error(\"Too many parameters.\")\n\n\tinput_filename = args[0]\n\tquality_filename = None\n\tpe_filename = None\n\tif len(args) == 2:\n\t\tif args[1].endswith('.qual'):\n\t\t\tquality_filename = args[1]\n\t\telse:\n\t\t\tpe_filename = args[1]\n\t\t\tif not options.paired_output:\n\t\t\t\tparser.error('you must use --paired-output when trimming paired-end reads')\n\n\tif len(args) == 1 and options.paired_output:\n\t\tparser.error(\"You specified a --paired-output file, but gave only one input file.\")\n\tif input_filename.endswith('.qual') and quality_filename.endswith('fasta'):\n\t\tparser.error(\"FASTA and QUAL file given, but the FASTA file must be first.\")\n\n\tif options.format is not None and options.format.lower() not in ['fasta', 'fastq', 'sra-fastq']:\n\t\tparser.error(\"The input file format must be either 'fasta', 'fastq' or 'sra-fastq' (not '{0}').\".format(options.format))\n\n\t# TODO should this really be an error?\n\tif options.format is not None and quality_filename is not None:\n\t\tparser.error(\"If a pair of .fasta and .qual files is given, the -f/--format parameter cannot be used.\")\n\n\t# default output files (overwritten below)\n\ttoo_short_outfile = None # too short reads go here\n\ttoo_long_outfile = None # too long reads go here\n\tpe_outfile = None\n\tif options.output is not None:\n\t\ttrimmed_outfile = xopen(options.output, 'w')\n\tuntrimmed_outfile = trimmed_outfile # reads without adapters go here\n\tif options.untrimmed_output is not None:\n\t\tuntrimmed_outfile = xopen(options.untrimmed_output, 'w')\n\tif options.too_short_output is not None:\n\t\ttoo_short_outfile = xopen(options.too_short_output, 'w')\n\tif options.too_long_output is not None:\n\t\ttoo_long_outfile = xopen(options.too_long_output, 'w')\n\tif options.paired_output:\n\t\tpe_outfile = xopen(options.paired_output, 'w')\n\n\tif options.maq:\n\t\toptions.colorspace = True\n\t\toptions.double_encode = True\n\t\toptions.trim_primer = True\n\t\toptions.strip_suffix.append('_F3')\n\t\toptions.suffix = \"/1\"\n\t\toptions.zero_cap = True\n\tif options.trim_primer and not options.colorspace:\n\t\tparser.error(\"Trimming the primer makes only sense in color space.\")\n\tif options.double_encode and not options.colorspace:\n\t\tparser.error(\"Double-encoding makes only sense in color space.\")\n\tif options.anywhere and options.colorspace:\n\t\tparser.error(\"Using --anywhere with color space reads is currently not supported (if you think this may be useful, contact the author).\")\n\tif not (0 <= options.error_rate <= 1.):\n\t\tparser.error(\"The maximum error rate must be between 0 and 1.\")\n\tif options.overlap < 1:\n\t\tparser.error(\"The overlap must be at least 1.\")\n\n\tif options.rest_file is not None:\n\t\toptions.rest_file = xopen(options.rest_file, 'w')\n\t\trest_writer = RestFileWriter(options.rest_file)\n\telse:\n\t\trest_writer = None\n\tif options.info_file is not None:\n\t\toptions.info_file = xopen(options.info_file, 'w')\n\tif options.wildcard_file is not None:\n\t\toptions.wildcard_file = xopen(options.wildcard_file, 'w')\n\n\tadapters = []\n\n\tdef parse_adapter_name(seq):\n\t\t\"\"\"\n\t\tParse an adapter given as 'name=adapt' into 'name' 
and 'adapt'.\n\t\t\"\"\"\n\t\tfields = seq.split('=', 1)\n\t\tif len(fields) > 1:\n\t\t\tname, seq = fields\n\t\t\tname = name.strip()\n\t\telse:\n\t\t\tname = None\n\t\tseq = seq.strip()\n\t\treturn name, seq\n\n\tADAPTER_CLASS = ColorspaceAdapter if options.colorspace else Adapter\n\tdef append_adapters(adapter_list, where):\n\t\tfor seq in adapter_list:\n\t\t\tname, seq = parse_adapter_name(seq)\n\t\t\tw = where\n\t\t\tif w == FRONT and seq.startswith('^'):\n\t\t\t\tseq = seq[1:]\n\t\t\t\tw = PREFIX\n\t\t\telif not options.indels:\n\t\t\t\tparser.error(\"Not allowing indels is currently supported only for anchored 5' adapters.\")\n\t\t\tif not seq:\n\t\t\t\tparser.error(\"The adapter sequence is empty\")\n\t\t\tadapter = ADAPTER_CLASS(seq, w, options.error_rate,\n\t\t\t\toptions.overlap, options.match_read_wildcards,\n\t\t\t\toptions.match_adapter_wildcards, name=name, indels=options.indels)\n\t\t\tadapters.append(adapter)\n\n\tappend_adapters(options.adapters, BACK)\n\tappend_adapters(options.anywhere, ANYWHERE)\n\tappend_adapters(options.front, FRONT)\n\n\t# make sure these aren't used by accident\n\tdel options.adapters\n\tdel options.anywhere\n\tdel options.front\n\n\tif not adapters and options.quality_cutoff == 0 and options.cut == 0:\n\t\tparser.error(\"You need to provide at least one adapter sequence.\")\n\n\tmodifiers = []\n\tif options.cut:\n\t\tmodifiers.append(UnconditionalCutter(options.cut))\n\tif options.quality_cutoff > 0:\n\t\tmodifiers.append(QualityTrimmer(options.quality_cutoff, options.quality_base))\n\tif adapters:\n\t\tadapter_cutter = RepeatedAdapterCutter(adapters, options.times,\n\t\t\t\toptions.wildcard_file, options.info_file, options.trim,\n\t\t\t\trest_writer, options.mask_adapter)\n\t\tmodifiers.append(adapter_cutter)\n\telse:\n\t\tadapter_cutter = None\n\tif options.length_tag:\n\t\tmodifiers.append(LengthTagModifier(options.length_tag))\n\tif options.strip_f3:\n\t\toptions.strip_suffix.append('_F3')\n\tfor suffix in options.strip_suffix:\n\t\tmodifiers.append(SuffixRemover(suffix))\n\tif options.prefix or options.suffix:\n\t\tmodifiers.append(PrefixSuffixAdder(options.prefix, options.suffix))\n\tif options.double_encode:\n\t\tmodifiers.append(DoubleEncoder())\n\tif options.zero_cap:\n\t\tmodifiers.append(ZeroCapper(quality_base=options.quality_base))\n\tif options.trim_primer:\n\t\tmodifiers.append(PrimerTrimmer)\n\n\treadfilter = ReadFilter(options.minimum_length, options.maximum_length,\n\t\ttoo_short_outfile, too_long_outfile, options.discard_trimmed,\n\t\toptions.discard_untrimmed)\n\tstart_time = time.clock()\n\ttry:\n\t\treader = read_sequences(input_filename, quality_filename, colorspace=options.colorspace, fileformat=options.format)\n\t\tif pe_filename:\n\t\t\tpe_reader = read_sequences(pe_filename, None, colorspace=options.colorspace, fileformat=options.format)\n\t\telse:\n\t\t\tpe_reader = None\n\t\tstats = process_reads(reader, pe_reader, modifiers, readfilter, trimmed_outfile, untrimmed_outfile, pe_outfile)\n\texcept IOError as e:\n\t\tif e.errno == errno.EPIPE:\n\t\t\tsys.exit(1)\n\t\traise\n\texcept seqio.FormatError as e:\n\t\tprint(\"Error:\", e, file=sys.stderr)\n\t\tsys.exit(1)\n\t# close open files\n\tfor f in [options.rest_file, options.wildcard_file, options.info_file,\n\t\t\ttoo_short_outfile, too_long_outfile, options.info_file]:\n\t\tif f is not None:\n\t\t\tf.close()\n\t# send statistics to stderr if result was sent to stdout\n\tstat_file = sys.stderr if options.output is None else None\n\n\tprint_statistics(adapters, 
time.clock() - start_time, stats,\n\t\toptions.trim, adapter_cutter.reads_matched if adapter_cutter else 0,\n\t\toptions.error_rate, readfilter.too_short, readfilter.too_long, cmdlineargs, file=stat_file)",
"def doCommand():\n \n import time\n import sys\n from swap import myStore\n\n # These would just be attributes if this were an object\n global _store\n global workingContext\n option_need_rdf_sometime = 0 # If we don't need it, don't import it\n # (to save errors where parsers don't exist)\n \n option_pipe = 0 # Don't store, just pipe though\n option_inputs = []\n option_reify = 0 # Flag: reify on output (process?)\n option_flat = 0 # Flag: reify on output (process?)\n option_crypto = 0 # Flag: make cryptographic algorithms available\n setTracking(0)\n option_outURI = None\n option_outputStyle = \"-best\"\n _gotInput = 0 # Do we not need to take input from stdin?\n option_meta = 0\n option_normalize_iri = 0\n \n option_flags = { \"rdf\":\"l\", \"n3\":\"\", \"think\":\"\", \"sparql\":\"\"}\n # RDF/XML serializer can't do list (\"collection\") syntax.\n \n option_quiet = 0\n option_with = None # Command line arguments made available to N3 processing\n option_engine = \"llyn\"\n option_why = \"\"\n \n _step = 0 # Step number used for metadata\n _genid = 0\n\n hostname = \"localhost\" # @@@@@@@@@@@ Get real one\n \n # The base URI for this process - the Web equiv of cwd\n _baseURI = uripath.base()\n \n option_format = \"n3\" # set the default format\n option_first_format = None\n \n _outURI = _baseURI\n option_baseURI = _baseURI # To start with - then tracks running base\n \n # First pass on command line - - - - - - - P A S S 1\n \n for argnum in range(1,len(sys.argv)): # options after script name\n arg = sys.argv[argnum]\n if arg.startswith(\"--\"): arg = arg[1:] # Chop posix-style -- to -\n# _equals = string.find(arg, \"=\")\n _lhs = \"\"\n _rhs = \"\"\n try:\n [_lhs,_rhs]=arg.split('=',1)\n try:\n _uri = join(option_baseURI, _rhs)\n except ValueError:\n _uri = _rhs\n except ValueError: pass\n if arg == \"-ugly\": option_outputStyle = arg\n elif _lhs == \"-base\": option_baseURI = _uri\n elif arg == \"-rdf\":\n option_format = \"rdf\"\n if option_first_format == None:\n option_first_format = option_format \n option_need_rdf_sometime = 1\n elif _lhs == \"-rdf\":\n option_format = \"rdf\"\n if option_first_format == None:\n option_first_format = option_format \n option_flags[\"rdf\"] = _rhs\n option_need_rdf_sometime = 1\n elif arg == \"-n3\":\n option_format = \"n3\"\n if option_first_format == None:\n option_first_format = option_format \n elif _lhs == \"-n3\":\n option_format = \"n3\"\n if option_first_format == None:\n option_first_format = option_format \n option_flags[\"n3\"] = _rhs\n elif _lhs == \"-mode\":\n option_flags[\"think\"] = _rhs\n elif _lhs == \"-closure\":\n if \"n\" in _rhs:\n option_normalize_iri = 1\n #elif _lhs == \"-solve\":\n # sys.argv[argnum+1:argnum+1] = ['-think', '-filter=' + _rhs]\n elif _lhs == \"-language\":\n option_format = _rhs\n if option_first_format == None:\n option_first_format = option_format\n elif _lhs == \"-languageOptions\":\n option_flags[option_format] = _rhs\n elif arg == \"-quiet\": option_quiet = 1\n elif arg == \"-pipe\": option_pipe = 1\n elif arg == \"-crypto\": option_crypto = 1\n elif _lhs == \"-why\":\n diag.tracking=1\n diag.setTracking(1)\n option_why = _rhs\n elif arg == \"-why\":\n diag.tracking=1\n diag.setTracking(1)\n option_why = \"\"\n elif arg == \"-track\":\n diag.tracking=1\n diag.setTracking(1)\n elif arg == \"-bySubject\": option_outputStyle = arg\n elif arg == \"-no\": option_outputStyle = \"-no\"\n elif arg == \"-debugString\": option_outputStyle = \"-debugString\"\n elif arg == \"-strings\": option_outputStyle = 
\"-no\"\n elif arg == \"-sparqlResults\": option_outputStyle = \"-no\"\n elif arg == \"-triples\" or arg == \"-ntriples\":\n option_format = \"n3\"\n option_flags[\"n3\"] = \"bravestpun\"\n option_outputStyle = \"-bySubject\"\n option_quiet = 1\n elif _lhs == \"-outURI\": option_outURI = _uri\n elif _lhs == \"-chatty\":\n setVerbosity(int(_rhs))\n elif arg[:7] == \"-apply=\": pass\n elif arg[:7] == \"-patch=\": pass\n elif arg == \"-reify\": option_reify = 1\n elif arg == \"-flat\": option_flat = 1\n elif arg == \"-help\":\n print doCommand.__doc__\n print notation3.ToN3.flagDocumentation\n print toXML.ToRDF.flagDocumentation\n try:\n from swap import sax2rdf # RDF1.0 syntax parser to N3 RDF stream\n print sax2rdf.RDFXMLParser.flagDocumentation\n except:\n pass\n return\n elif arg == \"-revision\":\n progress( \"cwm=\",cvsRevision, \"llyn=\", llyn.cvsRevision)\n return\n elif arg == \"-with\":\n option_with = sys.argv[argnum+1:] # The rest of the args are passed to n3\n break\n elif arg[0] == \"-\": pass # Other option\n else :\n option_inputs.append(join(option_baseURI, arg))\n _gotInput = _gotInput + 1 # input filename\n \n\n # Between passes, prepare for processing\n setVerbosity(0)\n\n if not option_normalize_iri:\n llyn.canonical = lambda x: x\n\n # Base defauts\n if option_baseURI == _baseURI: # Base not specified explicitly - special case\n if _outURI == _baseURI: # Output name not specified either\n if _gotInput == 1: # But input file *is*, \n _outURI = option_inputs[0] # Just output to same URI\n option_baseURI = _outURI # using that as base.\n if diag.tracking:\n _outURI = RDFSink.runNamespace()[:-1]\n option_baseURI = _outURI\n option_baseURI = splitFrag(option_baseURI)[0]\n\n # Fix the output sink\n if option_format == \"rdf\":\n _outSink = toXML.ToRDF(sys.stdout, _outURI, base=option_baseURI, flags=option_flags[\"rdf\"])\n elif option_format == \"n3\" or option_format == \"sparql\":\n _outSink = notation3.ToN3(sys.stdout.write, base=option_baseURI,\n quiet=option_quiet, flags=option_flags[\"n3\"])\n elif option_format == \"trace\":\n _outSink = RDFSink.TracingRDFSink(_outURI, base=option_baseURI,\n flags=option_flags.get(\"trace\",\"\"))\n if option_pipe:\n # this is really what a parser wants to dump to\n _outSink.backing = llyn.RDFStore( _outURI+\"#_g\",\n argv=option_with, crypto=option_crypto) \n else:\n # this is really what a store wants to dump to \n _outSink.backing = notation3.ToN3(sys.stdout.write,\n base=option_baseURI, quiet=option_quiet,\n flags=option_flags[\"n3\"])\n\n # hm. why does TimBL use sys.stdout.write, above? performance at the \n else:\n raise NotImplementedError\n\n version = \"$Id: cwm.py,v 1.198 2012/01/30 09:30:20 timbl Exp $\"\n if not option_quiet and option_outputStyle != \"-no\":\n _outSink.makeComment(\"Processed by \" + version[1:-1]) # Strip $ to disarm\n _outSink.makeComment(\" using base \" + option_baseURI)\n\n if option_flat:\n _outSink = notation3.Reifier(_outSink, _outURI+ \"#_formula\", flat=1)\n\n if diag.tracking: \n myReason = BecauseOfCommandLine(`sys.argv`)\n # @@ add user, host, pid, pwd, date time? 
Privacy!\n else:\n myReason = None\n\n if option_pipe:\n _store = _outSink\n workingContext = _outSink #.newFormula()\n else:\n if \"u\" in option_flags[\"think\"]:\n _store = llyn.RDFStore(argv=option_with, crypto=option_crypto)\n else:\n _store = llyn.RDFStore( _outURI+\"#_g\",\n argv=option_with, crypto=option_crypto)\n myStore.setStore(_store)\n\n\n if _gotInput: \n workingContext = _store.newFormula(option_inputs [0]+\"#_work\")\n newTopLevelFormula(workingContext)\n else: # default input\n if option_first_format is None: option_first_format = option_format\n ContentType={ \"rdf\": \"application/xml+rdf\", \"n3\":\n \"text/n3\", \"sparql\":\n \"x-application/sparql\"}[option_first_format]\n workingContext = _store.load(\n # asIfFrom = join(_baseURI, \".stdin\"),\n asIfFrom = _baseURI,\n contentType = ContentType,\n flags = option_flags[option_first_format],\n remember = 0,\n referer = \"\",\n why = myReason, topLevel=True)\n workingContext.reopen()\n workingContext.stayOpen = 1 # Never canonicalize this. Never share it.\n \n\n # ____________________________________________________________________\n # Take commands from command line:- - - - - P A S S 2\n\n option_format = \"n3\" # Use RDF/n3 rather than RDF/XML \n option_flags = { \"rdf\":\"l\", \"n3\":\"\", \"think\": \"\", \"sparql\":\"\" } \n option_quiet = 0\n _outURI = _baseURI\n option_baseURI = _baseURI # To start with\n \n def filterize():\n \"\"\"implementation of --filter\n for the --filter command, so we don't have it printed twice\n \"\"\"\n global workingContext\n global r\n workingContext = workingContext.canonicalize()\n _store._formulaeOfLength = {}\n filterContext = _store.newFormula()\n newTopLevelFormula(filterContext)\n _store.load(_uri, openFormula=filterContext,\n why=myReason, referer=\"\")\n _newContext = _store.newFormula()\n newTopLevelFormula(_newContext)\n applyRules(workingContext, filterContext, _newContext)\n workingContext.close()\n workingContext = _newContext\n\n sparql_query_formula = None\n\n \n for arg in sys.argv[1:]: # Command line options after script name\n if verbosity()>5: progress(\"Processing %s.\" % (arg))\n if arg.startswith(\"--\"): arg = arg[1:] # Chop posix-style -- to -\n _equals = string.find(arg, \"=\")\n _lhs = \"\"\n _rhs = \"\"\n if _equals >=0:\n _lhs = arg[:_equals]\n _rhs = arg[_equals+1:]\n try:\n _uri = join(option_baseURI, _rhs)\n except ValueError:\n _uri =_rhs\n if arg[0] != \"-\":\n _inputURI = join(option_baseURI, splitFrag(arg)[0])\n assert ':' in _inputURI\n ContentType={ \"rdf\": \"application/xml+rdf\", \"n3\":\n \"text/n3\",\n \"sparql\": \"x-application/sparql\"}[option_format]\n\n if not option_pipe: workingContext.reopen()\n try:\n load(_store, _inputURI,\n openFormula=workingContext,\n contentType =ContentType,\n flags=option_flags[option_format],\n referer=\"\",\n why=myReason)\n except:\n progress(_inputURI)\n raise\n\n _gotInput = 1\n\n elif arg == \"-help\":\n pass # shouldn't happen\n elif arg == \"-revision\":\n pass\n elif _lhs == \"-base\":\n option_baseURI = _uri\n if verbosity() > 10: progress(\"Base now \"+option_baseURI)\n\n elif arg == \"-ugly\":\n option_outputStyle = arg \n\n elif arg == \"-crypto\": pass\n elif arg == \"-pipe\": pass\n elif _lhs == \"-outURI\": option_outURI = _uri\n\n elif arg == \"-rdf\": option_format = \"rdf\"\n elif _lhs == \"-rdf\":\n option_format = \"rdf\"\n option_flags[\"rdf\"] = _rhs\n elif _lhs == \"-mode\":\n option_flags[\"think\"] = _rhs\n elif _lhs == \"-closure\":\n workingContext.setClosureMode(_rhs)\n elif arg 
== \"-n3\": option_format = \"n3\"\n elif _lhs == \"-n3\":\n option_format = \"n3\"\n option_flags[\"n3\"] = _rhs\n elif _lhs == \"-language\":\n option_format = _rhs\n if option_first_format == None:\n option_first_format = option_format\n elif _lhs == \"-languageOptions\":\n option_flags[option_format] = _lhs\n elif arg == \"-quiet\" : option_quiet = 1 \n elif _lhs == \"-chatty\": setVerbosity(int(_rhs))\n elif arg[:7] == \"-track=\":\n diag.tracking = int(_rhs)\n \n elif option_pipe: ############## End of pipable options\n print \"# Command line error: %s illegal option with -pipe\", arg\n break\n\n elif arg == \"-triples\" or arg == \"-ntriples\":\n option_format = \"n3\"\n option_flags[\"n3\"] = \"spartan\"\n option_outputStyle = \"-bySubject\"\n option_quiet = 1\n\n elif arg == \"-bySubject\":\n option_outputStyle = arg\n\n elif arg == \"-debugString\":\n option_outputStyle = arg\n\n elif arg[:7] == \"-apply=\":\n workingContext = workingContext.canonicalize()\n \n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n workingContext.reopen()\n applyRules(workingContext, filterContext);\n\n elif arg[:7] == \"-apply=\":\n workingContext = workingContext.canonicalize()\n \n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n workingContext.reopen()\n applyRules(workingContext, filterContext);\n\n elif arg[:7] == \"-patch=\":\n workingContext = workingContext.canonicalize()\n \n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n workingContext.reopen()\n patch(workingContext, filterContext);\n\n elif _lhs == \"-filter\":\n filterize()\n\n elif _lhs == \"-query\":\n workingContext = workingContext.canonicalize()\n filterContext = _store.load(_uri, \n flags=option_flags[option_format],\n referer=\"\",\n why=myReason, topLevel=True)\n _newContext = _store.newFormula()\n applyQueries(workingContext, filterContext, _newContext)\n workingContext.close()\n workingContext = _newContext\n\n elif _lhs == \"-sparql\":\n workingContext.stayOpen = False\n workingContext = workingContext.canonicalize()\n filterContext = _store.load(_uri, why=myReason,\n referer=\"\", contentType=\"x-application/sparql\")\n _newContext = _store.newFormula()\n _newContext.stayOpen = True\n sparql_query_formula = filterContext\n applySparqlQueries(workingContext, filterContext, _newContext)\n# workingContext.close()\n workingContext = _newContext\n\n elif _lhs == \"-why\" or arg == \"-why\":\n workingContext.stayOpen = False\n workingContext = workingContext.close()\n workingContext = explainFormula(workingContext, option_why)\n # Can't prove proofs\n diag.tracking=0\n diag.setTracking(0)\n\n elif arg == \"-dump\":\n \n workingContext = workingContext.canonicalize()\n progress(\"\\nDump of working formula:\\n\" + workingContext.debugString())\n \n elif arg == \"-purge\":\n workingContext.reopen()\n _store.purge(workingContext)\n \n elif arg == \"-purge-rules\" or arg == \"-data\":\n \n workingContext.reopen()\n _store.purgeExceptData(workingContext)\n\n elif arg == \"-rules\":\n \n workingContext.reopen()\n applyRules(workingContext, workingContext)\n\n elif arg[:7] == \"-think=\":\n \n filterContext = _store.load(_uri, referer=\"\", why=myReason, topLevel=True)\n if verbosity() > 4:\n progress( \"Input rules to --think from \" + _uri)\n workingContext.reopen()\n think(workingContext, filterContext, 
mode=option_flags[\"think\"])\n\n elif arg[:7] == \"-solve=\":\n # --solve is a combination of --think and --filter.\n think(workingContext, mode=option_flags[\"think\"])\n filterize()\n \n elif _lhs == \"-engine\":\n option_engine = _rhs\n \n elif arg == \"-think\":\n workingContext.isWorkingContext = True\n think(workingContext, mode=option_flags[\"think\"])\n\n elif arg == '-rete':\n from swap import pycwmko \n pythink = pycwmko.directPychinkoQuery(workingContext)\n #return\n #pythink()\n \"\"\"\n from pychinko import interpreter\n from swap.set_importer import Set, ImmutableSet\n pyf = pycwmko.N3Loader.N3Loader()\n conv = pycwmko.ToPyStore(pyf)\n conv.statements(workingContext)\n interp = interpreter.Interpreter(pyf.rules[:])\n interp.addFacts(Set(pyf.facts), initialSet=True)\n interp.run()\n pyf.facts = interp.totalFacts\n workingContext = workingContext.store.newFormula()\n reconv = pycwmko.FromPyStore(workingContext, pyf)\n reconv.run()\n \"\"\"\n\n elif arg == '-sparqlServer':\n from swap.sparql import webserver\n from swap import cwm_sparql\n sandBoxed(True)\n workingContext.stayOpen = False\n workingContext = workingContext.canonicalize()\n def _handler(s):\n return cwm_sparql.sparql_queryString(workingContext, s)\n webserver.sparql_handler = _handler\n webserver.run()\n\n elif arg == \"-lxkbdump\": # just for debugging\n raise NotImplementedError\n\n elif arg == \"-lxfdump\": # just for debugging\n raise NotImplementedError \n\n elif _lhs == \"-prove\":\n\n # code copied from -filter without really being understood -sdh\n _tmpstore = llyn.RDFStore( _outURI+\"#_g\", metaURI=_metaURI, argv=option_with, crypto=option_crypto)\n\n tmpContext = _tmpstore.newFormula(_uri+ \"#_formula\")\n _newURI = join(_baseURI, \"_w_\"+`_genid`) # Intermediate\n _genid = _genid + 1\n _newContext = _tmpstore.newFormula(_newURI+ \"#_formula\")\n _tmpstore.loadURI(_uri)\n\n print targetkb\n\n elif arg == \"-flatten\":\n #raise NotImplementedError\n from swap import reify\n workingContext = reify.flatten(workingContext)\n\n elif arg == \"-unflatten\":\n from swap import reify\n workingContext = reify.unflatten(workingContext)\n #raise NotImplementedError\n \n elif arg == \"-reify\":\n from swap import reify\n workingContext = reify.reify(workingContext)\n \n\n elif arg == \"-dereify\":\n from swap import reify\n workingContext = reify.dereify(workingContext) \n \n\n elif arg == \"-size\":\n progress(\"Size: %i statements in store, %i in working formula.\"\n %(_store.size, workingContext.size()))\n\n elif arg == \"-strings\": # suppress output\n workingContext.outputStrings() \n option_outputStyle = \"-no\"\n\n elif arg == '-sparqlResults':\n from cwm_sparql import outputString, SPARQL_NS\n ns = _store.newSymbol(SPARQL_NS)\n if not sparql_query_formula:\n raise ValueError('No query')\n else:\n sys.stdout.write(outputString(sparql_query_formula, workingContext)[0].encode('utf_8'))\n option_outputStyle = \"-no\"\n \n \n elif arg == \"-no\": # suppress output\n option_outputStyle = arg\n \n elif arg[:8] == \"-outURI=\": pass\n elif arg == \"-with\": break\n else:\n progress( \"cwm: Unknown option: \" + arg)\n sys.exit(-1)\n\n\n\n # Squirt it out if not piped\n\n workingContext.stayOpen = 0 # End its use as an always-open knoweldge base\n if option_pipe:\n workingContext.endDoc()\n else:\n if hasattr(_outSink, \"serializeKB\"):\n raise NotImplementedError\n else:\n if verbosity()>5: progress(\"Begining output.\")\n workingContext = workingContext.close()\n assert workingContext.canonical != None\n\n if 
option_outputStyle == \"-ugly\":\n _store.dumpChronological(workingContext, _outSink)\n elif option_outputStyle == \"-bySubject\":\n _store.dumpBySubject(workingContext, _outSink)\n elif option_outputStyle == \"-no\":\n pass\n elif option_outputStyle == \"-debugString\":\n print workingContext.debugString()\n else: # \"-best\"\n _store.dumpNested(workingContext, _outSink,\n flags=option_flags[option_format])",
"def asCommandLine(self, args):\n try:\n inFile = args[\"xmlfile\"]\n outFile = args[\"out\"]\n except:\n raise PeachException(\"XmlAnalyzer requires two parameters, xmlfile and out.\")\n xml = _Xml2Peach().xml2Peach(\"file:\" + inFile)\n with open(outFile, \"wb+\") as fo:\n fo.write(xml)",
"def main(self): # just put into if __name__ ...\n parser = self.get_parser()\n args = parser.parse_args()\n self.run(args)",
"def main():\n # set up the program to take in arguments from the command line",
"def add_standard_args(self):\n self.add_argument(\"-v\", \"--verbose\",\n help=\"Set log verbosity to True, nominal debug level.\", action=\"store_true\")\n self.add_argument(\"--verbosity\",\n help=\"Set log verbosity to a specific level: 0..100.\", type=int, default=0)\n self.add_argument(\"--dump-cmdline\", action=\"store_true\",\n help=\"Dump the command line parameters used to start the script to the log.\")\n self.add_argument(\"-R\", \"--readonly-cache\", action=\"store_true\",\n help=\"Don't modify the CRDS cache. Not compatible with options which implicitly modify the cache.\")\n self.add_argument('-I', '--ignore-cache', action='store_true', dest=\"ignore_cache\",\n help=\"Download required files even if they're already in the cache.\")\n self.add_argument(\"-V\", \"--version\",\n help=\"Print the software version and exit.\", action=\"store_true\")\n self.add_argument(\"-J\", \"--jwst\", dest=\"jwst\", action=\"store_true\",\n help=\"Force observatory to JWST for determining header conventions.\"\"\")\n self.add_argument(\"-H\", \"--hst\", dest=\"hst\", action=\"store_true\",\n help=\"Force observatory to HST for determining header conventions.\"\"\")\n self.add_argument(\"--roman\", dest=\"roman\", action=\"store_true\",\n help=\"Force observatory to Roman for determining header conventions.\"\"\")\n self.add_argument(\"--stats\", action=\"store_true\",\n help=\"Track and print timing statistics.\")\n self.add_argument(\"--profile\",\n help=\"Output profile stats to the specified file.\", type=str, default=\"\")\n self.add_argument(\"--log-time\", action=\"store_true\",\n help=\"Add date/time to log messages.\")\n self.add_argument(\"--pdb\",\n help=\"Run under pdb.\", action=\"store_true\")\n self.add_argument(\"--debug-traps\",\n help=\"Bypass exception error message traps and re-raise exception.\", action=\"store_true\")",
"def main():\n args = docopt(__doc__, version='recipy v%s' % __version__)\n\n if args['--debug']:\n print('Command-line arguments: ')\n print(args)\n print('DB path: ', config.get_db_path())\n print('')\n print('Full config file (as interpreted):')\n print('----------------------------------')\n conf = config.read_config_file()\n s = six.StringIO()\n conf.write(s)\n print(s.getvalue())\n print('----------------------------------')\n\n\n if args['search']:\n search(args)\n elif args['latest']:\n latest(args)\n elif args['gui']:\n gui(args)\n elif args['annotate']:\n annotate(args)",
"def run_parser(self, parser: ArgumentParser):",
"def run_with(self, runner):\n runner([self.path] + self.arguments)",
"def process_commandline():\n parser = optparse.OptionParser(__doc__.strip())\n if os.getuid() == 0:\n support_path = \"/Library/\"\n else:\n support_path = os.path.expanduser(\"~/Library/\")\n preference_file = os.path.join(support_path, \"Preferences\",\n \"com.googlecode.pymacadmin.crankd.plist\")\n module_path = os.path.join(support_path, \"Application Support/crankd\")\n\n if os.path.exists(module_path):\n sys.path.append(module_path)\n else:\n print(\n \"Module directory %s does not exist: \"\n \"Python handlers will need to use absolute pathnames\" % module_path,\n file=sys.stderr)\n\n parser.add_option(\n \"-f\",\n \"--config\",\n dest=\"config_file\",\n help=\"Use an alternate config file instead of %default\",\n default=preference_file)\n parser.add_option(\n \"-l\",\n \"--list-events\",\n action=\"callback\",\n callback=list_events,\n help=\"List the events which can be monitored\")\n parser.add_option(\n \"-d\",\n \"--debug\",\n action=\"count\",\n default=False,\n help=\"Log detailed progress information\")\n (options, args) = parser.parse_args()\n\n if args:\n parser.error(\"Unknown command-line arguments: %s\" % args)\n\n options.support_path = support_path\n options.config_file = os.path.realpath(options.config_file)\n\n # This is somewhat messy but we want to alter the command-line to use full\n # file paths in case someone's code changes the current directory or the\n sys.argv = [\n os.path.realpath(sys.argv[0]),\n ]\n\n if options.debug:\n logging.getLogger().setLevel(logging.DEBUG)\n sys.argv.append(\"--debug\")\n\n if options.config_file:\n sys.argv.append(\"--config\")\n sys.argv.append(options.config_file)\n\n return options",
"def run_one(options, args, stem_prefix='', input_file=None):\n if input_file is None:\n input_file = options.input_file\n stem = stem_prefix + '-'.join(args)\n data_filename = output_file_name(options.output_directory, stem, 'h')\n stdout_filename = output_file_name(options.output_directory, stem, 'out')\n stderr_filename = output_file_name(options.output_directory, stem, 'err')\n status_filename = output_file_name(options.output_directory, stem, 'status')\n shutil.copy(input_file, data_filename)\n # Pass only the file basename, not the full path, to avoid getting the\n # directory name in error messages, which would make comparisons\n # between output directories more difficult.\n cmd = [os.path.abspath(options.script),\n '-f', os.path.basename(data_filename)]\n with open(stdout_filename, 'wb') as out:\n with open(stderr_filename, 'wb') as err:\n status = subprocess.call(cmd + args,\n cwd=options.output_directory,\n stdin=subprocess.DEVNULL,\n stdout=out, stderr=err)\n with open(status_filename, 'w') as status_file:\n status_file.write('{}\\n'.format(status))\n return stem + \"+\", data_filename",
"def main(myCommandLine=None):\n myCommandLine = CommandLine()\n\n if myCommandLine.args.hiConfOut:\n hiConfOut = myCommandLine.args.hiConfOut\n\n if myCommandLine.args.bedFile:\n bedFile = myCommandLine.args.bedFile\n\n if myCommandLine.args.speciesName:\n speciesName = myCommandLine.args.speciesName\n else:\n speciesName = ''\n\n if myCommandLine.args.cactusPath:\n cactusPath = myCommandLine.args.cactusPath\n else:\n cactusPath = ''\n\n if myCommandLine.args.modFile:\n modFile = myCommandLine.args.modFile\n else:\n modFile = ''\n\n if myCommandLine.args.chromLengths:\n chromLengths = myCommandLine.args.chromLengths\n else:\n chromLengths = ''\n\n if myCommandLine.args.outPath:\n outPath = myCommandLine.args.outPath\n\n myFileConverter = fileConverter(hiConfOut, bedFile, speciesName, cactusPath, modFile, chromLengths, outPath)\n myFileConverter.makeHiConfBeds()",
"def setup(cls, subparser):\n # creates the parser for options\n parser = subparser.add_parser(cls.__command__, help=cls.__help__)\n\n # adds the arguments\n cls.args(parser)\n\n # sets the default function to invoke\n parser.set_defaults(func=cls.run)\n cls._parser = parser",
"def main(args):\n \n if (len(args) > 2):\n print('Incorrect amount of arguements, run file-extractor.py help for usage')\n exit()\n \n if(args[1] == 'run'):\n fh = FileHandler(\n **{'file' : args[0], })\n\n fh.verify()\n fh.parse_filename()\n fh.byte_counter()\n fh.sha1_digest()\n fh.md5_digest()\n \n fh.print_results()\n\n else:\n print(\n \"\"\"\n Need Command Usage\n Start run file-extractor.py <PathtoFile> run\n \"\"\"\n )\n exit()",
"def main():\n parser = OptionParser()\n parser.add_option('-p', '--population', action='append',\n dest=\"populations\", help='population_files')\n parser.add_option('-a', '--arguments-selection-pipelines',\n dest=\"extra_args\", help=('Arguments to the selection'\n 'pipeline script'))\n parser.add_option('-l', '--log-file', dest=\"log_file\", help=\"Log file\")\n parser.add_option('-i', '--vcf-input-file', dest=\"vcf_input\",\n help=\"VCF Input File\")\n parser.add_option('-c', '--chromosome', dest=\"chromosome\",\n help=(\"Chromosome label doesn't actually have to\"\n \"correspond to the real chromosome but is required\"\n \" to determine what output files to make\"))\n parser.add_option('--config-file', dest='config_file',\n help='Configuration File')\n parser.add_option('--fst-window-size', dest=\"fst_window_size\",\n help=\"FST window size (kb)\")\n parser.add_option('--fst-window-step', dest=\"fst_window_step\",\n help=\"FST window step size (kb)\")\n parser.add_option('--no-clean-up', dest=\"no_clean_up\",\n action=\"store_true\",\n help=\"Do not clean up intermediate datafiles\")\n parser.add_option('--cores', dest=\"cores\", help=(\"Overrides number of \"\n \"cores avaliable as provided in the config file\"))\n parser.add_option('--no-rsb',dest=\"no_rsb\", action=\"store_true\",\n help=\"Do not calculate RSB\")\n (options, args) = parser.parse_args()\n print(options.extra_args)\n assert options.vcf_input is not None, \\\n \"no VCF file has been specified as input\"\n assert os.path.isfile(options.vcf_input), \\\n \"Cannot locate vcf file at path = {0)\".format(options.vcf_input)\n assert options.chromosome is not None, \\\n \"no chromosome has been specified to the script\"\n assert options.populations is not None and \\\n len(options.populations) >= 2, \\\n \"At least two population files are required\"\n if options.config_file is None:\n options.config_file = 'defaults.cfg'\n if not(os.path.isfile(options.config_file)):\n raise Exception(\"Cannot find config file\")\n elif not(os.path.isfile(options.config_file)):\n raise Exception(\"Cannot find config file\")\n config = parse_config(options)\n if options.log_file is None:\n options.log_file = 'multi_population.log'\n logging.basicConfig(format='%(asctime)s %(message)s',\n filename=options.log_file, filemode='w',\n level=logging.INFO)\n if not (check_executables_and_scripts_exist(options, config)):\n sys.exit(CANNOT_FIND_EXECUTABLE)\n if options.no_clean_up is None:\n options.clean_up_files = False\n if options.fst_window_step is None:\n options.fst_window_step = str(1000)\n else:\n options.fst_window_step = str(\n float(options.fst_window_step) * 1e3)\n if options.fst_window_size is None:\n options.fst_window_size = str(1000)\n else:\n options.fst_window_size = str(\n float(options.fst_window_size) * 1e3)\n if options.no_rsb is None:\n options.no_rsb = False\n if options.cores is not None:\n config['system']['cores_avaliable'] = options.cores\n set_environment(config['environment'])\n options.vcf_input = os.path.abspath(options.vcf_input)\n populations = get_populations(options.populations)\n populations = OrderedDict(sorted(populations.items(), key=lambda t: t[0]))\n fst_vcf(options.vcf_input, config, options, populations)\n output_vcfs = subset_vcf(options.vcf_input, config, populations)\n run_selection_pipeline(output_vcfs, options, populations, config)\n # TODO move FST to here on filtered dataset\n if not (options.no_rsb):\n rsb(config, options, populations)\n if not os.path.exists('logs'):\n os.mkdir('logs')\n 
os.rename(options.log_file, 'logs/' + options.log_file)\n if not options.no_clean_up:\n keep = [os.path.basename(options.vcf_input),os.path.basename(options.config_file)]\n keep.extend(options.populations)\n clean_folder('.', keep=keep)\n logger.info(\"Multi_population Complete\")\n logger.info(\"Goodbye :\")\n print(\"Multi-population selection pipeline completed successfully !:)\")",
"def run():\n parser = argparse.ArgumentParser(\n prog='twitter-scraper', description=\"Scrape twitter public pages without an API key\",\n )\n parser.add_argument('account', type=str, help=\"twitter account\")\n parser.add_argument('-f', '--filename', type=str, help=\"Output filename\")\n parser.add_argument('-p', '--pages', type=int, help=\"Number of pages to download\", default=10)\n parser.add_argument('-v', '--verbose', action='count', help=\"Enable logging\", default=0)\n args = parser.parse_args()\n\n # Enable logging\n if args.verbose > 0:\n args.verbose = min(args.verbose, 3)\n level = {1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}[args.verbose]\n enable_logging(level)\n\n write_tweets_to_csv(account=args.account, filename=args.filename, page_limit=args.pages)",
"def console(self):\r\n parser = argparse.ArgumentParser(\r\n prog=\"dexofuzzy\",\r\n description=(\"Dexofuzzy - Dalvik EXecutable Opcode Fuzzyhash\"),\r\n add_help=True)\r\n\r\n parser.add_argument(\r\n \"-f\", \"--file\", metavar=\"SAMPLE_FILENAME\",\r\n help=\"the sample to extract dexofuzzy\")\r\n parser.add_argument(\r\n \"-d\", \"--directory\", metavar=\"SAMPLE_DIRECTORY\",\r\n help=\"the directory of samples to extract dexofuzzy\")\r\n\r\n parser.add_argument(\r\n \"-m\", \"--method-fuzzy\", action=\"store_true\",\r\n help=\"extract the fuzzyhash based on method of the sample\"\r\n + \"(must include the -f or -d option by default)\")\r\n\r\n parser.add_argument(\r\n \"-g\", \"--clustering\", metavar=(\"N\", \"M\"), nargs=2, type=int,\r\n help=\"N-Gram Tokenizer and M-Partial Matching clustering\"\r\n + \" based on the sample's dexofuzzy \"\r\n + \"(must include the -d option by default)\")\r\n\r\n parser.add_argument(\r\n \"-s\", \"--score\", metavar=\"DEXOFUZZY\", nargs=2,\r\n help=\"score the dexofuzzy of the sample\")\r\n\r\n parser.add_argument(\r\n \"-c\", \"--csv\", metavar=\"CSV_FILENAME\",\r\n help=\"output as CSV format\")\r\n parser.add_argument(\r\n \"-j\", \"--json\", metavar=\"JSON_FILENAME\",\r\n help=\"output as json format \" +\r\n \"(include method fuzzy or clustering)\")\r\n parser.add_argument(\r\n \"-l\", \"--error-log\", metavar=\"LOG_FILENAME\",\r\n help=\"output the error log\")\r\n\r\n if len(sys.argv) == 1:\r\n parser.print_help()\r\n return None\r\n\r\n self.args = parser.parse_args()\r\n dexofuzzy_list = []\r\n\r\n if self.args.score:\r\n print(self.__get_dexofuzzy_compare(self.args.score[0], self.args.score[1]))\r\n\r\n if self.args.directory:\r\n for result in self.__search_directory(self.args.directory):\r\n if result is not None:\r\n print(f'{result[\"file_name\"]},{result[\"file_sha256\"]},'\r\n f'{result[\"file_size\"]},{result[\"dexohash\"]},'\r\n f'{result[\"dexofuzzy\"]}')\r\n\r\n if self.args.method_fuzzy:\r\n print(json.dumps(result[\"method_fuzzy\"], indent=4))\r\n\r\n dexofuzzy_list.append(result)\r\n\r\n if self.args.file:\r\n result = self.__search_file(self.args.file)\r\n if result is not None:\r\n print(f'{result[\"file_name\"]},{result[\"file_sha256\"]},'\r\n f'{result[\"file_size\"]},{result[\"dexohash\"]},'\r\n f'{result[\"dexofuzzy\"]}')\r\n\r\n if self.args.method_fuzzy:\r\n print(json.dumps(result[\"method_fuzzy\"], indent=4))\r\n\r\n dexofuzzy_list.append(result)\r\n\r\n if self.args.clustering:\r\n if not self.args.directory:\r\n print(\"must include the -d option by default\")\r\n return None\r\n\r\n dexofuzzy_list = self.__clustering_dexofuzzy(dexofuzzy_list,\r\n self.args.clustering[0],\r\n self.args.clustering[1])\r\n print(json.dumps(dexofuzzy_list, indent=4))\r\n\r\n if self.args.csv:\r\n try:\r\n with open(self.args.csv, \"w\", encoding=\"UTF-8\", newline=\"\") as csv_file:\r\n fieldnames = [\"file_name\", \"file_sha256\", \"file_size\",\r\n \"dexohash\", \"dexofuzzy\"]\r\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n\r\n for output in dexofuzzy_list:\r\n row = {}\r\n row[\"file_name\"] = output[\"file_name\"]\r\n row[\"file_sha256\"] = output[\"file_sha256\"]\r\n row[\"file_size\"] = output[\"file_size\"]\r\n row[\"dexohash\"] = output[\"dexohash\"]\r\n row[\"dexofuzzy\"] = output[\"dexofuzzy\"]\r\n writer.writerow(row)\r\n\r\n except IOError:\r\n print(f\"{inspect.stack()[0][3]} : {traceback.format_exc()}\")\r\n return False\r\n\r\n if self.args.json:\r\n try:\r\n with 
open(self.args.json, \"w\", encoding=\"UTF-8\") as json_file:\r\n json.dump(dexofuzzy_list, json_file, indent=4)\r\n\r\n except IOError:\r\n print(f\"{inspect.stack()[0][3]} : {traceback.format_exc()}\")\r\n return False",
"def run_from_argv(self, argv):\n parser = self.create_arg_parser(argv)\n self.options = parser.parse_args(argv[2:])\n\n args = self.options.args\n\n # Check that the proper number of arguments have been provided.\n argspec = inspect.getargspec(self.main)\n minargs = len(argspec[0]) - 1\n maxargs = minargs\n\n # Arguments that have a default value are considered optional.\n if argspec[3] is not None:\n minargs -= len(argspec[3])\n\n if argspec[1] is not None:\n maxargs = None\n\n if len(args) < minargs or (maxargs is not None and\n len(args) > maxargs):\n parser.error('Invalid number of arguments provided')\n sys.exit(1)\n\n self.initialize()\n log_command_line('Command line: %s', argv)\n\n try:\n exit_code = self.main(*args) or 0\n except CommandError as e:\n if isinstance(e, ParseError):\n parser.error(e)\n elif self.options.debug:\n raise\n\n logging.error(e)\n exit_code = 1\n except CommandExit as e:\n exit_code = e.exit_code\n except Exception as e:\n # If debugging is on, we'll let python spit out the\n # stack trace and report the exception, otherwise\n # we'll suppress the trace and print the exception\n # manually.\n if self.options.debug:\n raise\n\n logging.critical(e)\n exit_code = 1\n\n cleanup_tempfiles()\n sys.exit(exit_code)",
"def process_cli_args():\n args = parse_cli_args()\n\n # delete empty args\n if not args[\"debug\"]:\n del args[\"debug\"]\n for arg_name in list(args.keys()):\n if args[arg_name] in [None, tuple()]:\n del args[arg_name]\n\n # validate\n validate_cli_args(args)\n\n # --write-config\n if args.pop(\"write_config\"):\n config_values = {}\n if args.get(\"command\"):\n config_values[\"command\"] = \" \".join(\n shlex.quote(subval) for subval in args[\"command\"]\n )\n if args.get(\"watch\"):\n config_values[\"watch\"] = \"\\n\".join(args[\"watch\"])\n if args.get(\"output\"):\n config_values[\"output\"] = \", \".join(args[\"output\"])\n for arg_name in [\"delay\", \"max_execs\", \"name\", \"start\", \"watcher\"]:\n if arg_name in args:\n config_values[arg_name] = args[arg_name]\n\n write_config_file(args, config_values)\n sys.exit(0)\n\n return args"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set up & run a `Publisher` for command-line-based file I/O (input and output file paths taken automatically from the command line). Return the encoded string output also. This is just like publish_cmdline, except that it uses io.BinaryFileOutput instead of io.FileOutput. | def publish_cmdline_to_binary(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=True, argv=None,
usage=default_usage, description=default_description,
destination=None, destination_class=io.BinaryFileOutput
):
pub = Publisher(reader, parser, writer, settings=settings,
destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
config_section=config_section, enable_exit_status=enable_exit_status)
return output | [
"def publish(self, argv=None, usage=None, description=None,\r\n settings_spec=None, settings_overrides=None,\r\n config_section=None, enable_exit_status=False):\r\n exit = None\r\n try:\r\n if self.settings is None:\r\n self.process_command_line(\r\n argv, usage, description, settings_spec, config_section,\r\n **(settings_overrides or {}))\r\n self.set_io()\r\n self.document = self.reader.read(self.source, self.parser,\r\n self.settings)\r\n self.apply_transforms()\r\n output = self.writer.write(self.document, self.destination)\r\n self.writer.assemble_parts()\r\n except SystemExit, error:\r\n exit = 1\r\n exit_status = error.code\r\n except Exception, error:\r\n if not self.settings: # exception too early to report nicely\r\n raise\r\n if self.settings.traceback: # Propagate exceptions?\r\n self.debugging_dumps()\r\n raise\r\n self.report_Exception(error)\r\n exit = True\r\n exit_status = 1\r\n self.debugging_dumps()\r\n if (enable_exit_status and self.document\r\n and (self.document.reporter.max_level\r\n >= self.settings.exit_status_level)):\r\n sys.exit(self.document.reporter.max_level + 10)\r\n elif exit:\r\n sys.exit(exit_status)\r\n return output",
"def main():\n \n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-d\", \"--decode\", action=\"store_true\")\n group.add_argument(\"-e\", \"--encode\", action=\"store_true\")\n parser.add_argument(\"-s\")\n parser.add_argument(\"-file\")\n parser.add_argument(\"-out\")\n args = parser.parse_args()\n\n if(args.decode):\n if(args.file):\n print(\"Reading\", args.file)\n file = open(args.file, \"r\")\n print(\"Result: \", decode(file.readline()))\n else:\n print(\"Result: \", decode(args.s))\n if(args.out):\n file = open(args.out, \"w+\")\n if(args.s):\n file.write(decode(args.s))\n if(args.file):\n encoded_file = open(args.file, \"r\")\n file.write(decode(encoded_file.readline()))\n print(\"Successfull write to\", args.out)\n elif(args.encode):\n if(args.file):\n file = open(args.file, \"r\")\n string = file.readline()\n else:\n string = args.s\n \n run = res_list_to_string(\n tricky(\n convert_bits_to_int_to_char(\n convert_string_binary_to_list(\n convert_list_to_string(\n convert_list_to_binary(\n convert_list_to_int(\n from_string_to_list(string))\n ))))))\n if(args.out):\n file = open(args.out, \"w+\")\n file.write(run)\n print(\"Successfull write in\", args.out)\n else:\n print(\"Result: \", run)\n\n\n else:\n print(\"Nothing to do.\")",
"def run_from_cmd():\n\n input_filepath, output_filepath = parse_command_line_args()\n\n if input_filepath:\n input = open(input_filepath, 'r')\n else:\n input = sys.stdin\n\n if output_filepath:\n output = open(output_filepath, 'wb')\n else:\n output = sys.stdout.buffer\n\n convert(input, output)",
"def main():\n file_reader = Text_Processor()\n publishers = file_reader.read_files()\n json_exporter(publishers)\n run()",
"def write_command(*args, **kwargs):\n encoding = 'default'\n if 'encoding' in kwargs:\n encoding = kwargs['encoding']\n # TODO: should we delete it from kwargs?\n stdin = kwargs['stdin']\n if encoding is None or encoding == 'default':\n stdin = encode(stdin)\n else:\n stdin = encode(stdin, encoding=encoding)\n if _capture_stderr and 'stderr' not in kwargs.keys():\n kwargs['stderr'] = PIPE\n process = feed_command(*args, **kwargs)\n unused, stderr = process.communicate(stdin)\n if encoding is not None:\n unused = _make_unicode(unused, encoding)\n stderr = _make_unicode(stderr, encoding)\n returncode = process.poll()\n if _capture_stderr and returncode:\n sys.stderr.write(stderr)\n return handle_errors(returncode, returncode, args, kwargs)",
"def embedded_pipeline():\n return \"\\n\".join(args['--cmd'])",
"def asCommandLine(self, args):\n try:\n inFile = args[\"xmlfile\"]\n outFile = args[\"out\"]\n except:\n raise PeachException(\"XmlAnalyzer requires two parameters, xmlfile and out.\")\n xml = _Xml2Peach().xml2Peach(\"file:\" + inFile)\n with open(outFile, \"wb+\") as fo:\n fo.write(xml)",
"def convert(inpipe):\r\n temp_args = split(\r\n \"convert -monitor -compress {} - pdf:-\".format(ARGS.compression))\r\n call(temp_args, stdin=inpipe)",
"def _add_input_output(input_files=None, output_file=None, pipe=True):\n\n input_files =\\\n input_files if isinstance(input_files, (list, tuple)) else [input_files]\n\n cmd = ''\n for input_file in input_files:\n if input_file:\n cmd += ' {}'.format(input_file)\n\n if output_file:\n cmd += ' > {}'.format(output_file)\n elif pipe:\n cmd += ' | '\n\n return cmd",
"def generateFile( self ):\n\n with temp_out_file( \"this\", ext( self.to_filename ) ) as outfile:\n args = ( \"pepper\", \"-o\", outfile.name, self.from_filename )\n ret, out, err = run_cmd( args, self.path )\n contents = read_file( outfile.name )\n\n assert ret == 0, ( '\"%s\" should return 0 but returned %d' % (\n cmd_desc( args, repl = ( 2, self.to_filename ) ), ret ) )\n\n expected_contents = read_file( os.path.join(\n self.path, self.to_filename ) )\n\n assert contents == expected_contents, (\n '\"%s\" should result in a file containing:\\n%s' +\n '\\nbut instead it gave:\\n%s' ) % (\n cmd_desc( args, repl = ( 2, self.to_filename ) ),\n expected_contents,\n contents )",
"def run_one(options, args, stem_prefix='', input_file=None):\n if input_file is None:\n input_file = options.input_file\n stem = stem_prefix + '-'.join(args)\n data_filename = output_file_name(options.output_directory, stem, 'h')\n stdout_filename = output_file_name(options.output_directory, stem, 'out')\n stderr_filename = output_file_name(options.output_directory, stem, 'err')\n status_filename = output_file_name(options.output_directory, stem, 'status')\n shutil.copy(input_file, data_filename)\n # Pass only the file basename, not the full path, to avoid getting the\n # directory name in error messages, which would make comparisons\n # between output directories more difficult.\n cmd = [os.path.abspath(options.script),\n '-f', os.path.basename(data_filename)]\n with open(stdout_filename, 'wb') as out:\n with open(stderr_filename, 'wb') as err:\n status = subprocess.call(cmd + args,\n cwd=options.output_directory,\n stdin=subprocess.DEVNULL,\n stdout=out, stderr=err)\n with open(status_filename, 'w') as status_file:\n status_file.write('{}\\n'.format(status))\n return stem + \"+\", data_filename",
"def test_file_write(self):\n\n args = self.parser.parse_args([self.str_len, '--file', '--raw-output'])\n\n self.randstr_output(args).process_parsed_args()\n output = sys.stdout.getvalue()\n\n filename = os.path.join(self.test_dir, args.file)\n with open(filename, 'r') as f:\n random_string = f.read()\n\n self.assertIn(random_string, output)",
"def generate_output(output, out = \".dvi\"):\n print 'hi', output\n # Standard tex inputs required for compiling .tex file\n filename = os.path.join(\"c:\",\"output\")\n tex = \".tex\"; pdf = \".pdf\"; dvi = \".dvi\"; ps = \".ps\"\n begin = [\"\\documentclass[12pt]{article}\\n\",\n \"\\usepackage{amsmath,url}\\n\",\n \"\\\\begin{document}\\n\",\n \"\\section{Cross-Section}\\n\\n\"]\n end = [\"\\end{document}\"]\n \n pieces = []\n # Crappy method to find out the type of the input, and then LaTeXify it\n if not isinstance(output, str):\n \n # Input is a list. Break it up and try to LaTeXify each piece\n if isinstance(output, list):\n try:\n print 'list'\n for i in range(len(output)):\n pieces.append(sp.latex(output[i]))\n except: e\n # Input is probably just a sympy expression\n else:\n try:\n output = sp.latex(output)+\"\\n\"\n except: \n e\n print e\n \n # Input is a string\n else: output = output+\"\\n\\n\"\n\n # If the input was a list, join all the pieces into one string with 2 spaces between them. \n if pieces != []:\n output = '\\n\\n'.join(pieces)\n # If the LaTeXifed input has any commas in it, split the expression at those commas and put some blank lines in between\n else:\n if output.find(',') > 0:\n output = '\\n'.join(output.split(','))\n\n print output\n # Create file and write to it\n FILE = open(filename+tex, \"w\")\n FILE.writelines(begin)\n FILE.writelines(output)\n FILE.writelines(end)\n FILE.close()\n\n if 1:\n # Create commands\n compile = [\"latex\",filename+tex]\n disdvi = [\"yap\", filename+dvi]\n \n # Process commands\n a = sub.Popen(compile,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n a.communicate()\n a.wait()\n \n # BROKEN\n if out == \"pdf\":\n tops = [\"dvips\", filename+dvi]\n topdf = [\"ps2pdf\", filename+ps]\n dispdf = [\"C:/Program Files/Adobe/Reader 9.0/Reader/AcroRd32\", filename+pdf]\n c = sub.check_call(tops)\n # c = sub.Popen(tops,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n # c.communicate\n # c.wait()\n d = sub.Popen(topdf,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n d.communicate\n d.wait()\n e = sub.Popen(dispdf,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n e.communicate\n else:\n b = sub.Popen(disdvi,stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n b.communicate()",
"def _perform_transform(self, data, **kwargs):\n # Ensure that kaleido subprocess is running\n self._ensure_kaleido()\n\n # Perform export\n export_spec = self._json_dumps(dict(kwargs, data=data)).encode('utf-8')\n\n # Write to process and read result within a lock so that can be\n # sure we're reading the response to our request\n with self._proc_lock:\n # Reset _std_error buffer\n self._std_error = io.BytesIO()\n\n # Write and flush spec\n self._proc.stdin.write(export_spec)\n self._proc.stdin.write(\"\\n\".encode('utf-8'))\n self._proc.stdin.flush()\n response = self._proc.stdout.readline()\n\n response_string = response.decode('utf-8')\n if not response_string:\n message = (\n \"Transform failed. Error stream:\\n\\n\" +\n self._get_decoded_std_error()\n )\n raise ValueError(message)\n try:\n response = json.loads(response_string)\n except JSONDecodeError:\n print(\"Invalid JSON: \" + repr(response_string))\n raise\n\n return response",
"def to_stream(stream_name: str) -> IO[Any]:\n if stream_name == \"<stdout>\":\n return sys.__stdout__\n\n if stream_name == \"<stderr>\":\n return sys.__stderr__\n\n stream_file = Path(stream_name)\n if not stream_file.exists() or not stream_file.is_file():\n raise argparse.ArgumentTypeError(f\"{stream_name} is not a file\")\n\n try:\n return stream_file.open(\"a\")\n except:\n raise argparse.ArgumentTypeError(f\"could not open {stream_name} for writing\")",
"def test_save(self):\n\n out,err=Popen(\"cat testdata/mail-001 | python mail2json.py save x.out\", \n stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True).communicate()\n assert not err, err\n output=[ l.split('/x.out/')[1] for l in out.strip('\\n').split('\\n') ]\n # \n # output: json file representing the mime structure of email and any attachments are printed.\n # \n self.assertEqual(output, \n ['%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E/img_1871.mov',\n '%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E/md.json'])\n # \n # output directory layout\n # \n self.assertEqual(\n list(os.walk('./x.out')),\n [('./x.out', \n ['%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E'], \n []), \n ('./x.out/%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E', \n [], \n ['md.json', 'img_1871.mov'])])\n # \n # json-ified mime message looks like this\n # \n self.assertEqual(\n json.load(file('x.out/%3CDEADBEEF-F52B-4B36-85D0-A85CF7B02C40%40i.example.com%3E/md.json')),\n {u'content': [{u'content': u'\\n\\n',\n u'header': {u'content-transfer-encoding': u'7bit',\n u'content-type': u'text/plain;\\n\\tcharset=us-ascii'}},\n {u'content': {u'encoding': u'base64',\n u'md5': u'762bc5d5715b6102111346c6069c23e5',\n u'media': True,\n u'name': u'img_1871.mov',\n u'suffix': u'.mov'},\n u'header': {u'content-disposition': u'attachment;\\n\\tfilename=IMG_1871.MOV',\n u'content-transfer-encoding': u'base64',\n u'content-type': u'video/quicktime;\\n\\tname=IMG_1871.MOV'}}],\n u'from': [u'tengu@example.com'],\n u'header': {u'content-transfer-encoding': u'7bit',\n u'content-type': u'multipart/mixed; boundary=Apple-Mail-E670757C-566F-46A7-82A7-DEADBEEF',\n u'date': u'Fri, 7 Feb 2014 09:07:23 +0900',\n u'delivered-to': u'skydog@example.com',\n u'from': {u'addr': u'tengu@example.com', u'name': u'Tengu'},\n u'message-id': u'<DEADBEEF-F52B-4B36-85D0-A85CF7B02C40@i.example.com>',\n u'mime-version': u'1.0 (1.0)',\n u'received': u'from [10.0.1.4] ([100.100.100.100] [100.100.100.100])\\n by hoge.i.example.com with ESMTP\\n id <20140207000724308.PHJN.36465.hoge.i.example.com@hoge.mailsv.example.com>\\n for <skydog@example.com>; Fri, 7 Feb 2014 09:07:24 +0900',\n u'return-path': u'<tengu@example.com>',\n u'to': u'skydog@example.com',\n u'x-mailer': u'iPhone Mail (11B554a)',\n u'x-original-to': u'skydog@example.com',\n u'x-sb-service': u'Virus-Checked'},\n u'media': [{u'encoding': u'base64',\n u'md5': u'762bc5d5715b6102111346c6069c23e5',\n u'media': True,\n u'name': u'img_1871.mov',\n u'suffix': u'.mov'}],\n u'message-id': [u'<DEADBEEF-F52B-4B36-85D0-A85CF7B02C40@i.example.com>'],\n u'text': [u'\\n\\n']}\n )",
"def dummy_runner(output):\n if isinstance(output, str):\n output = output.encode(ENCODING)\n def runner(*args, **kwargs):\n return output\n\n return runner",
"def convert_stdstreams_to_files(clt: cwl.CommandLineTool) -> None:\n for out in clt.outputs:\n if out.type == \"stdout\":\n if out.outputBinding is not None:\n raise ValidationException(\n \"Not allowed to specify outputBinding when using stdout shortcut.\"\n )\n if clt.stdout is None:\n clt.stdout = str(\n hashlib.sha1( # nosec\n json_dumps(clt.save(), sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n )\n out.type = \"File\"\n out.outputBinding = cwl.CommandOutputBinding(glob=clt.stdout)\n elif out.type == \"stderr\":\n if out.outputBinding is not None:\n raise ValidationException(\n \"Not allowed to specify outputBinding when using stderr shortcut.\"\n )\n if clt.stderr is None:\n clt.stderr = str(\n hashlib.sha1( # nosec\n json_dumps(clt.save(), sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n )\n out.type = \"File\"\n out.outputBinding = cwl.CommandOutputBinding(glob=clt.stderr)\n for inp in clt.inputs:\n if inp.type == \"stdin\":\n if inp.inputBinding is not None:\n raise ValidationException(\n \"Not allowed to specify unputBinding when using stdin shortcut.\"\n )\n if clt.stdin is not None:\n raise ValidationException(\n \"Not allowed to specify stdin path when using stdin type shortcut.\"\n )\n else:\n clt.stdin = (\n \"$(inputs.%s.path)\"\n % cast(str, inp.id).rpartition(\"#\")[2].split(\"/\")[-1]\n )\n inp.type = \"File\"",
"def make_bin_outputs(expt_dir, outputs_fname, cutoff):\n outputs = proc.load_outputs(outputs_fname)\n bin_outputs = proc.bin_transform_outputs(outputs, cutoff)\n bin_out_fname = expt_dir + \"/process/bin_outputs.cutoff_{0}.txt\".format(cutoff)\n proc.write_outputs(bin_outputs, bin_out_fname)",
"def main():\r\n\r\n parser = argparse.ArgumentParser(description=\"LSB Steganography Toy\")\r\n\r\n parser.add_argument(\"-i\",\"--inputfile\", action= \"store\", dest = \"inputfile\", help=\"Stores the input image file\", required= True)\r\n parser.add_argument(\"-m\",\"--message\", action= \"store\", dest=\"message\",help=\"the string you want to encode into an image\")\r\n parser.add_argument(\"-e\", \"--encode\", action = \"store_true\", default = False, dest = \"boolean_switch_encode\", help = \"set switch to true that you want to encode the image\")\r\n parser.add_argument(\"-d\",\"--decode\", action = \"store_true\", default = False, dest = \"boolean_switch_decode\", help = \"set switch to true that you want to decode the image\")\r\n\r\n results = parser.parse_args()\r\n\r\n inputfile = results.inputfile\r\n message = results.message\r\n boolean_switch_encode = results.boolean_switch_encode\r\n boolean_switch_decode = results.boolean_switch_decode\r\n\r\n if (boolean_switch_encode == True):\r\n encode(message, inputfile)\r\n print(\"Encode is successful\")\r\n print(message)\r\n\r\n else:\r\n print(\"Encode unsuccessful, message must be longer.\")\r\n\r\n if (boolean_switch_decode == True):\r\n decode(inputfile)\r\n print (\"Decoded file is saved in images/decoded.png\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input string, returns a dictionary of HTML document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client. | def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=True,
initial_header_level=1):
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts | [
"def direct_from_string(text: str) -> dict:\n return MarkdownTextObject(text=text).to_dict()",
"def _parse_fragment(fragment_string: str) -> Dict[str, str]:\n fragment_string = fragment_string.lstrip('#')\n\n try:\n return dict(\n cast(Tuple[str, str], tuple(key_value_string.split('=')))\n for key_value_string in fragment_string.split('&')\n )\n except ValueError:\n raise ValueError(f'Invalid fragment string {fragment_string}')",
"def direct_from_string(text: str) -> dict:\n return PlainTextObject(text=text).to_dict()",
"def headerDict(header_string):\n\t# >7244:002ce8 FBpp0236088 gene=FBgn0208790 orthodb8_OG=EOG8MGTH1 orthodb8_level=32281 organism_name=`Drosophila virilis` uniprot_de=`GJ21671`\n\t# Handling awful cases like uniprot_de=`Probable tRNA 2`-O-ribose methyltransferase`\n\theader_string = header_string.replace(\"`-\", \"'-\")\n\tquote_split = header_string.split(\"`\")\n\tdef garble(x):\n\t\treturn x.replace(\" \", \"@*#/*\")\n\tdef degarble(x):\n\t\treturn x.replace(\"@*#/*\", \" \")\n\treform = quote_split[0]\n\txi = 1\n\twhile xi < len(quote_split):\n\t\t# string in quotes\n\t\treform += garble(quote_split[xi])\n\t\t# next string\n\t\treform += quote_split[xi+1]\n\t\txi = xi+2\n\t# Split \n\n\td = {}\n\tfor entry in reform.split():\n\t\tif '=' in entry:\n\t\t\tsp = entry.split('=')\n\t\t\td[sp[0]] = degarble(sp[1])\n\t\n\treturn d",
"def parse_resource_document(self, content):\n\n content = content.strip()\n\n if not content.startswith('<html>'):\n # this is not a full HTML doc, probably content w/o title, tags, etc\n return dict(body=content)\n\n result = {}\n if '<title>' in content and '</title>' in content:\n result['subject'] = content[content.find('<title>') + 7:content.find('</title>')].strip()\n result['body'] = content[content.find('<body>') + 6:content.find('</body>')].strip()\n\n return result",
"def tags_from_string(tag_string):\n return dict(map(lambda kv: kv.split('='), tag_string.split(';')))",
"def split_html(html_string):\n\n try:\n i = html_string.index(\"<body\")\n j = html_string.index(\">\", i) + 1\n k = html_string.index(\"</body\")\n except ValueError:\n raise Exception(\"This is not a full html document.\")\n start = html_string[:j]\n body = html_string[j:k]\n ending = html_string[k:]\n return start, body, ending",
"def create_chapter_from_string(self, html_string, url=None, title=None, request_object=None):\n if request_object:\n # Test case: https://new.qq.com/omn/20180816/20180816A0A0D0.html which return headers \"content-type: text/html; charset=GB2312\"\n # ... shouldn't make it utf-8\n if not request_object.encoding: # just in case, default depends on header content-type(alternative to html meta)\n request_object.encoding = 'utf-8'\n html_string = request_object.text\n else:\n # test case(ISO-8859-1): http://castic.xiaoxiaotong.org/2019/studentDetails.html?77061\n try:\n html_string = request_object.text.encode(request_object.encoding).decode('utf-8')\n except UnicodeDecodeError:\n # test case: https://www.dawuxia.net/forum.php?mod=viewthread&tid=1034211\n html_string = request_object.text\n elif not html_string: #if 404, request_object will None\n html_string = '<html></html>'\n #print(html_string)\n clean_html_string = self.clean_function(html_string)\n #print(clean_html_string)\n clean_xhtml_string = clean.html_to_xhtml(clean_html_string)\n if title:\n pass\n else:\n try:\n if request_object:\n root = BeautifulSoup(html_string, 'html.parser')\n meta_encoding = hole_meta_encoding(root)\n #print(meta_encoding)\n if meta_encoding and (meta_encoding.lower() != 'utf-8'):\n print('Encoding to meta encoding: ' + repr(meta_encoding))\n request_object.encoding = meta_encoding\n html_string = request_object.text\n root = BeautifulSoup(html_string, 'html.parser')\n clean_html_string = self.clean_function(html_string)\n clean_xhtml_string = clean.html_to_xhtml(clean_html_string)\n \n else:\n root = BeautifulSoup(html_string, 'html.parser')\n\n title_node = root.title\n if title_node is not None:\n #title = unicode(title_node.string)\n title = title_node.string\n if title == None:\n title = 'Unknown title'\n else:\n raise ValueError\n except (IndexError, ValueError):\n title = 'Ebook Chapter'\n #print(clean_xhtml_string)\n return Chapter(clean_xhtml_string, title, url)",
"def split(self, string):\n\n # I prefer the Jekyll front matter format\n # but for compatibility keep the Flask-FlatPages one:\n\n lines = iter(string.split('\\n'))\n\n meta = '\\n'.join(itertools.takewhile(str.strip, lines))\n body = '\\n'.join(lines)\n\n return meta, body",
"def parseString(input_string):\r\n elements = {}\r\n while input_string:\r\n result = re.match(r\"[{}][{}]?\".format(CAP_CHARS, LOW_CHARS), input_string)\r\n \r\n # for fear that the parser cannot recognize the material string\r\n try:\r\n ele_str = result.group(0)\r\n pos_ele = result.span()[1]\r\n except AttributeError:\r\n return {}\r\n\r\n if pos_ele < len(input_string) and input_string[pos_ele].isdigit():\r\n result = re.match(r\"\\d+\\.?\\d*\", input_string[pos_ele:])\r\n pos_num = result.span()[1]\r\n number = float(result.group(0))\r\n else:\r\n pos_num = 0\r\n number = 1.0\r\n \r\n try:\r\n ele_index = lookupEle(ele_str)\r\n except KeyError:\r\n raise NoSuchElementError(ele_str)\r\n # one element could appear multiple times in one material string\r\n if ele_index not in elements: \r\n elements[ele_index] = 0.0\r\n \r\n elements[ele_index] += number\r\n input_string = input_string[(pos_num+pos_ele):]\r\n \r\n return elements",
"def hl7_str_to_dict(s, use_long_name=True):\n #s = s.replace(\"\\n\", \"\\r\")\n print(s)\n try:\n m = parse_message(s)\n return hl7_message_to_dict(m, use_long_name=use_long_name)\n except ParserError:\n return dict()",
"def read_bibstring(instring, string_dict={}): ###parses a bibtex string into a list of dictionaries\n\tdlist = []\n\tlines = []\n\n# ADDED PARAMETER FOR string_dict\n#\tstring_dict = {}\n\n#\tprint instring\n\tfor line in string.split(instring,'\\n'):\n\t\tif string.find(line,'--BREAK--') >= 0: \n\t\t\tbreak\n\t\telse: lines = lines + [string.strip(line)]\n\tinstring = string.join(lines,'\\n')\n\titems = string.split('\\n'+ instring,'\\n@')\n\t #### must add the leading '\\n' in case string starts with an '@'\n\tfor item in items[1:]:\n\t\t\t(d,string_dict) = read_bibitem(item,string_dict)\n\t\t\tdlist = dlist + [d]\t\n\treturn dlist",
"def _load_root_from_string(text):\n\n root = html.fromstring(text)\n return root",
"def get_form_as_dict(response):\n html_parser = etree.HTMLParser()\n root = etree.fromstring(response.get_data(), html_parser)\n input_elements = CSSSelector(\"input\")(root)\n form = {ie.attrib[\"name\"].replace(\"-input\", \"\"): ie for ie in input_elements}\n form[\"description\"] = CSSSelector(\"textarea#description-textarea\")(root)[0]\n return form",
"def html_to_dict(string):\n soup = BeautifulSoup(string, 'html.parser')\n recipe = {}\n recipe['title'] = soup.find(\"h1\").string\n try:\n recipe['text'] = soup.select(\"h1 + p\")[0].string.strip()\n except IndexError:\n recipe['text'] = None\n try:\n # Case one, author still exists\n recipe['author'] = soup.find(\"div\", class_=\"recipe-author\").find_all(\"span\")[-1].text.strip()\n except IndexError:\n # author was deleted\n recipe['author'] = soup.find(\"div\", class_=\"recipe-author\").find(\"div\", class_=\"ds-mb-right\").text.strip()\n ingredients = soup.find(\"table\", class_=\"ingredients\").find_all(\"tr\")\n recipe['ingredients'] = [(i.find('td', class_=\"td-right\").text.strip(),\n i.find('td', class_=\"td-left\").text.strip().split()\n )\n for i in ingredients\n if i.find(\"th\") is None]\n recipe[\"servings\"] = soup.find(\"div\", class_=\"recipe-servings\").find(\"input\").attrs['value']\n recipe[\"rating\"] = soup.find(\"div\", class_=\"ds-rating-avg\").find(\"strong\").text\n recipe[\"rates\"] = soup.find(\"div\", class_=\"ds-rating-count\").find(\"strong\").text\n # any and all of these might actually be optional. Currently it only looked\n # like kcal is optional.\n recipe[\"preptime\"] = soup.find(\"span\", class_=\"recipe-preptime\").find(text=True, recursive=False).strip()\n recipe[\"difficulty\"] = soup.find(\"span\", class_=\"recipe-difficulty\").find(text=True, recursive=False).strip()\n recipe[\"date\"] = soup.find(\"span\", class_=\"recipe-date\").find(text=True, recursive=False).strip()\n try:\n recipe[\"kcal\"] = soup.find(\"span\", class_=\"recipe-kcalories\").find(text=True, recursive=False).strip()\n except AttributeError:\n recipe[\"kcal\"] = None\n # instruction_meta = soup.find(\"small\", class_=\"ds-recipe-meta\")\n # instruction_meta doesn't seem reliable.\n instructions = soup.select(\"small.ds-recipe-meta + div.ds-box\")[0]\n recipe[\"instructions\"] = instructions.text.strip()\n recipe[\"comment_count\"] = soup.find(\"button\", class_=\"recipe-comments-anchor\").find(\"span\").text\n comments = soup.find(\"article\", class_=\"recipe-comments\")\n comments = comments.find_all(\"div\", class_=\"comment-item\")\n recipe['comments'] = []\n for comment in comments:\n comment = {\n \"user\": comment.find(\"strong\").text.strip(),\n \"text\": comment.find(\"p\").text.strip(),\n \"date\": comment.find(\"div\", class_=\"comment-date\").text.strip(),\n }\n recipe[\"comments\"].append(comment)\n # Get the categories\n recipe['categories'] = [tag.text.strip() for tag in soup.select(\"div > a.ds-tag\")]\n return recipe",
"def find_pdfdocencoding(encoding):\n\n if encoding != 'pdfdocencoding':\n return\n\n # Create the decoding map based on the table in section D.2 of the\n # PDF 1.7 manual\n\n # Start off with the characters with 1:1 correspondence\n decoding_map = set(range(0x20, 0x7F)) | set(range(0xA1, 0x100))\n decoding_map.update((0x09, 0x0A, 0x0D))\n decoding_map.remove(0xAD)\n decoding_map = dict((x, x) for x in decoding_map)\n\n # Add in the special Unicode characters\n decoding_map.update(zip(range(0x18, 0x20), (\n 0x02D8, 0x02C7, 0x02C6, 0x02D9, 0x02DD, 0x02DB, 0x02DA, 0x02DC)))\n decoding_map.update(zip(range(0x80, 0x9F), (\n 0x2022, 0x2020, 0x2021, 0x2026, 0x2014, 0x2013, 0x0192, 0x2044,\n 0x2039, 0x203A, 0x2212, 0x2030, 0x201E, 0x201C, 0x201D, 0x2018,\n 0x2019, 0x201A, 0x2122, 0xFB01, 0xFB02, 0x0141, 0x0152, 0x0160,\n 0x0178, 0x017D, 0x0131, 0x0142, 0x0153, 0x0161, 0x017E)))\n decoding_map[0xA0] = 0x20AC\n\n # Make the encoding map from the decoding map\n encoding_map = codecs.make_encoding_map(decoding_map)\n\n # Not every PDF producer follows the spec, so conform to Postel's law\n # and interpret encoded strings if at all possible. In particular, they\n # might have nulls and form-feeds, judging by random code snippets\n # floating around the internet.\n decoding_map.update(((x, x) for x in range(0x18)))\n\n def encode(input, errors='strict'):\n return codecs.charmap_encode(input, errors, encoding_map)\n\n def decode(input, errors='strict'):\n return codecs.charmap_decode(input, errors, decoding_map)\n\n return codecs.CodecInfo(encode, decode, name='pdfdocencoding')",
"def _str2dict(self, istring):\n retDict = {}\n if istring == EMPTY_FIELD:\n return retDict\n for feat in istring.split(FEAT_SEP):\n # feature format changed in MATE\n if FEAT_VALUE_SEP_RE.search(feat):\n retDict.update((feat.split(FEAT_VALUE_SEP),))\n else:\n retDict.update([self._new2old(feat)])\n return retDict",
"def parse_html(html_string):\n return BeautifulSoup(html_string, \"html.parser\")",
"def get_dict(string: str) -> Dict[str, int]:\n splited = string[1:-1].split(\", \")\n my_dict = {}\n for i in splited:\n key, value = i.split(\":\")\n if key[0] == \"'\" and key[-1] == \"'\":\n key = key[1:-1]\n if value[0] == \"'\" and value[-1] == \"'\":\n value = value[1:-1]\n my_dict[key] = value\n return my_dict",
"def parse_form_encoded_body(form):\n dict = {}\n\n fields = form.split('&')\n for field in fields:\n key_value = field.split('=')\n key = key_value[0]\n value = None if len(key_value) == 1 else key_value[1]\n dict[key] = value\n\n return dict"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an input string, returns an HTML fragment as a string. The return value is the contents of the <body> element. | def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=True, initial_header_level=1):
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment | [
"def fragment_fromstring(html, create_parent=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n accept_leading_text = bool(create_parent)\n\n elements = fragments_fromstring(\n html, guess_charset=guess_charset, parser=parser,\n no_leading_text=not accept_leading_text)\n\n if create_parent:\n if not isinstance(create_parent, _strings):\n create_parent = 'div'\n new_root = Element(create_parent)\n if elements:\n if isinstance(elements[0], _strings):\n new_root.text = elements[0]\n del elements[0]\n new_root.extend(elements)\n return new_root\n\n if not elements:\n raise etree.ParserError('No elements found')\n if len(elements) > 1:\n raise etree.ParserError('Multiple elements found')\n result = elements[0]\n if result.tail and result.tail.strip():\n raise etree.ParserError('Element followed by text: %r' % result.tail)\n result.tail = None\n return result",
"def innerhtml(el: Element, encoding: str=\"utf-8\") -> str:\n children = [_ for _ in el.iterchildren()]\n if not len(children):\n return el.text_content()\n text = \"%s\" % el.text if el.text else \"\"\n return \"%s%s\" % (text, \"\".join([tostring(c).decode(encoding) for\n c in el.iterchildren()]))",
"def HTML(html): # pylint: disable=invalid-name\n return markupsafe.Markup(html)",
"def html(s):\n pattern = s.strip()\n pattern = re.sub(r'\\s*<\\s*', '<', pattern)\n pattern = re.sub(r'\\s*>\\s*', '>', pattern)\n pattern = re.escape(pattern)\n pattern = re.sub(r'(?:\\\\\\s)+', r'\\\\s+', pattern)\n pattern = re.sub(r'<', r'\\\\s*<\\\\s*', pattern)\n pattern = re.sub(r'>', r'\\\\s*>\\\\s*', pattern)\n pattern = '\\\\s*' + pattern + '\\\\s*'\n pattern_object = re.compile(pattern, flags=re.IGNORECASE)\n return lambda c, x: \\\n [(True, c, x[len(pattern_object.match(x)[0]):])] if pattern_object.match(x) \\\n else [(False, s.strip(), x)]",
"def toString(self):\n\n return self.openTagToString() + self.innerHTMLtoString() + self.endTagToString()",
"def fragments_fromstring(html, no_leading_text=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n if parser is None:\n parser = html_parser\n\n children = parser.parseFragment(html, 'div', useChardet=guess_charset)\n if children and isinstance(children[0], _strings):\n if no_leading_text:\n if children[0].strip():\n raise etree.ParserError('There is leading text: %r' %\n children[0])\n del children[0]\n return children",
"def create_test_html():\n return lxml.html.fromstring(\"\"\"<html>\n <head>\n </head>\n <body>\n <div class=\"test\">Some <em>text</em></div>\n <img src=\"some_location\" alt=\"Alt text\" width=540>\n More <b>text</b>\n </body>\n </html>\"\"\")",
"def rst2html(rst_string):\r\n overrides = {'output_encoding': 'latin1', 'initial_header_level': 1}\r\n html_string = docCore.publish_string(\r\n source=rst_string, \r\n writer_name='html', settings_overrides=overrides)\r\n return html_string",
"def innerHTML(self):\n return self._innerHTML",
"def clean_html5lib(input):\n from html5lib import treebuilders, treewalkers, serializer, sanitizer\n\n p = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder(\"dom\"))\n\n dom_tree = p.parseFragment(input)\n\n walker = treewalkers.getTreeWalker(\"dom\")\n\n stream = walker(dom_tree)\n\n s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False)\n\n return \"\".join(s.serialize(stream))",
"def parse_html(html_string):\n return BeautifulSoup(html_string, \"html.parser\")",
"def textContent(node):\n return ''.join(node.itertext())",
"def textstring(el):\n strval = u''\n strval += (el.etree_element.text or u'')\n for elem in el.iter_children():\n strval += textstring(elem)\n strval += (el.etree_element.tail or u'')\n return strval",
"def strip_html_tags(string):\n return re.sub('<[^<]+?>', '', string)",
"def tostring(element):\n rv = []\n\n def serializeElement(element):\n if not hasattr(element, \"tag\"):\n if element.docinfo.internalDTD:\n if element.docinfo.doctype:\n dtd_str = element.docinfo.doctype\n else:\n dtd_str = \"<!DOCTYPE %s>\" % element.docinfo.root_name\n rv.append(dtd_str)\n serializeElement(element.getroot())\n\n elif element.tag == comment_type:\n rv.append(\"<!--%s-->\" % (element.text,))\n\n else:\n # This is assumed to be an ordinary element\n if not element.attrib:\n rv.append(\"<%s>\" % (element.tag,))\n else:\n attr = \" \".join([\"%s=\\\"%s\\\"\" % (name, value)\n for name, value in element.attrib.items()])\n rv.append(\"<%s %s>\" % (element.tag, attr))\n if element.text:\n rv.append(element.text)\n\n for child in element:\n serializeElement(child)\n\n rv.append(\"</%s>\" % (element.tag,))\n\n if hasattr(element, \"tail\") and element.tail:\n rv.append(element.tail)\n\n serializeElement(element)\n\n return \"\".join(rv)",
"def html_to_safe_dom(html_string):\n\n tag_bindings = get_tag_bindings()\n\n node_list = safe_dom.NodeList()\n if not html_string:\n return node_list\n\n def _process_html_tree(elt):\n node_list = safe_dom.NodeList()\n\n tail = elt.tail\n\n if elt.tag in tag_bindings:\n elt = tag_bindings[elt.tag]().render(elt)\n\n out_elt = safe_dom.Element(elt.tag)\n out_elt.add_attribute(**elt.attrib)\n if elt.text:\n out_elt.add_text(elt.text)\n for child in elt:\n out_elt.add_children(_process_html_tree(child))\n node_list.append(out_elt)\n if tail:\n node_list.append(safe_dom.Text(tail))\n return node_list\n\n parser = html5lib.HTMLParser(\n tree=html5lib.treebuilders.getTreeBuilder('etree', cElementTree),\n namespaceHTMLElements=False)\n root = parser.parseFragment('<div>%s</div>' % html_string)[0]\n\n if root.text:\n node_list.append(safe_dom.Text(root.text))\n\n for elt in root:\n node_list.append(_process_html_tree(elt))\n\n return node_list",
"def unescape_html(s):\n return HTMLParser.unescape.__func__(HTMLParser, s)",
"def pango_markup(text: str, tag: str = \"span\", **attrib) -> str:\n e = Tree.Element(tag, attrib=attrib)\n e.text = text\n return Tree.tostring(e, encoding=\"unicode\")",
"def get_inner_html(self, locator):\n js = \"this.browserbot.findElement('%s').innerHTML\" % locator\n return self.execute_javascript(js).strip()",
"def escape_html(s):\n return cgi.escape(s, quote = True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
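
The row above pairs the docstring with an `html_body()` helper that delegates to an `html_parts()` function not shown in the row. A minimal usage sketch follows; it assumes a standard Docutils install, where an equivalent pair ships as `docutils.examples.html_parts`/`html_body`, and the reStructuredText input string is purely illustrative.

```python
# Hedged sketch: calling an html_body()-style helper from docutils.examples.
from docutils.examples import html_body

fragment = html_body(u"*Hello*, `Docutils <https://docutils.sourceforge.io/>`_!",
                     doctitle=False)
# With the default output_encoding='unicode', `fragment` is a string holding only
# the contents of the <body> element (roughly a <div class="document"> wrapping
# <p><em>Hello</em>, ...</p>).
print(fragment)
```
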
Store multiple values in `parser.values`. (Option callback.) Store `None` for each attribute named in `args`, and store the value for each key (attribute name) in `kwargs`. | def store_multiple(option, opt, value, parser, *args, **kwargs):
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value) | [
"def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value in self.kwargs.items():\n # setattr(self, key, value)",
"def processArguments(self, args = None):\n\n if hasattr(sys, \"argv\") and args == sys.argv:\n args = sys.argv[1:]\n\n max = len(args) # maximum index + 1\n self.freeValues = [] # array to hold return values\n self.optionValues= {}\n index = 0 # initial index\n self.terminator = None\n self.termValues = []\n\n while index < max:\n # obtain argument\n arg = args[index]\n # increment index -- REMEMBER; it is NOW incremented\n index = index + 1\n\n # terminate immediately if option terminator encountered\n if self._isTerminator(arg):\n self.freeValues = self.freeValues + args[index:]\n self.termValues = args[index:]\n return\n\n # is this possibly an option?\n match = self.optionStartExpr.match(arg)\n if match is None:\n # not an option-- add to freeValues\n self.freeValues = self.freeValues + [arg]\n if not self.orderMixed:\n # mixing not allowed; add rest of args as freeValues\n self.freeValues = self.freeValues + args[index:]\n # return to caller\n return\n else:\n continue\n\n # grab name\n optName = match.group('option')\n\n # obtain next argument-- index has already been incremented\n nextArg = match.group('arg')\n if nextArg:\n nextArg = nextArg[1:]\n index = index - 1 # put it back\n else:\n try:\n nextArg = args[index]\n except:\n nextArg = None\n\n # transpose to lower case, if necessary\n if self.ignoreCase:\n optName = string.lower(optName)\n\n # obtain defining tuple\n tuples = self._getArgTuple(optName)\n\n if tuples == None:\n raise ArgumentError('Illegal option \\'' + arg + '\\'')\n elif len(tuples) > 1:\n raise ArgumentError('Ambiguous option \\'' + arg +\n '\\'; matches ' +\n repr(map(lambda x: x[0], tuples)))\n else:\n config = tuples[0]\n\n # config is now set to the configuration tuple for the\n # argument\n (fullName, spec, realName) = config\n (optType, optMode, optDefault, optMultiple) = spec\n\n # if opt mode required, but nextArg is none, raise an error\n if (optMode == ArgRequired):\n if (not nextArg) or self._isTerminator(nextArg):\n# print nextArg\n raise ArgumentError('Option \\'' + arg +\n '\\' requires an argument of type ' +\n optType)\n\n if (not optMode == None) and nextArg and (not self._isTerminator(nextArg)):\n # nextArg defined, option configured to possibly consume arg\n try:\n # grab conversion function-- the try is more for internal diagnostics\n func = ConversionFunctions[optType]\n try:\n optionValue = func(nextArg)\n index = index + 1\n except:\n # only raise conversion error if REQUIRED to consume argument\n if optMode == ArgRequired:\n raise ArgumentError('Invalid argument to option \\''\n + arg + '\\'; should be \\'' +\n optType + '\\'')\n else:\n optionValue = optDefault\n except ArgumentError:\n raise\n except:\n raise ArgumentError('(' + arg +\n ') Conversion function for \\'' +\n optType + '\\' not found.')\n else:\n optionValue = optDefault\n\n # add value to options dictionary\n if optMultiple:\n # can be multiple values\n try:\n # try to append element\n self.optionValues[realName] = self.optionValues[realName] + [optionValue]\n except:\n # failed-- must not exist; add it\n self.optionValues[realName] = [optionValue]\n else:\n # only one value per\n if self.isPosixCompliant and realName in self.optionValues:\n raise ArgumentError('Argument \\'' + arg +\n '\\' occurs multiple times.')\n\n self.optionValues[realName] = optionValue",
"def get_dictionary_of_values(self, *args):\n values = {}\n for data in args:\n values[data] = self.get_value(data)\n\n return values",
"def _parse_args(parser, argv):\n i = 0\n args = []\n kwargs = {}\n\n for action in parser._option_string_actions.values():\n if action.dest != \"help\":\n kwargs[action.dest] = action.default\n\n positionals = parser._get_positional_actions()\n if len(positionals) == 0:\n wildcard = None\n elif len(positionals) == 1:\n action = positionals[0]\n if action.nargs != argparse.REMAINDER:\n raise api.api_utils.NoTracebackError(\n f\"Cannot parse position argument: {action} with nargs={action.nargs}\"\n )\n wildcard = action.dest\n kwargs[wildcard] = []\n else:\n raise api.api_utils.NoTracebackError(\n f\"Cannot handle multiple positional arguments: {positionals}\"\n )\n\n while i < len(argv):\n arg = argv[i]\n if arg.startswith(\"--\"):\n if \"=\" in arg:\n key, value = arg.split(\"=\", 1)\n else:\n key = arg\n value = None\n try:\n action = parser._option_string_actions[key]\n except KeyError:\n if wildcard is None:\n raise api.api_utils.NoTracebackError(f\"Unknown argument: {arg}\")\n kwargs[wildcard].append(arg)\n if i + 1 < len(argv) and not argv[i + 1].startswith(\"--\"):\n kwargs[wildcard].append(argv[i + 1])\n i += 1\n i += 1\n continue\n\n if isinstance(action, argparse._StoreAction):\n if value is None:\n value = argv[i + 1]\n i += 1\n elif isinstance(action, argparse._StoreConstAction):\n if value is not None:\n raise api.api_utils.NoTracebackError(\n f\"--{key} accepts no arguments, but got: {repr(value)}\"\n )\n value = action.const\n else:\n raise api.api_utils.NoTracebackError(f\"Unknown action: {action}\")\n kwargs[action.dest] = value\n else:\n if wildcard is not None:\n kwargs[wildcard].append(arg)\n else:\n args.append(arg)\n i += 1\n\n if wildcard is not None:\n kwargs[wildcard] = \" \".join(shlex.quote(a) for a in kwargs[wildcard])\n\n return args, kwargs",
"def _apply_kwargs(args, kwargs):\n\n for arg_name in kwargs:\n arg_value = str(kwargs[arg_name])\n args.append(\"--%s\" % (arg_name))\n if arg_value:\n args.append(arg_value)",
"def filter_args(**kwargs) -> Iterator[Tuple[str, Union[int, str]]]:\n for name, value in kwargs.items():\n if value is None:\n try:\n value = FILTER_DEFAULTS[name]()\n except KeyError:\n continue\n yield full_filter_name(name), value",
"def get_option_values(self):\n \n class CommandLineOptions(object):\n def __getattr__(self, name):\n # if an attribute can not be found, this is the last function called\n all_option_names=\", \".join(vars(self).keys())\n error_message=\"Unable to find option '{0}' in command line options.\\n\".format(name)\n error_message+=\"The available options are: {0}\".format(all_option_names)\n raise AttributeError(error_message)\n \n # get arguments from the command line (will not run again if already parsed)\n if not self._user_asked:\n self.ask_user()\n \n args=CommandLineOptions()\n for option in list(self._user_arguments.keys()) + list(self._arguments.keys()):\n option = re.sub(r'-', '_', option)\n value = self.get(option)\n setattr(args,option,value)\n \n return args",
"def get_args(self, argset):\n args = []\n kwargs = {}\n for element in argset or []:\n if isinstance(element, dict):\n kwargs.update(element)\n else:\n args.append(element)\n return args, kwargs",
"def set_attributes_from_kwargs(self, kwargs):\n for val in self.valid_kwargs:\n if val in kwargs:\n setattr(self, val, kwargs[val])",
"def set_arg_attributes(self):\n arg_spec = inspect.getfullargspec(self.func)\n\n self.args = [a for a in arg_spec.args if not a.startswith('default') and not a.startswith('_')]\n self.unpack_args = arg_spec.varargs\n self.unpack_kwargs = arg_spec.varkw\n\n if arg_spec.defaults:\n zipped = zip(reversed(arg_spec.args), reversed(arg_spec.defaults))\n self.default_args = {e[0]: e[1] for e in list(zipped)}",
"def pair_to_args(self, *args, **kwargs) -> Tuple:\n return [*args, *kwargs.values()]",
"def apply_kwargs_parser(parser):\n def inner_decorator(handler):\n def wrapped(**kwargs):\n parser_result = parser(**kwargs)\n kwargs.update(parser_result)\n handler(**kwargs)\n return wrapped\n return inner_decorator",
"def _parse(cls, node, path):\n kwargs = cls._parse_simple_attribs(node)\n kwargs.update(cls._parse_simple_elements(node, path))\n return kwargs",
"def _individual_args(args) -> None:\n if args is None:\n return\n\n if not isinstance(args, list):\n raise PluginValidationError(\n f\"Invalid {ConfigKeys.PLUGIN_ARGS.name} entry '{args}': must be a list\"\n )\n\n for arg in args:\n if not isinstance(arg, str):\n raise PluginValidationError(\n f\"Invalid plugin argument '{arg}': must be a string\"\n )",
"def parse_arglist(args):\n # per https://stackoverflow.com/a/49723227/318857\n\n args = 'f({})'.format(args)\n tree = ast.parse(args)\n funccall = tree.body[0].value\n\n args = [ast.literal_eval(arg) for arg in funccall.args]\n kwargs = {arg.arg: ast.literal_eval(arg.value)\n for arg in funccall.keywords}\n\n if len(args) > 2:\n raise TypeError(\n \"Expected at most 2 positional args but {} were given\".format(len(args)))\n\n if len(args) >= 1:\n kwargs['width'] = int(args[0])\n if len(args) >= 2:\n kwargs['height'] = int(args[1])\n\n return kwargs",
"def parse(self, cli_args=str(_sys.argv)[1:-1]):\n if cli_args is not _sys.argv:\n cli_args = cli_args.split()\n for i in range(len(cli_args)):\n cli_args[i] = cli_args[i].split('=')\n cli_args = sum(cli_args, [])\n\n self._ensure_required(cli_args)\n self._ensure_exclusive(cli_args)\n self._ensure_and_assign_values(cli_args)",
"def xs(name, parser_args, list_args):\n for args, kwargs in list_args:\n if len(set(args) & parser_args) > 0:\n yield args, kwargs\n\n else:\n if 'dest' in kwargs:\n if kwargs['dest'] == name:\n yield args, kwargs",
"def _init_kwargs(self, kwargs, kws):\n for k in kws:\n if k in kwargs:\n setattr(self, k, kwargs[k])",
"def parse_args_kwargs(parser, token):\n bits = token.contents.split(' ')\n\n if len(bits) <= 1:\n raise template.TemplateSyntaxError(\"'%s' takes at least one argument\" % bits[0])\n\n if token.contents[13] == '\"':\n end_quote = token.contents.index('\"', 14) + 1\n args = [template.Variable(token.contents[13:end_quote])]\n kwargs_start = end_quote\n else:\n try:\n next_space = token.contents.index(' ', 14)\n kwargs_start = next_space + 1\n except ValueError:\n next_space = None\n kwargs_start = None\n args = [template.Variable(token.contents[13:next_space])]\n\n kwargs = {}\n kwargs_list = token.contents[kwargs_start:].split(',')\n for kwargs_item in kwargs_list:\n if '=' in kwargs_item:\n k, v = kwargs_item.split('=', 1)\n k = k.strip()\n kwargs[k] = template.Variable(v)\n return args, kwargs",
"def fill_args(args, kwargs):\n for name, param in args.items():\n \n value = request.args.get(name)\n \n if value is None:\n value = request.form.get(name)\n \n if value is None:\n try:\n value = request.get_json(silent=True).get(name)\n except AttributeError:\n pass\n \n if value is None:\n if param.required:\n raise ApiError(message=f'Parameter {name} is required.', fields=name, what=REQUIRED)\n \n else:\n try:\n value = param.converter(value)\n except Exception as e:\n raise UnprocessableEntity(fields=name, what=BAD_VALUE,\n message=f'Failed to validate parameter {name}: {str(e)}')\n \n kwargs[name] = value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
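
The `store_multiple()` row above is an `optparse` option callback; the self-contained sketch below shows one way such a callback is typically wired. The `--plain`, `fancy_style`, and `plain_style` names are invented for illustration, and the callback body is copied from the row.

```python
# Illustrative wiring of an optparse callback in the store_multiple() style.
import optparse

def store_multiple(option, opt, value, parser, *args, **kwargs):
    for attribute in args:                # positional names: reset to None
        setattr(parser.values, attribute, None)
    for key, value in kwargs.items():     # keyword names: set explicitly
        setattr(parser.values, key, value)

parser = optparse.OptionParser()
parser.add_option('--plain', action='callback', callback=store_multiple,
                  callback_args=('fancy_style',),
                  callback_kwargs={'plain_style': True})
options, _ = parser.parse_args(['--plain'])
print(options.fancy_style, options.plain_style)  # -> None True
```
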
Read a configuration file during option processing. (Option callback.) | def read_config_file(option, opt, value, parser):
try:
new_settings = parser.get_config_file_settings(value)
except ValueError, error:
parser.error(error)
parser.values.update(new_settings, parser) | [
"def read(self):\n\n # Add options from config file.\n print self._config.get_all()\n for id, (val, type) in self._config.get_all().items():\n if type == 'src' and not self.check(id, val): # Don't use wrong paths\n log.warning(_('idg.options.not.valid.use.default') + id +\\\n \" \" + val)\n continue\n self._opts[id] = [val, type]\n\n dom = self._config.dom()\n if dom is None:\n log.error(_('idg.options.cant.parse.config.file') +\\\n self._config.path())\n return\n else:\n log.info(_('idg.options.using.config.file') + self._config.path())",
"def __parse_settings(self):\n result = self.load_config(file_name)\n for key, value in self._config.items():\n print \" \", option, \"=\", self.config.get(section, option)",
"def read_config(self, args):\n # Try to load configuration file if provided\n import yaml\n\n # This is all the config information in the file, including\n # things for other stages\n overall_config = yaml.load(open(self.get_input('config')), yaml.FullLoader)\n \n # The user can define global options that are inherited by\n # all the other sections if not already specified there.\n input_config = overall_config.get('global', {})\n\n # This is just the config info in the file for this stage.\n # It may be incomplete - there may be things specified on the\n # command line instead, or just using their default values\n stage_config = overall_config.get(self.name, {})\n input_config.update(stage_config)\n\n # Here we build up the actual configuration we use on this\n # run from all these sources\n my_config = {}\n\n # Loop over the options of the pipeline stage\n for x in self.config_options:\n opt = None\n opt_type = None\n\n # First look for a default value,\n # if a type (like int) is provided as the default it indicates that\n # this option doesn't have a default (i.e. is mandatory) and should\n # be explicitly provided with the specified type\n if type(self.config_options[x]) is type:\n opt_type = self.config_options[x]\n\n elif type(self.config_options[x]) is list:\n v = self.config_options[x][0]\n if type(v) is type:\n opt_type = v\n else:\n opt = self.config_options[x]\n opt_type = type(v)\n else:\n opt = self.config_options[x]\n opt_type = type(opt)\n\n # Second, look for the option in the configuration file and override\n # default if provided TODO: Check types\n if x in input_config:\n opt = input_config[x]\n\n # Finally check for command line option that would override the value\n # in the configuration file. Note that the argument parser should\n # already have taken care of type\n if args[x] is not None:\n opt = args[x]\n\n # Finally, check that we got at least some value for this option\n if opt is None:\n raise ValueError(f\"Missing configuration option {x} for stage {self.name}\")\n\n my_config[x] = opt\n\n # Unspecified parameters can also be copied over.\n # This will be needed for parameters that are more complicated, such\n # as dictionaries or other more structured parameter information.\n for x,val in input_config.items():\n # Omit things we've already dealt with above\n if x in self.config_options:\n continue\n # copy over everything else\n else:\n my_config[x] = val\n\n\n\n return my_config",
"def readConfiguration (configurationFilePath = None):\n \n pathList = getConfigurationPaths(configurationFilePath)\n \n # if not silent:\n # if len(pathList) is 1:\n # print(\"Loading options from {:s}\".format(pathList[0]))\n # else:\n # print(\"Loading overrides from {:s}\".format(pathList[-1]))\n\n configuration = DEFAULTCONFIGURATION\n configuration['CONFIGDIR'] = os.path.dirname(pathList[0])\n for path in pathList:\n configuration = parseConfiguration(path, configuration)\n\n return(configuration)\n\n # TODO: Validate configuration",
"def read_config_file(self):\n self.logger.debug('reading config file')\n path = Path(self.conversion_settings.working_directory, self._config_file)\n\n if path.is_file():\n self.read(path)\n self.logger.info(f'Data read from INI file is {self.__repr__()}')\n else:\n self.logger.warning(f'config.ini missing at {path}, generating new file and settings set to default.')\n if not config.silent:\n print(\"config.ini missing, generating new file.\")\n self.conversion_settings = self._default_quick_setting",
"def _read_config(fin, silent=False):\n \n # Global parameters to be edited\n global _CONFIG, _BLACKLIST, _TYPO_DELETE_SPACE, _TYPO_DELETE_CHAR\n global _TYPO_SWAP, _TYPO_INSERT, _TYPO_REPLACE\n global _PHONO_DELETE, _PHONO_INSERT, _PHONO_REPLACE, _PHONO_GROUP\n\n # Generate default config if it does not exist\n if pathlib.Path(_DEF_CONFIG).exists() == False:\n _default_config(silent=silent)\n \n # Validate input\n if type(fin) != str and fin != None:\n return None\n\n # Do nothing if input is None\n if fin == None:\n return None\n\n # Do nothing if selected file has already been loaded\n if fin == _CONFIG:\n return None\n\n # Regenerate default config\n if fin == _DEF_CONFIG:\n _CONFIG = _DEF_CONFIG\n return _default_config(silent=silent)\n \n # Read INI file and set (or reset) parameters\n if silent == False:\n print(\"Reading config file '\" + fin + \"' ...\")\n \n # Initialize config parser\n config = configparser.ConfigParser(allow_no_value=True)\n\n # Verify that config file exists\n if pathlib.Path(fin).exists() == False:\n if silent == False:\n print(\"Config file '\" + fin + \"' not found.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Read config file\n config.read(fin)\n \n # Read typographical section\n try:\n key = \"delete_space\"\n _TYPO_DELETE_SPACE = float(config[\"typo\"][key])\n key = \"delete_char\"\n _TYPO_DELETE_CHAR = float(config[\"typo\"][key])\n key = \"swap\"\n _TYPO_SWAP = float(config[\"typo\"][key])\n key = \"insert\"\n _TYPO_INSERT = float(config[\"typo\"][key])\n key = \"replace\"\n _TYPO_REPLACE = float(config[\"typo\"][key])\n except KeyError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'typo' section not found in '\" +\n fin + \"'.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n except ValueError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'typo' section in '\" + fin +\n \"' should be a number.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Validate all typographical parameters as probabilities on [0.0,1.0]\n valid = True\n if _TYPO_DELETE_SPACE < 0 or _TYPO_DELETE_SPACE > 1:\n valid = False\n if _TYPO_DELETE_CHAR < 0 or _TYPO_DELETE_CHAR > 1:\n valid = False\n if _TYPO_SWAP < 0 or _TYPO_SWAP > 1:\n valid = False\n if _TYPO_INSERT < 0 or _TYPO_INSERT > 1:\n valid = False\n if _TYPO_REPLACE < 0 or _TYPO_REPLACE > 1:\n valid = False\n if _TYPO_DELETE_CHAR + _TYPO_INSERT + _TYPO_REPLACE > 1:\n valid = False\n if valid == False:\n if silent == False:\n print(\"Invalid 'typo' parameter read in '\" + fin + \"'.\")\n print(\"All parameters should be probabilities between 0.0 and \" +\n \"1.0.\")\n print(\"The sum of 'delete_char', 'insert', and 'replace' should \" +\n \"not exceed 1.0.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n \n # Read phonological section\n try:\n key = \"delete\"\n _PHONO_DELETE = float(config[\"phono\"][key])\n key = \"insert\"\n _PHONO_INSERT = float(config[\"phono\"][key])\n key = \"replace\"\n _PHONO_REPLACE = float(config[\"phono\"][key])\n key = \"group\"\n _PHONO_GROUP = float(config[\"phono\"][key])\n except KeyError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'phono' section not found in '\" +\n fin + \"'.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n except ValueError:\n if silent == False:\n print(\"Key '\" + key + \"' from 'phono' section in '\" + fin +\n \"' 
should be a number.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Validate all phonological parameters as probabilities on [0.0,1.0]\n valid = True\n if _PHONO_DELETE < 0 or _PHONO_DELETE > 1:\n valid = False\n if _PHONO_INSERT < 0 or _PHONO_INSERT > 1:\n valid = False\n if _PHONO_REPLACE < 0 or _PHONO_REPLACE > 1:\n valid = False\n if _PHONO_GROUP < 0 or _PHONO_GROUP > 1:\n valid = False\n if _PHONO_DELETE + _PHONO_INSERT + _PHONO_REPLACE > 1:\n valid = False\n if valid == False:\n if silent == False:\n print(\"Invalid 'phono' parameter read in '\" + fin + \"'.\")\n print(\"All parameters should be probabilities between 0.0 and \" +\n \"1.0.\")\n print(\"The sum of 'delete', 'insert', and 'replace' should \" +\n \"not exceed 1.0.\")\n print(\"Reverting to default parameters.\")\n return _default_config(silent=silent)\n\n # Read blacklist (section not required)\n if \"blacklist\" in config.sections():\n _BLACKLIST = tuple(dict(config.items(\"blacklist\")))\n else:\n _BLACKLIST = _DEF_BLACKLIST\n \n if silent == False:\n print(\"Config file successfully loaded!\")\n\n # Update current config file\n _CONFIG = fin",
"def __readConfig(self):\r\n\r\n\t\tfr = open(self.__configFilePath, 'r')\r\n\t\t\r\n\r\n\t\tfor line in fr.readlines():\r\n\t\t\tline = line.strip()\r\n\t\t\tif line == \"\":\r\n\t\t\t\tcontinue\r\n\t\t\t\r\n\t\t\tif line[0] != '#': # ignore lines start by #\r\n\t\t\t\tsp = line.split('=')\r\n\t\t\t\tif len(sp) == 2:\r\n\t\t\t\t\tkey = sp[0].strip()\r\n\t\t\t\t\tval = sp[1].strip()\r\n\t\t\t\t\tself.__configDict[key] = val\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.__print(\"Ignore config line: \" + line)\r\n\r\n\t\tself.__print(\"Read configs from: %s\\n%d configs read!\" \\\r\n\t\t\t\t\t\t\t\t % (self.__configFilePath, len(self.__configDict)) \\\r\n\t\t\t\t\t\t\t\t)\r\n\r\n\t\tfr.close()",
"def read_config(filename):\n with open(filename) as fobj:\n return json.load(fobj)",
"def optReadParameterFile(*args):\n return _optcc.optReadParameterFile(*args)",
"def _read_config(self):\n \n config_file_name = \"config.json\"\n \n if not os.path.isfile(config_file_name):\n raise FileNotFoundError(config_file_name + \" is not found\")\n\n try:\n data = json.load(open(config_file_name))\n except:\n raise ValueError(config_file_name + \" is not a valid json\")\n\n self.queries = self._read_config_value(data, \"query\")\n self.sender = self._read_config_value(data, \"sender\")\n self.recipients = self._read_config_value(data, \"recipient\")",
"def readConf(configFile):\n\n try:\n with open(configFile) as json_data_file:\n return json.load(json_data_file)\n except:\n raise",
"def load_cfg(self,filepath):\n config = configparser.ConfigParser()\n config.read([filepath])\n return config",
"def read_user_options(config_filename: Optional[str] = None) -> UserOptions:\n readers = settings.USER_OPTION_READERS.values()\n if config_filename:\n if config_filename.endswith(\".toml\"):\n readers = [settings.USER_OPTION_READERS[\"toml\"]]\n else:\n readers = [settings.USER_OPTION_READERS[\"ini\"]]\n\n for reader in readers:\n options = reader.read_options(config_filename=config_filename)\n if options:\n normalized_options = _normalize_user_options(options)\n return normalized_options\n raise FileNotFoundError(\"Could not read any configuration.\")",
"def _read_config():\n\n import configparser\n import os\n\n basepath = os.getcwd()\n prev = None\n while basepath != prev:\n prev = basepath\n path = os.path.join(basepath, 'uriconfig.ini')\n if os.path.exists(path):\n break\n basepath = os.path.split(basepath)[0]\n\n parser = configparser.ConfigParser()\n parser.read(path)\n return parser",
"def readConfigurationFile(self):\n\t\tconfig_file = os.path.join(script_path, 'assets/config.json')\n\n\t\twith open(config_file, 'r') as f: content = json.load(f)\n\t\t\n\t\treturn content",
"def _read_config_file():\n json_file_path = os.path.join(os.path.dirname(__file__),\n 'users-settings.json')\n with open(json_file_path) as settings:\n return json.load(settings)",
"def _read_process_config(filepath):\n\twith open(filepath) as fh:\n\t\tproc_conf = _json.load(fh)\n\treturn proc_conf",
"def read_conf():\n\tconfFile = os.path.join(CURDIR, '../../Config/conf.json')\n\twith open(confFile, 'r') as f:\n\t\tconf = json.load(f)\n\t\tgame = conf['game']\n\t\tonline = conf['use_cache']\n\tgameConfFile = os.path.join(CURDIR, '../../Config/conf_%s.json' \\\n\t % game.lower())\n\twith open(gameConfFile, 'r') as f:\n\t\tconf = json.load(f)\n\t\tconf['use_cache'] = online\n\n\t\treadPoolJson = conf['read_pool_from_json']\n\t\tif readPoolJson:\n\t\t\tpoolConfFile = os.path.join(CURDIR, '../../Config/%s' \\\n\t\t\t % conf['pool_json'])\n\t\t\twith codecs.open(poolConfFile, 'r', encoding='utf-8') as fpool:\n\t\t\t\tconf['pool'] = json.load(fpool)['pools']\n\n\t\treturn conf",
"def read_config_file():\n \n MIN_RUN_TIME = 300 # min five minutes between runs\n \n config = configparser.ConfigParser(allow_no_value=True)\n configdata = {}\n \n config.read('backgrounder.ini')\n \n configdata['path'] = {}\n configdata['path']['image'] = config['path']['image']\n configdata['subreddits'] = config['subreddits']['subreddits']\n configdata['postsave'] = config['postsave']['method']\n configdata['timing'] = config['timing']['seconds']\n configdata['other'] = {}\n configdata['other']['ignore_duplicates'] = config['other']['ignore_duplicates']\n configdata['other']['download_gallery'] = config['other']['download_gallery']\n \n # validate user-entered config\n valid_dict = validate_config(configdata)\n for key, val in valid_dict.items():\n if val is False:\n messagebox.showinfo('Warning', 'There was an error reading backgrounder.ini.\\n\\nPlease delete your data.pkl file and rerun the program.'\n % (key))\n return None\n \n process_configdata(configdata)\n \n return configdata",
"def read_config_file(self):\n\n # try to import the python2 ConfigParser\n # if unable to import, then try to import the python3 configparser\n try:\n import ConfigParser as configparser\n except ImportError:\n import configparser\n\n config = configparser.ConfigParser()\n\n try:\n config.read(self.config)\n except EnvironmentError:\n sys.exit(\"Unable to read from the config file: \" + self.config)\n\n for section in config.sections():\n config_list = config.items(section)\n for name,value in config_list:\n yield name, value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Interpret filesystem path settings relative to the `base_path` given. Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from `OptionParser.relative_path_settings`. | def make_paths_absolute(pathdict, keys, base_path=None):
if base_path is None:
base_path = os.getcwdu() # type(base_path) == unicode
# to allow combining non-ASCII cwd with unicode values in `pathdict`
for key in keys:
if key in pathdict:
value = pathdict[key]
if isinstance(value, list):
value = [make_one_path_absolute(base_path, path)
for path in value]
elif value:
value = make_one_path_absolute(base_path, value)
pathdict[key] = value | [
"def process_path_key(self, dirpath, filename, key_path, dictionary, keys, level, must_exist, can_have_subdict, default_val):\n # found the key_path, process values\n if level == len(keys) - 1:\n key = keys[level]\n # if a wildcard is specified at this level, that means we\n # should process all keys as path values\n if key == \"*\":\n for key, val in dictionary.items():\n dictionary[key] = self.process_path_value(dirpath, filename, key_path, val, must_exist, can_have_subdict)\n elif key in dictionary:\n dictionary[key] = self.process_path_value(dirpath, filename, key_path, dictionary[key], must_exist, can_have_subdict)\n # key was not found, but default value was set, so apply it\n elif default_val:\n dictionary[key] = self.relative_path(dirpath, filename, key_path, default_val, must_exist)\n # otherwise recurse deeper into the dict\n elif level < len(keys) - 1:\n key = keys[level]\n if key in dictionary:\n # if the key refers to a dictionary, recurse into it to go\n # further down the path key\n if isinstance(dictionary[key], dict):\n self.process_path_key(dirpath, filename, key_path, dictionary[key], keys, level + 1,\n must_exist, can_have_subdict, default_val)\n # if the key was not found, but a default value is specified,\n # drill down further to set the default value\n elif default_val:\n dictionary[key] = {}\n self.process_path_key(dirpath, filename, key_path, dictionary[key], keys, level + 1,\n must_exist, can_have_subdict, default_val)",
"def test_paths_from_settings():\n import settings_bipype\n\n namespace = settings_bipype.__dict__\n \n variables = { key: namespace[key]\n for key in namespace\n if key.startswith('PATH') }\n \n for var in variables.values():\n assert path_exists(var)",
"def get_relative_paths(original, relative_to):\n return {k: get_relative_path(v, relative_to) for k, v in original.items()}",
"def get_settings_path() -> Path:\n with open(root_path / 'deeppavlov/paths.json', encoding='utf8') as f:\n paths = json.load(f)\n settings_paths = Path(paths['settings_path']).resolve() if paths['settings_path'][0] == '/' \\\n else root_path / paths['settings_path']\n return settings_paths",
"def _GetPathValue(obj, paths, default_value=None):\n if not obj:\n return default_value\n for p in paths:\n if p in obj:\n obj = obj[p]\n else:\n return default_value\n return obj",
"def process_path_value(self, dirpath, filename, key_path, val, must_exist, can_have_subdict):\n if isinstance(val, str):\n return self.relative_path(dirpath, filename, key_path, val, must_exist)\n elif isinstance(val, list):\n vals = []\n for entry in val:\n if can_have_subdict and isinstance(entry, dict):\n for subkey, subval in entry.items():\n vals.append({subkey: self.relative_path(dirpath, filename, key_path, subval, must_exist)})\n else:\n vals.append(self.relative_path(dirpath, filename, key_path, entry, must_exist))\n return vals",
"def jsonpaths_in_dict(dic, path='$', *, notation='dot'):\n for k, v in dic.items():\n if notation == 'dot':\n json_path = f\"{path}.{k}\"\n elif notation == 'bracket':\n json_path = f\"{path}['{k}']\"\n else:\n json_path = None\n ValueError(f\"Notation: '{notation}' is not supported\")\n\n if isinstance(v, dict):\n for json_path_ in jsonpaths_in_dict(\n v, json_path, notation=notation):\n yield json_path_\n else:\n yield json_path",
"def load_dataset_paths(basedir: str) -> Cord19Paths:\n basedir = Path(basedir)\n paths, filesdir = {}, []\n for p in basedir.iterdir():\n if p.suffix == '.csv':\n paths['metadata'] = p\n elif p.suffix == '.readme':\n paths['readme'] = p\n elif p.is_dir():\n dirdir = p.joinpath(p.name)\n if dirdir.is_dir():\n filesdir.append(dirdir)\n\n paths['dirs'] = filesdir\n for p in filesdir:\n paths[p.name] = p\n return Cord19Paths(**paths)",
"def get_subdict(adict, path, sep=os.sep):\n return reduce(adict.__class__.get, [p for p in path.split(sep) if p], adict)",
"def paths_import_all(self, path_fname):\n\t\twith open (path_fname, \"rb\") as paths_f:\n\t\t\tpaths_dict = pickle.load(paths_f)\n\n\t\t# keyed by environment name\n\t\tunscrambled_dict = {}\n\t\tfor key in paths_dict.keys():\n\t\t\tunscrambled_dict[key] = self.moveit_unscramble(paths_dict[key])\n\n\t\treturn unscrambled_dict",
"def make_paths_absolute(dir_: str, cfg: Dict[str, Any]) -> Dict[str, Any]:\n for key in cfg.keys():\n if hasattr(key, \"endswith\") and key.endswith(\"_path\"):\n if cfg[key].startswith(\"~\"):\n cfg[key] = os.path.expanduser(cfg[key])\n else:\n cfg[key] = os.path.join(dir_, cfg[key])\n cfg[key] = os.path.abspath(cfg[key])\n if type(cfg[key]) is dict:\n cfg[key] = make_paths_absolute(dir_, cfg[key])\n return cfg",
"def _lookup_paths_in_paths(client_dispatcher: IClientDispatcher, lookup_paths: List[str], target_paths: List[str]):\n client = client_dispatcher.current_client\n\n dirs = []\n files = set()\n\n for p in lookup_paths:\n path = Path(get_relative_paths(client.path, [p])[0])\n if path.is_dir():\n dirs.append(path)\n else:\n files.add(path)\n\n target_dirs = []\n target_files = set()\n\n for p in target_paths:\n path = Path(p)\n if path.is_dir():\n target_dirs.append(path)\n else:\n target_files.add(path)\n\n result = set()\n\n for target_file in target_files:\n if target_file in files or any(d in target_file.parents for d in dirs):\n result.add(str(target_file))\n\n for target_dir in target_dirs:\n if target_dir in dirs or any(target_dir in f.parents for f in files):\n result.add(str(target_dir))\n\n return result",
"def common_paths():\n\tpath={}\n\tcurPath = os.path.dirname(os.path.realpath(__file__))\n\tpath[\"current\"] = curPath\n\tsharedPath = \"/usr/share/beadtracker\"\n\tpath[\"translation\"] = firstExistingPath(\n\t\t\t[os.path.join(p, \"lang\") for p in\n\t\t\t (curPath, sharedPath,)])\n\tpath[\"templates\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'templates') for p in\n\t\t\t (curPath, sharedPath,)])\n\n\tpath[\"splash\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'templates','splash.png') for p in\n\t\t\t (curPath, sharedPath,)])\n\tpath[\"themes\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'templates','themes') for p in\n\t\t\t (curPath, sharedPath,)])\n\n\tlang=str(QtCore.QLocale.system().name()) \n\tshortLang=lang[:2]\n\tpath[\"help\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'HELP') for p in\n\t\t\t (os.path.join(curPath,\"help_\"+lang),\n\t\t\t os.path.join(sharedPath,\"help_\"+lang),\n\t\t\t os.path.join(curPath,\"help_\"+shortLang),\n\t\t\t os.path.join(sharedPath,\"help_\"+shortLang),\n\t\t\t os.path.join(curPath,\"help\"),\n\t\t\t os.path.join(sharedPath,\"help\"),\n\t\t\t )\n\t\t\t ])\n\treturn path",
"def _dynamic_paths(path, options):\n path = RouteTrie.clean_path(path)\n possible_paths = set([path])\n\n # Check for string formatting.\n if not options or '{' not in path:\n return possible_paths\n\n for key in options:\n for option in options[key]:\n format_keys = {key: option}\n new_paths = []\n for possible_path in possible_paths:\n new_paths.append(utils.safe_format(possible_path, **format_keys))\n possible_paths = possible_paths.union(new_paths)\n\n paths = set()\n\n # Don't include paths that still have format symbols.\n for possible_path in possible_paths:\n if '{' not in possible_path:\n paths.add(possible_path)\n\n if not paths:\n raise MissingOptionError(path)\n\n return paths",
"def sub_dict(d:dict, paths:list, *, compl=False):\n# k = keys[0]\n# assert type(k) in {list, tuple}\n# res = nested_dict(k, fsl.utils.data.get_item(d, k))\n res = {}\n if compl:\n pp = []\n for p in get_paths(d):\n for q in paths:\n if q == p[:len(q)]:\n break\n else:\n pp.append(p)\n else:\n pp = paths\n\n for k in pp:\n # assert type(k) in {list, tuple}\n setitem(res, k, getitem(d, k))\n return res",
"def _configure_local_paths(local_paths):\n answer = copy(local_paths)\n\n # Ask the user for a repository root.\n while not answer.get('reporoot'):\n logger.info('First, we need to know where you store most code on your '\n 'local machine.')\n logger.info('Other paths (example: toolkit) will derive from this, '\n 'but most are individually configurable.')\n logger.info('The use of ${REPOROOT} in GAPIC YAMLs will point here.')\n logger.info('Note: Use of ~ is fine here.')\n answer['reporoot'] = six.moves.input('Local code path: ')\n answer['reporoot'] = answer['reporoot'].rstrip('/').strip()\n\n # Set up dependent directories.\n reporoot = answer['reporoot']\n for dep in ('api-client-staging', 'googleapis', 'toolkit'):\n location = six.moves.input(\n 'Path for {0} (default: {1}/{0}): '.format(dep, reporoot)\n ).rstrip('/').strip()\n if location:\n answer[dep.replace('-', '_')] = location\n\n # Done; return the answer.\n return answer",
"def load(self, base_settings):\n is_valid_key = lambda k: k.isupper() and not k.startswith('_')\n\n # Base settings, including `LocalSetting`s, loaded from the\n # Django settings module.\n valid_keys = (k for k in base_settings if is_valid_key(k))\n base_settings = DottedAccessDict((k, base_settings[k]) for k in valid_keys)\n\n # Settings read from the settings file; values are unprocessed.\n settings_from_file = self.strategy.read_file(self.file_name, self.section)\n\n # The fully resolved settings.\n settings = Settings(base_settings)\n\n for name, value in settings_from_file.items():\n for prefix in ('PREPEND.', 'APPEND.', 'SWAP.'):\n if name.startswith(prefix):\n name = name[len(prefix):]\n name = '{prefix}({name})'.format(**locals())\n break\n\n settings.set_dotted(name, value)\n\n # See if this setting corresponds to a `LocalSetting`. If\n # so, note that the `LocalSetting` has a value by putting it\n # in the registry. This also makes it easy to retrieve the\n # `LocalSetting` later so its value can be set.\n current_value = base_settings.get_dotted(name, None)\n if isinstance(current_value, LocalSetting):\n self.registry[current_value] = name\n\n self._interpolate_values(settings, settings)\n self._interpolate_keys(settings, settings)\n self._prepend_extras(settings, settings.pop('PREPEND', None))\n self._append_extras(settings, settings.pop('APPEND', None))\n self._swap_list_items(settings, settings.pop('SWAP', None))\n self._import_from_string(settings, settings.pop('IMPORT_FROM_STRING', None))\n self._delete_settings(settings, settings.pop('DELETE', None))\n\n for local_setting, name in self.registry.items():\n local_setting.value = settings.get_dotted(name)\n\n return settings",
"def expandVarsInPaths(repositories):\r\n os.environ[\"SUBUSERDIR\"] = _getSubuserDir()\r\n for reponame,info in repositories.iteritems():\r\n info[\"path\"] = os.path.expandvars(info[\"path\"])",
"def _crawl(\n self, key_path: List[str], env_vars: Mapping[str, Sequence[str]]\n ) -> Dict[str, Any]:\n new_vars: Dict[str, List[str]] = {}\n obj = self._path_get(key_path)\n # Sub-dict -> recurse\n if (\n hasattr(obj, \"keys\")\n and callable(obj.keys)\n and hasattr(obj, \"__getitem__\")\n ):\n for key in obj.keys():\n merged_vars = dict(env_vars, **new_vars)\n merged_path = key_path + [key]\n crawled = self._crawl(merged_path, merged_vars)\n # Handle conflicts\n for key in crawled:\n if key in new_vars:\n err = \"Found >1 source for {}\"\n raise AmbiguousEnvVar(err.format(key))\n # Merge and continue\n new_vars.update(crawled)\n # Other -> is leaf, no recursion\n else:\n new_vars[self._to_env_var(key_path)] = key_path\n return new_vars"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
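
To make the path-resolution rule in the `make_paths_absolute()` row concrete, here is a self-contained Python 3 sketch. The `make_one_path_absolute()` helper is not part of the row, so the one-liner below is an assumption (joining the path onto the base and normalising it); the setting names are invented.

```python
# Standalone illustration of resolving list-valued and scalar path settings.
import os

def make_one_path_absolute(base_path, path):
    # Assumed stand-in for the helper the row relies on.
    return os.path.abspath(os.path.join(base_path, path))

pathdict = {'stylesheet_path': ['minimal.css', 'plain.css'],  # list-valued setting
            'template': 'default.txt',                        # scalar path setting
            'title': 'left alone'}                            # not in `keys`
keys = ('stylesheet_path', 'template')
base_path = '/etc/docutils'

for key in keys:
    value = pathdict[key]
    if isinstance(value, list):
        pathdict[key] = [make_one_path_absolute(base_path, p) for p in value]
    elif value:
        pathdict[key] = make_one_path_absolute(base_path, value)

print(pathdict['template'])  # e.g. /etc/docutils/default.txt on POSIX systems
```
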
Return a copy of `settings_spec` excluding/replacing some settings. `settings_spec` is a tuple of configuration settings with a structure described for docutils.SettingsSpec.settings_spec. Optional positional arguments are names of to-be-excluded settings. Keyword arguments are option specification replacements. (See the html4strict writer for an example.) | def filter_settings_spec(settings_spec, *exclude, **replace):
settings = list(settings_spec)
# every third item is a sequence of option tuples
for i in range(2, len(settings), 3):
newopts = []
for opt_spec in settings[i]:
# opt_spec is ("<help>", [<option strings>], {<keyword args>})
opt_name = [opt_string[2:].replace('-', '_')
for opt_string in opt_spec[1]
if opt_string.startswith('--')
][0]
if opt_name in exclude:
continue
if opt_name in replace.keys():
newopts.append(replace[opt_name])
else:
newopts.append(opt_spec)
settings[i] = tuple(newopts)
return tuple(settings) | [
"def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]",
"def get_settings_model(self):\n ignore = [\"layer\", \"layers_cladding\", \"cladding_offset\"]\n s = self.get_settings()\n [s.pop(i) for i in ignore]\n return s",
"def exclude_from_setting(setting_name, items):\n settings_manager.add_action(\n 'exclude_from_setting',\n setting_name=setting_name,\n items=items\n )",
"def avoid(self, excludes): \n if excludes is None or len(excludes) == 0: return self.shallow_copy(excludes=None)\n result = self.shallow_copy(excludes=excludes)\n if self.excludes is not None: result.excludes.extend(self.excludes)\n return result",
"def get_non_optional_params(cls):\n non_optional_params = {}\n for setting_name, description in options['settings'].items():\n if len(description) == 2:\n non_optional_params[\n setting_name] = cls._normalize_desc(description[0],\n description[1])\n return OrderedDict(non_optional_params)",
"def _kwargs_del(self, kwargs: Dict[str, Any],\r\n exclude: Union[str, Iterable[str]]) -> Dict[str, Any]:\r\n if isinstance(exclude, str):\r\n exclude = [exclude]\r\n \r\n return {k: kwargs[k] for k in kwargs.keys() if k not in exclude}",
"def filter_options(\n args, # type: EnvironmentConfig\n argv, # type: t.List[str]\n exclude, # type: t.List[str]\n require, # type: t.List[str]\n): # type: (...) -> t.Iterable[str]\n replace: list[tuple[str, int, t.Optional[t.Union[bool, str, list[str]]]]] = [\n ('--docker-no-pull', 0, False),\n ('--truncate', 1, str(args.truncate)),\n ('--color', 1, 'yes' if args.color else 'no'),\n ('--redact', 0, False),\n ('--no-redact', 0, not args.redact),\n ('--host-path', 1, args.host_path),\n ]\n\n if isinstance(args, TestConfig):\n replace.extend([\n ('--changed', 0, False),\n ('--tracked', 0, False),\n ('--untracked', 0, False),\n ('--ignore-committed', 0, False),\n ('--ignore-staged', 0, False),\n ('--ignore-unstaged', 0, False),\n ('--changed-from', 1, False),\n ('--changed-path', 1, False),\n ('--metadata', 1, args.metadata_path),\n ('--exclude', 1, exclude),\n ('--require', 1, require),\n ('--base-branch', 1, args.base_branch or get_ci_provider().get_base_branch()),\n ])\n\n pass_through_args: list[str] = []\n\n for arg in filter_args(argv, {option: count for option, count, replacement in replace}):\n if arg == '--' or pass_through_args:\n pass_through_args.append(arg)\n continue\n\n yield arg\n\n for option, _count, replacement in replace:\n if not replacement:\n continue\n\n if isinstance(replacement, bool):\n yield option\n elif isinstance(replacement, str):\n yield from [option, replacement]\n elif isinstance(replacement, list):\n for item in replacement:\n yield from [option, item]\n\n yield from args.delegate_args\n yield from pass_through_args",
"def get_other_options(cls, **options):\n return {option : value for option, value in options.items() if option not in cls.config_options}",
"def _sanitize_settings(settings: dict) -> dict:\n resolved_settings = {}\n for k, v in settings.items():\n # Replace with k.lower().removeprefix(\"mongodb_\") when python 3.8 support ends.\n key = _get_name(k[8:]) if k.lower().startswith(\"mongodb_\") else _get_name(k)\n resolved_settings[key] = v\n\n return resolved_settings",
"def test_from_settings_ignores_other_settings():\n expected = {'a': 1, 'b': 2}\n actual = from_settings({'DATABASE_A': 1, 'DATABASE_B': 2, 'OTHER': 3})\n assert actual == expected",
"def filter(self, other):\n\n self.canonify()\n other.canonify()\n\n rej = self.__class__()\n rej.optlist = self.optlist.difference(other.optlist)\n self.optlist.difference_update(rej.optlist)\n for x in self.optdict.copy():\n if x not in other.optdict:\n self.optdict.pop(x)\n rej.optdict[x] = None\n\n return rej",
"def set_exclude(self, exclude):\n self.exclude = exclude\n if exclude:\n log.info('Only considering tags without \"%s\"', exclude)\n return self",
"def get_secret_setting_names(settings: dict) -> Set[str]:\n return {\n key for key in settings.keys()\n if AUTOFIND_SECRET_SETTINGS.match(key)\n and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED\n } | {\n key for key, value in settings['SETTINGS_DEFAULTS'].items()\n if value == PLACEHOLDER_FOR_SECRET\n and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED\n }",
"def process_opt_ignores(self):\n for option in self.options.keys():\n if (option.startswith(\"ignore-\")\n and option[7:] in _COMPONENT_NAMES):\n values = self.options.pop(option)\n name = option.split(\"-\")[1]\n indices = [int(index) for index in values.split()]\n COMPARISON_SETTINGS[\"ignore_templates\"][name] = indices\n self.parent.reporter(\n \"Ignoring indices {0} of {1}\".format(indices, option[7:]))\n continue\n if option == \"ignore_missing\":\n value = self.options.pop(option)\n if value.lower() == \"true\":\n COMPARISON_SETTINGS[\"ignore_missing\"] = True\n self.parent.reporter(\"Ignoring positional header data\")",
"def _warn_about_ignored_settings(_settings_type, _discussion_style):\n # TODO: Leverage the logger instead of warnings\n warn_msg = f\"Because the discussion style is '{_discussion_style}' all {_settings_type}-specific fields \" \\\n \"provided will be ignored.\"\n warnings.warn(warn_msg, UserWarning)",
"def discard_settings(self):\n self._call(\"discardSettings\")",
"def test_cfg_exclude_component_dict(self):\n # create the top level externals file\n desc = self.setup_dict_config()\n # Test an excluded repo\n external = create_externals_description(desc, model_format='dict',\n exclude=['simp_tag',\n 'simp_opt'])\n self.assertIsInstance(external, ExternalsDescriptionDict)\n self.assertFalse('simp_tag' in external)\n self.assertTrue('simp_branch' in external)\n self.assertFalse('simp_opt' in external)\n self.assertTrue('mixed_req' in external)",
"def remove_simulation_kwargs(d: Dict[str, Any]) -> Dict[str, Any]:\n d = d.copy()\n d.pop(\"run\", None)\n d.pop(\"lazy_parallelism\", None)\n d.pop(\"overwrite\", None)\n d.pop(\"animate\", None)\n d.pop(\"wait_to_finish\", None)\n d.pop(\"cores\", None)\n d.pop(\"temp_dir\", None)\n d.pop(\"temp_file_str\", None)\n return d",
"def set_html_exclusions(self, exclusions):\n excl = []\n for (tags,attrs) in exclusions:\n if len(tags)==1 and tags[0]==\"\":\n tags = []\n if len(attrs)==1 and attrs[0]==\"\":\n attrs = []\n excl.append((tags, attrs))\n self.html_exclusions = excl"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call the validator function on applicable settings and evaluate the 'overrides' option. Extends `optparse.Option.process`. | def process(self, opt, value, values, parser):
result = optparse.Option.process(self, opt, value, values, parser)
setting = self.dest
if setting:
if self.validator:
value = getattr(values, setting)
try:
new_value = self.validator(setting, value, parser)
except Exception, error:
raise (optparse.OptionValueError(
'Error in option "%s":\n %s'
% (opt, ErrorString(error))),
None, sys.exc_info()[2])
setattr(values, setting, new_value)
if self.overrides:
setattr(values, self.overrides, None)
return result | [
"def ValidateOptions(self, opt, args):",
"def option_override(options):\n if not options.config_file:\n _logger.warning('config file {0} not found'.format(options.config_file))\n return\n\n config = configparser.RawConfigParser()\n config.read(options.config_file)\n\n section = 'system'\n if config.has_section(section):\n try_update(config, options, section, 'budget')\n try_update(config, options, section, 'sys_area')\n try_update(config, options, section, 'sys_power')\n try_update(config, options, section, 'sys_bw')\n try_update(config, options, section, 'thru_core')\n\n section = 'app'\n if config.has_section(section):\n try_update(config, options, section, 'workload')\n try_update(config, options, section, 'kernels')\n\n section = 'explore-variables'\n if config.has_section(section):\n try_update(config, options, section, 'f_parallel')\n try_update(config, options, section, 'asic_cov')\n try_update(config, options, section, 'asic_perf')\n try_update(config, options, section, 'asic_alloc')\n\n section = 'analysis'\n if config.has_section(section):\n try_update(config, options, section, 'series')\n try_update(config, options, section, 'action')\n try_update(config, options, section, 'fmt')\n try_update(config, options, section, 'nprocs')",
"def clean_and_validate_options(self):\n pass",
"def processCommandLineOptions(self, args):\n del args\n msg = (\"processCommandLineOptions() not implemented, Config must be \"\n \"subclassed.\")\n raise NotImplementedError(msg)",
"def _validate_overrides(cls, filled: Config, overrides: Dict[str, Any]):\n error_msg = \"Invalid override: config value doesn't exist\"\n errors = []\n for override_key in overrides.keys():\n if not cls._is_in_config(override_key, filled):\n errors.append({\"msg\": error_msg, \"loc\": [override_key]})\n if errors:\n raise ConfigValidationError(filled, errors)",
"def opt_validate (optparser):\n (options,args) = optparser.parse_args()\n if not options.fqfilename:\n optparser.print_help()\n sys.exit(1)\n if not options.species:\n optparser.print_help()\n sys.exit(1)\n if not options.dirOut:\n optparser.print_help()\n sys.exit(1)\n return options",
"def _general_argument_parser(self, args_group):\n parser_handler = {UIConsts.MANDATORY: self._parse_mandatory_arguments,\n UIConsts.BASIC_MODE: self._parse_basic_mode_arguments,\n UIConsts.REGEX_MODE: self._parse_regex_mode_arguments,\n UIConsts.CUSTOM_MODE: self._parse_custom_mode_arguments}\n while self.num_of_attempts > 0:\n is_valid = parser_handler[args_group]()\n if not is_valid:\n continue\n else:\n self.num_of_attempts = 3\n return True\n return False",
"def check_options(opts):\n\n sections = baseoptions.keys()\n for s in sections:\n defaults = dict(baseoptions[s])\n for i in defaults:\n if i not in opts:\n opts[i] = defaults[i]\n return opts",
"def test_override_of_mixed_set_of_options(self):\n config_file = \"%s/config_mixed_overrides_1.conf\" % self.test_data_path\n oconfig = ocelog.config.Config(config_file)\n self.assertEqual(oconfig.server.port, 7777) # override \"8888\" \n self.assertEqual(oconfig.server.host, \"localhost\") # default\n self.assertEqual(oconfig.message.default_facility, \"local3\") # override of \"user\"\n self.assertEqual(oconfig.message.default_priority, \"err\") # override of \"notice\"\n self.assertEqual(oconfig.syslog.enabled, False) # default\n self.assertEqual(oconfig.security.require_token, False) # default\n self.assertEqual(oconfig.security.shared_secret, \"fruitpunch\") # override of None",
"def parse_option(self, option, block_name, *values):\n if option == 'run':\n option = 'start_' + option\n\n key = option.split('_', 1)[0]\n self.paths[key] = set(common.extract_app_paths(values))",
"def completing_subcommand_option_util(self, option, words):\n # Example: Return True for: gh view 1 --pag\n if len(words) > 3:\n if option in words:\n return True\n return False",
"def __set_options(self, options):\n for option, value in options.iteritems():\n if option in ('slave_okay', 'slaveok'):\n self.__slave_okay = validate_boolean(option, value)\n elif option == 'read_preference':\n self.__read_pref = validate_read_preference(option, value)\n elif option == 'safe':\n self.__safe = validate_boolean(option, value)\n elif option in SAFE_OPTIONS:\n if option == 'journal':\n self.__set_safe_option('j', value)\n elif option == 'wtimeoutms':\n self.__set_safe_option('wtimeout', value)\n else:\n self.__set_safe_option(option, value)",
"def _do_option(self, line):\n if line.startswith('option verbosity'):\n self._verbosity = int(line[len('option verbosity '):])\n self._write('ok')\n else:\n self._write('unsupported')",
"def flags(self, **kw):\n for k, v in kw.iteritems():\n FLAGS.set_override(k, v)\n self._overridden_opts.append(k)",
"def update_override_settings(self, override_settings: dict) -> None:",
"def genome_options(parser, user_option, prebuilt):\n\n # Checks for custom built genomes using rna-seek build\n if user_option.endswith('.json'):\n # Check file is readable or accessible\n permissions(parser, user_option, os.R_OK)\n # Checks against vaild pre-built options\n # TODO: makes this more dynamic in the future to have it check against\n # a list of genomes (files) in config/genomes/*.json\n elif not user_option in prebuilt:\n # User did NOT provide a vaild choice\n parser.error(\"\"\"provided invalid choice, '{}', to --genome argument!\\n\n Choose from one of the following pre-built genome options: \\n\n \\t{}\\n\n or supply a custom reference genome JSON file generated from rna-seek build.\n \"\"\".format(user_option, prebuilt))\n\n return user_option",
"def filter_options(\n args, # type: EnvironmentConfig\n argv, # type: t.List[str]\n exclude, # type: t.List[str]\n require, # type: t.List[str]\n): # type: (...) -> t.Iterable[str]\n replace: list[tuple[str, int, t.Optional[t.Union[bool, str, list[str]]]]] = [\n ('--docker-no-pull', 0, False),\n ('--truncate', 1, str(args.truncate)),\n ('--color', 1, 'yes' if args.color else 'no'),\n ('--redact', 0, False),\n ('--no-redact', 0, not args.redact),\n ('--host-path', 1, args.host_path),\n ]\n\n if isinstance(args, TestConfig):\n replace.extend([\n ('--changed', 0, False),\n ('--tracked', 0, False),\n ('--untracked', 0, False),\n ('--ignore-committed', 0, False),\n ('--ignore-staged', 0, False),\n ('--ignore-unstaged', 0, False),\n ('--changed-from', 1, False),\n ('--changed-path', 1, False),\n ('--metadata', 1, args.metadata_path),\n ('--exclude', 1, exclude),\n ('--require', 1, require),\n ('--base-branch', 1, args.base_branch or get_ci_provider().get_base_branch()),\n ])\n\n pass_through_args: list[str] = []\n\n for arg in filter_args(argv, {option: count for option, count, replacement in replace}):\n if arg == '--' or pass_through_args:\n pass_through_args.append(arg)\n continue\n\n yield arg\n\n for option, _count, replacement in replace:\n if not replacement:\n continue\n\n if isinstance(replacement, bool):\n yield option\n elif isinstance(replacement, str):\n yield from [option, replacement]\n elif isinstance(replacement, list):\n for item in replacement:\n yield from [option, item]\n\n yield from args.delegate_args\n yield from pass_through_args",
"def run_validators(self, value):\n if isinstance(value, dict):\n to_validate = self._read_only_defaults()\n to_validate.update(value)\n else:\n to_validate = value\n super().run_validators(to_validate)",
"def setPosixCompliance(self, aFlag = 0):\n self.posixCompliance = aFlag\n self.needsParse = 1\n\n if self.posixCompliance:\n self.optionStartExpr = re.compile('(--|-)(?P<option>[A-Za-z0-9_-]+)(?P<arg>=.*)?')\n self.orderMixed = 0\n else:\n self.optionStartExpr = re.compile('(--|-|\\+)(?P<option>[A-Za-z0-9_-]+)(?P<arg>=.*)?')\n self.orderMixed = 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For each component, first populate from the `SettingsSpec.settings_spec` structure, then from the `SettingsSpec.settings_defaults` dictionary. After all components have been processed, check for and populate from each component's `SettingsSpec.settings_default_overrides` dictionary. | def populate_from_components(self, components):
for component in components:
if component is None:
continue
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
for i in range(0, len(settings_spec), 3):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optparse.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
option = group.add_option(help=help_text, *option_strings,
**kwargs)
if kwargs.get('action') == 'append':
self.lists[option.dest] = 1
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides) | [
"def propagate_defaults(self, requiredvars, config, defaultsection=None):\n for option, infodic in requiredvars.items():\n if 'section' in infodic:\n section = infodic['section']\n else:\n section = defaultsection\n\n default = infodic['default']\n\n if not config.has_section(section):\n config.add_section(section)\n\n if not config.has_option(section, option):\n config.set(section, option, default)",
"def update_defaults(self, default_configs: List[dict]) -> None:\n for c in default_configs:\n self.defaults = add_dicts(self.defaults, unpack(c))",
"def _init_default_properties(self):\n for property_name in type(self).default_properties:\n if self.properties.get(property_name) is None:\n self.properties[property_name] = type(self).default_properties[property_name]",
"def _process_default_values(data, specification, path, apply_defaults):\n\n for cur_key in specification:\n if (not _is_spec(cur_key)) and (cur_key not in data):\n default_value = specification[cur_key]\n default_value_from_spec = False\n if _is_spec(default_value):\n default_value = _instantiate_spec(default_value)\n if default_value.default is DEFAULT_NOT_SET:\n continue\n elif default_value.default is REQUIRED_VALUE:\n raise MakefileError(\"A value MUST be supplified for %r\"\n % (_path_to_str(path + (cur_key,))))\n default_value = default_value.default\n default_value_from_spec = True\n\n if apply_defaults \\\n and not isinstance(default_value, WithoutDefaults):\n if isinstance(default_value, dict):\n # Setting of values in the dict will be accomplished\n # in subsequent calls to _process_default_values\n default_value = {}\n elif isinstance(default_value, list):\n # Lists of specs defaults to empty lists\n if not default_value_from_spec:\n default_value = []\n\n # Prevent clobbering of values when re-using sub-specs\n data[cur_key] = copy.deepcopy(default_value)",
"def set_all_defaults(self):\n for key, param in self.parameters.items():\n valdict = self.param_to_valdict(param)\n self.set_defaults(param, valdict)",
"def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]",
"def update_defaults(self):\r\n # setting names\r\n settings_names = (\"CMDSET_CHARACTER\", \"CMDSET_PLAYER\",\r\n \"BASE_PLAYER_TYPECLASS\", \"BASE_OBJECT_TYPECLASS\",\r\n \"BASE_CHARACTER_TYPECLASS\", \"BASE_ROOM_TYPECLASS\",\r\n \"BASE_EXIT_TYPECLASS\", \"BASE_SCRIPT_TYPECLASS\",\r\n \"BASE_CHANNEL_TYPECLASS\")\r\n # get previous and current settings so they can be compared\r\n settings_compare = zip([ServerConfig.objects.conf(name) for name in settings_names],\r\n [settings.__getattr__(name) for name in settings_names])\r\n mismatches = [i for i, tup in enumerate(settings_compare) if tup[0] and tup[1] and tup[0] != tup[1]]\r\n if len(mismatches): # can't use any() since mismatches may be [0] which reads as False for any()\r\n # we have a changed default. Import relevant objects and\r\n # run the update\r\n from src.objects.models import ObjectDB\r\n from src.comms.models import ChannelDB\r\n #from src.players.models import PlayerDB\r\n for i, prev, curr in ((i, tup[0], tup[1]) for i, tup in enumerate(settings_compare) if i in mismatches):\r\n # update the database\r\n print \" %s:\\n '%s' changed to '%s'. Updating unchanged entries in database ...\" % (settings_names[i], prev, curr)\r\n if i == 0:\r\n [obj.__setattr__(\"cmdset_storage\", curr) for obj in ObjectDB.objects.filter(db_cmdset_storage__exact=prev)]\r\n if i == 1:\r\n [ply.__setattr__(\"cmdset_storage\", curr) for ply in PlayerDB.objects.filter(db_cmdset_storage__exact=prev)]\r\n if i == 2:\r\n [ply.__setattr__(\"typeclass_path\", curr) for ply in PlayerDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i in (3, 4, 5, 6):\r\n [obj.__setattr__(\"typeclass_path\", curr) for obj in ObjectDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i == 7:\r\n [scr.__setattr__(\"typeclass_path\", curr) for scr in ScriptDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i == 8:\r\n [scr.__setattr__(\"typeclass_path\", curr) for scr in ChannelDB.objects.filter(db_typeclass_path__exact=prev)]\r\n # store the new default and clean caches\r\n ServerConfig.objects.conf(settings_names[i], curr)\r\n ObjectDB.flush_instance_cache()\r\n PlayerDB.flush_instance_cache()\r\n ScriptDB.flush_instance_cache()\r\n ChannelDB.flush_instance_cache()\r\n # if this is the first start we might not have a \"previous\"\r\n # setup saved. Store it now.\r\n [ServerConfig.objects.conf(settings_names[i], tup[1])\r\n for i, tup in enumerate(settings_compare) if not tup[0]]",
"def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }",
"def config_defaults(self):\n return {\n \"ingredients\": [data_ingredient, builder_ingredient],\n \"run_config\": copy(cd.run_config),\n \"loader_config\": copy(cd.loader_config),\n \"builder_config\": copy(cd.builder_config),\n \"tb_config\": copy(cd.tb_config),\n \"lr_config\": copy(cd.lr_config),\n }",
"def dummy_config() -> ml_collections.ConfigDict:\n global_dict = {}\n for agent in get_implemented_agents():\n paper_agent = get_paper_agent(agent)\n global_dict.update(dataclasses.asdict(paper_agent.default))\n return ml_collections.ConfigDict(global_dict, type_safe=False)",
"def _resolveEnvironments(self):\n configuration = self._raw['environments']\n\n default = configuration.get(DEFAULT, {})\n result = {}\n for name, data in configuration.items():\n if name == DEFAULT:\n continue\n new_data = default.copy()\n if isinstance(data, list):\n new_data['slaves'] = data\n else:\n new_data.update(data)\n result[name] = new_data\n\n return result",
"def init_params(self, overrides: Dict[str, List[dict]] = {}) -> None:\n # TODO: Change overrides value type to a named tuple or something else\n # more appropriate than a free-form dict.\n for name, param in self._free_params.items():\n store = None\n for o in overrides.get(param.fqn, []):\n if path_matches_spec(self._fragment_path, o[\"path\"]):\n store = o[\"store\"]\n if not store:\n identity = (param.fqn, self._stringize_path())\n value = param.eval_default(self._get_dataset_or_set_default)\n store = param.make_store(identity, value)\n\n for handle in self._get_all_handles_for_param(name):\n handle.set_store(store)\n\n for s in self._subfragments:\n s.init_params(overrides)",
"def check_options(opts):\n\n sections = baseoptions.keys()\n for s in sections:\n defaults = dict(baseoptions[s])\n for i in defaults:\n if i not in opts:\n opts[i] = defaults[i]\n return opts",
"def load_defaults(defaults_file: list = []):\n cfg = Config(\"configs/default.yaml\")\n # cfg = cfg.update_config(Config(\"configs/dataset.yaml\"))\n for file in defaults_file:\n print(file)\n cfg = deep_update(cfg, Config(file))\n \n cfg = Opts(cfg).parse_args()\n \n cfg = load_enviroment_path(cfg)\n return cfg",
"def test_read_config_found_defaults_in_sections(self):\n for k, v in self.config.items():\n for key in self.config_defaults.keys():\n self.assertTrue(key in v.keys())",
"def _merge_configurations(self):\n m = dict()\n m.update(self._default)\n m.update(self._repo)\n m.update(self._user)\n return m",
"def complete_dflt_vals(cfg):\n dflt = cfg['default_params'] # all default params\n for key, entries in cfg.items():\n if key not in _dict_fields:\n continue\n\n logger.debug(\"check for %s defaults\", key)\n dflts = dflt.get(key, {}) # default params for given section\n\n # if not dflts:\n # continue\n logger.info(\"set defaults for %s\", key)\n if dflts:\n logger.debug(\"defaults %s\", dflts)\n\n for name, entry in sorted(entries.items()):\n logger.debug(\"%s:%s\", key, name)\n\n if 'name' not in entry: # set name field if missing\n logger.debug(\"NAME = %r\", name)\n entry['name'] = name\n\n for dkey, dval in dflts.items():\n if dkey not in entry:\n entry[dkey] = dval\n logger.debug(\"%r = %r\", dkey, dval)",
"def __parseAllHelper( self, parsed ):\n parsedDict = vars(parsed)\n for name, obj in vars(self).iteritems():\n if isinstance( obj, ConfigHelper ):\n for var in obj.getOptions():\n key = \"%s.%s\" %( name,var )\n if key in parsedDict:\n try:\n obj.setOption( var, parsedDict[key] )\n except RuntimeError as e:\n self._errorMessages.append( \"ERROR: %s \" % e )",
"def _load_settings(self):\n with open(DEFAULT_PATH, 'rb') as file_:\n default_settings = yaml.load(file_)\n LOG.info('Loaded defaults: %s', default_settings)\n\n user_settings = {}\n if os.path.isfile(USERSETTINGS_PATH) and os.access(USERSETTINGS_PATH, os.R_OK):\n try:\n with open(USERSETTINGS_PATH, 'rb') as file_:\n user_settings = yaml.load(file_)\n LOG.info('Loaded user settings %s from path %s', user_settings,\n USERSETTINGS_PATH)\n except Exception:\n LOG.exception('Exception during loading of user settings')\n # FIXME check user_settings keys\n else:\n LOG.info('No user settings found, file %s does not exist or is not readable',\n USERSETTINGS_PATH)\n\n self.__class__.settings = ChainMap(user_settings, default_settings)\n self.__class__.settings_names = list(self.settings.keys())",
"def get_component_definitions(\n pipeline_config: Dict[str, Any], overwrite_with_env_variables: bool = True\n) -> Dict[str, Dict[str, Any]]:\n component_definitions = {} # definitions of each component from the YAML.\n\n for raw_component_definition in pipeline_config[\"components\"]:\n name = raw_component_definition[\"name\"]\n # We perform a shallow copy here because of https://github.com/deepset-ai/haystack/issues/2568\n component_definition = {key: copy(value) for key, value in raw_component_definition.items() if key != \"name\"}\n component_definitions[name] = component_definition\n\n if overwrite_with_env_variables:\n for key, value in os.environ.items():\n env_prefix = f\"{name}_params_\".upper()\n if key.startswith(env_prefix):\n param_name = key.replace(env_prefix, \"\").lower()\n if \"params\" not in component_definition:\n component_definition[\"params\"] = {}\n component_definition[\"params\"][param_name] = value\n logger.info(\n \"Param '%s' of component '%s' overwritten with environment variable '%s' value '%s'.\",\n param_name,\n name,\n key,\n \"***\",\n )\n return component_definitions"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of config files, from environment or standard. | def get_standard_config_files(self):
try:
config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
except KeyError:
config_files = self.standard_config_files
# If 'HOME' is not set, expandvars() requires the 'pwd' module which is
# not available under certain environments, for example, within
# mod_python. The publisher ends up in here, and we need to publish
# from within mod_python. Therefore we need to avoid expanding when we
# are in those environments.
expand = os.path.expanduser
if 'HOME' not in os.environ:
try:
import pwd
except ImportError:
expand = lambda x: x
return [expand(f) for f in config_files if f.strip()] | [
"def get_config_files(self):\n if package.backend.FORMAT == \"rpm\":\n return [\"sysconfig/clamd.amavisd\", \"tmpfiles.d/clamd.amavisd.conf\"]\n return []",
"def in_cwd():\n configs = []\n\n for filename in os.listdir(os.getcwd()):\n if filename.startswith('.tmuxp') and is_config_file(filename):\n configs.append(filename)\n\n return configs",
"def all_configs():\n\n path = os.path.expanduser(TESTCONFIG['audits']['config_dir'])\n config_names = []\n for glop in ['*conf']:\n config_names.extend(\n os.path.basename(x)\n for x in glob.iglob(os.path.join(path, glop)))\n return config_names",
"def _readStdConfigFiles(cls):\n\n # Default one first\n cls.readConfigFile(DEFAULT_CONFIG)\n\n # Site specific one can override properties defined in default\n cls.readConfigFile(USER_CONFIG)",
"def get_configs():\n with open(CONFIG_PATH) as f:\n return json.load(f)",
"def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/balanced_vae_study_v1/metric_configs/\"))",
"def global_resources_files(config):\n # type: (dict) -> list\n try:\n files = config['global_resources']['files']\n if util.is_none_or_empty(files):\n raise KeyError()\n except KeyError:\n files = []\n return files",
"def configFiles(component, required=True):\n # Get the config dir\n etc = getConfigDir()\n\n conf = []\n\n # First the default conf\n cfg = os.path.join(etc, \"{0}.conf\".format(component))\n # Is it readable?\n if os.access(cfg, os.R_OK):\n # Yes, we add it to config list\n conf.append(cfg)\n elif required:\n # Nope, and it's required so we raise an error\n raise ConfigError(\"Required config file for component '{0}' not \"\n \"found at {1}\".format(component, cfg))\n else:\n # The default component config was not found, so we do not even look for\n # a site local config.\n return conf\n\n # Check for a site local confi\n cfg = os.path.join(etc, \"{0}.site.conf\".format(component))\n # Is it readable?\n if os.access(cfg, os.R_OK):\n # Yes, we add it to config list\n conf.append(cfg)\n\n return conf",
"def locations(self, exists=True):\n result = []\n for config_files in self.config_paths:\n if not config_files:\n continue\n if os.path.isdir(config_files):\n config_files = [os.path.join(config_files, i)\n for i in sorted(os.listdir(config_files))\n if i.endswith('.conf')]\n else:\n config_files = [config_files]\n for config_file in config_files:\n if not exists or os.path.exists(config_file):\n config_file = os.path.abspath(config_file)\n if config_file in result:\n result.remove(config_file)\n result.append(config_file)\n return result",
"def in_dir(config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json', '.ini']):\n configs = []\n\n for filename in os.listdir(config_dir):\n if is_config_file(filename, extensions) and not filename.startswith('.'):\n configs.append(filename)\n\n return configs",
"def find_default_config_files() -> Iterator[Path]:\n yield from _yield_default_files()\n\n try:\n yield from _find_project_config()\n except OSError:\n pass\n\n try:\n yield from _find_config_in_home_or_environment()\n except OSError:\n pass\n\n try:\n if os.path.isfile(\"/etc/pylintrc\"):\n yield Path(\"/etc/pylintrc\").resolve()\n except OSError:\n pass",
"def get_configs() -> list:\n configs = sh.docker('config', 'ls', '--format', '{{ .Name }}')\n\n return configs.stdout.decode('utf8').splitlines()",
"def _read_config_files(self, *, base: str = '') -> None:\n self.__used_config_files = frozenset(self.read(os.path.join(base, f) for f in reversed(self.CONFIG_FILES)))",
"def get_conf_files_plus(config_file):\n l = get_conf_files()\n l.append(config_file)\n return l",
"def get_project_list(config):\r\n eggs_dir = config.get('eggs_dir', 'eggs')\r\n if os.path.exists(eggs_dir):\r\n projects = os.listdir(eggs_dir)\r\n else:\r\n projects = []\r\n try:\r\n projects += [x[0] for x in config.cp.items('settings')]\r\n except NoSectionError:\r\n pass\r\n return projects",
"def read_config_file():\n file_found = 0\n filename = URLNET_CFG\n search_path=os.environ['PATH']\n paths = ['.',]\n # allow for the possibility that there is no HOME env variable\n home = None\n try:\n home = os.environ['HOME']\n except Exception, e:\n pass\n # \n if home != None and len(home) > 0:\n paths.append(home)\n paths = paths + split(search_path, pathsep)\n \n for path in paths:\n if exists(join(path, filename)):\n file_found = 1\n break\n if file_found:\n path = abspath(join(path, filename))\n try:\n fd = open(path)\n lines = fd.readlines()\n fd.close()\n return lines\n except Exception, e:\n return None\n else:\n return None",
"def get_eval_config_files(self):\n return list(resources.get_files_in_folder(\"config/unsupervised_study_v1/metric_configs/\"))",
"def apps() -> List[str]:\n with Configuration() as config:\n return config.get_apps()",
"def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/correlated_factors_study_ws_id2/metric_configs/\"))",
"def _get_config_dirs():\n config_dirs = [\n USER_CONFIG_DIR,\n os.path.join(\"/\", \"etc\", \"rapport\"),\n os.path.abspath(os.path.join(\"rapport\", \"config\"))\n ]\n return config_dirs"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an option by its dest. If you're supplying a dest which is shared by several options, it is undefined which option of those is returned. A KeyError is raised if there is no option with the supplied dest. | def get_option_by_dest(self, dest):
for group in self.option_groups + [self]:
for option in group.option_list:
if option.dest == dest:
return option
raise KeyError('No option with dest == %r.' % dest) | [
"def get(self, opt, index=0):\n\t\ti = 0\n\t\tfor n, d in self.options:\n\t\t\tif n == opt:\n\t\t\t\tif i == index:\n\t\t\t\t\treturn d\n\t\t\t\ti += 1\n\t\treturn None",
"def get_option(cfg, base, opt):\n if cfg.has_option(base, opt):\n return cfg.get(base, opt)\n else:\n return None",
"def get_option(self, **attrs) -> Optional[Option]:\n return utils.get(self._options, **attrs)",
"def get_option_by_varname(self, varname):\n option = self.options.get(varname, None)\n if option is None:\n raise KeyError(\n \"No option with the variable name '{}' could \"\n \"be found\".format(varname)\n )\n return option",
"def get_option(self, opt_str):\n for opt in self._options.values():\n if opt_str in ('-' + opt.short, '--' + opt.long):\n return opt, False\n if opt_str == '--' + opt.inverse:\n return opt, True\n return None, None",
"def get(self, *args, **kargs):\n return self.get_option(*args, **kargs)",
"def get_plugin_option(self, plugin, key):\n if plugin in self.plugins:\n plugin = self.plugins[plugin]\n return plugin.get_option(key)",
"def get_source_for_option(self, section: str, option: str) -> Optional[str]:",
"def GetCommandOption(self, option, default = None):\n\n for opt, opt_arg in self.__command_options:\n if opt == option:\n return opt_arg\n return default",
"def option(self, name):\n return self._options.get(name)",
"def get_unique_option(self, option):\n for opt in self.options:\n if option == opt.__class__:\n return opt",
"def get(self, id):\n return self._opts.setdefault(id, [None])[0]",
"def getopt(settings, key, strict=False, copy=False):\n from . import defaults\n\n ns = namespace(settings)\n if ns.__contains__(key):\n return ns.__getitem__(key)\n\n # usually fallback to None\n args = [defaults, key, None]\n if strict:\n args.pop()\n default = getattr(*args)\n if copy:\n from copy import deepcopy\n default = deepcopy(default)\n\n return default",
"def get(option, fallback=None):\n from vjezd import device as this_device\n\n o = Config.query.filter(\n # only rules valid for this device\n or_(Config.device == this_device.id,\n Config.device == None),\n # AND meeting the option name\n Config.option == option,\n ).order_by(Config.device.desc(), Config.id.desc()).first()\n\n if o:\n logger.debug('Read option from db: {}'.format(o))\n if o.value == None:\n return fallback\n return o.value\n\n return fallback",
"def getOption(self,optionName):\n for opt in self.options:\n if opt[0]==optionName:\n return opt[1]\n return ''",
"def GetGlobalOption(self, option, default=None):\n\n for opt, opt_arg in self.__global_options:\n if opt == option:\n return opt_arg\n return default",
"def get(self,\n section,\n option):\n return self.__parser.get(section=section, option=option)",
"def _get_opt(corpus, opt, def_val):\n if \"clean\" in corpus[\"transforms\"]:\n value = corpus.get(opt, def_val)\n clean = value\n else:\n clean = None\n return clean",
"def get_option(self, option, keys=None):\n try:\n item = self._table.get_item(\n Key={\n self._store_key: self._store_name,\n self._option_key: option\n }\n )['Item']\n del item[self._store_key]\n del item[self._option_key]\n\n if keys:\n return {\n key: value\n for key, value in item.items()\n if key in keys\n }\n else:\n return {key: value for key, value in item.items()}\n\n except Exception:\n raise",
"def __get_config_option(self, o: str) -> Any:\n try:\n return self.config.get('FAKE_SECTION', o)\n except configparser.NoOptionError:\n return self.defaults[o]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transform '-' to '_' so the cmdline form of option names can be used. | def optionxform(self, optionstr):
return optionstr.lower().replace('-', '_') | [
"def to_option(attr):\n return '--%s' % attr.lower().replace('_', '-')",
"def __set_opt(option):\n return \"--\" + option",
"def option_prefix(self, option):\n return \"--\"",
"def attr_to_arg(attr):\n return '--{}'.format(attr.replace('_', '-'))",
"def option_strings(self) -> List[str]:\n\n dashes: List[str] = [] # contains the leading dashes.\n options: List[str] = [] # contains the name following the dashes.\n\n dash = \"-\" if len(self.name) == 1 else \"--\"\n option = f\"{self.prefix}{self.name}\"\n\n if self.field.metadata.get(\"positional\"):\n # Can't be positional AND have flags at same time. Also, need dest to be be this and not just option.\n return [self.dest]\n\n dashes.append(dash)\n options.append(option)\n\n if dash == \"-\":\n # also add a double-dash option:\n dashes.append(\"--\")\n options.append(option)\n\n # add all the aliases that were passed to the `field` function.\n for alias in self.aliases:\n if alias.startswith(\"--\"):\n dash = \"--\"\n name = alias[2:]\n elif alias.startswith(\"-\"):\n dash = \"-\"\n name = alias[1:]\n else:\n dash = \"-\" if len(alias) == 1 else \"--\"\n name = alias\n option = f\"{self.prefix}{name}\"\n\n dashes.append(dash)\n options.append(option)\n\n # Additionally, add all name variants with the \"_\" replaced with \"-\".\n # For example, \"--no-cache\" will correctly set the `no_cache` attribute,\n # even if an alias isn't explicitly created.\n\n if FieldWrapper.add_dash_variants:\n additional_options = [\n option.replace(\"_\", \"-\") for option in options if \"_\" in option\n ]\n additional_dashes = [\n \"-\" if len(option) == 1 else \"--\" for option in additional_options\n ]\n options.extend(additional_options)\n dashes.extend(additional_dashes)\n\n if type(self).add_dest_to_option_strings:\n dashes.append(\"-\" if len(self.dest) == 1 else \"--\")\n options.append(self.dest)\n\n # remove duplicates by creating a set.\n option_strings = set(f\"{dash}{option}\" for dash, option in zip(dashes, options))\n # TODO: possibly sort the option strings, if argparse doesn't do it\n # already.\n return list(sorted(option_strings, key=len))",
"def fix_sys_argv_quotes(self, cmd):\n # handle fixing quotes\n # case 1: \"--val\", \" -nlev 276 \"\n # case 2: \"-val\" , \" -nlev 276 \"\n # case 3: CAM_CONFIG_OPTS=\" -nlev 276 \"\n for i, item in enumerate(cmd):\n if re.match(\"[-]{1,2}val\", item) is not None:\n if i + 1 >= len(cmd):\n continue\n\n # only quote if value contains spaces\n if \" \" in cmd[i + 1]:\n cmd[i + 1] = f'\"{cmd[i + 1]}\"'\n else:\n m = re.search(\"([^=]*)=(.*)\", item)\n\n if m is None:\n continue\n\n g = m.groups()\n\n # only quote if value contains spaces\n if \" \" in g[1]:\n cmd[i] = f'{g[0]}=\"{g[1]}\"'\n\n return cmd",
"def sanitize_options(options):\n sanitized_options = []\n for option in options:\n if isinstance(option, str):\n option = os.path.basename(option)\n sanitized_options.append(option)\n return sanitized_options",
"def build_cli_extra(optargs):\n\n def render(k, v):\n if not isinstance(k, str):\n raise TypeError(\n \"Option name isn't a string: {} ({})\".format(k, type(k)))\n if v is None:\n return k\n if is_collection_like(v):\n v = \" \".join(map(str, v))\n return \"{} {}\".format(k, v)\n\n try:\n data_iter = optargs.items()\n except AttributeError:\n data_iter = optargs\n\n return \" \".join(render(*kv) for kv in data_iter)",
"def convert_name(value: str) -> str:\n return \"--\" + value.replace(\"_\", \"-\")",
"def option_maker(self):\n pass",
"def get_attr_name (self, long_option):\r\n return string.translate(long_option, longopt_xlate)",
"def _createMenuPathName(self, name):\n # hide anything between brackets\n name = re.sub(\"\\(.*\\)\", \"\", name)\n # replace invalid chars\n name = name.replace(\" \", \"_\")\n if name and name[0] in \"0123456789_\":\n name = \"_\" + name\n name = re.sub(\"[^a-zA-z_0-9]\", \"\", name)\n return name.lower()",
"def format_options_name(operation):\n operation = operation.split('#')[-1]\n op_class, op_function = operation.split('.')\n op_class = operations_name(op_class)\n return f\"{op_class}_{op_function}_options\"",
"def _command_name_normalized(cls, command: str) -> str:\n return command.lower().replace(\"-\", \"_\")",
"def quote_names(self):\n return \"\"\"--quote-names\"\"\"",
"def InternalArgNameFrom(arg_external_name):\n return arg_external_name.replace('-', '_')",
"def ExternalArgNameFrom(arg_internal_name):\n return arg_internal_name.replace('_', '-')",
"def optSetStrNr(*args):\n return _optcc.optSetStrNr(*args)",
"def option_group_name(self) -> str:\n ...",
"def skip_opt(self):\n return \"\"\"--skip-opt\"\"\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a given section as a dictionary (empty if the section doesn't exist). | def get_section(self, section):
section_dict = {}
if self.has_section(section):
for option in self.options(section):
section_dict[option] = self.get(section, option)
return section_dict | [
"def get_section(self, section):\n output = {}\n for option in self.__config[section]:\n output[option] = self.__config[section][option]\n return output",
"def get_section(section):",
"def _extract_section(section_content):\n lines = section_content.split(\"\\n\")\n\n section_dict = OrderedDict()\n for line in lines:\n # drop the comment\n if \"!\" in line:\n st_comment = line.find(\"!\")\n line = line[:st_comment]\n\n exps = line.strip().split(\"=\")\n if len(exps) != 2:\n continue\n\n arg_name = exps[0].strip()\n arg_values = [v.strip() for v in exps[1].split(\",\") if v.strip()]\n\n section_dict[arg_name] = arg_values\n return section_dict",
"def _find_section_values(self, section: str) -> Optional[Dict]:\n\n def recurse(mapping: Dict, *, remaining_sections: List[str]) -> Optional[Dict]:\n if not remaining_sections:\n return None\n current_section = remaining_sections[0]\n if current_section not in mapping:\n return None\n section_values = mapping[current_section]\n if len(remaining_sections) > 1:\n return recurse(section_values, remaining_sections=remaining_sections[1:])\n if not self._section_explicitly_defined(section_values):\n return None\n return cast(Dict, section_values)\n\n return recurse(mapping=self.values, remaining_sections=section.split(\".\"))",
"def add_section(self, section):\n if not self.has_section(section):\n self[section] = dict()",
"def __call__(self, section_key):\r\n section_key = self.section_key(section_key)\r\n return self.sections.get(section_key, None)",
"def get_strings_section(config, section):\n options = config.options(section)\n section_dict = {} \n for option in options:\n section_dict[option] = config.get(section, option)\n return section_dict",
"def _confGetSection(conf, section):\n\ttry:\n\t\toptions = {}\n\t\tfor i in conf.items(section):\n\t\t\toptions[i[0]] = i[1]\n\t\treturn options\n\texcept ConfigParser.Error:\n\t\treturn None # ignore missing values",
"def ProcessConfigSection(\n filename: str, section_name: str = None\n) -> Dict[str, str]:\n\n # TODO(b/286571605): Replace typing when python 3.5 is unsupported.\n dictionary = {} # type: Dict[str, str]\n if not os.path.exists(filename):\n return dictionary\n with open(filename) as rcfile:\n in_section = not section_name\n for line in rcfile:\n if line.lstrip().startswith('[') and line.rstrip().endswith(']'):\n next_section = line.strip()[1:-1]\n in_section = section_name == next_section\n continue\n elif not in_section:\n continue\n elif line.lstrip().startswith('#') or not line.strip():\n continue\n flag, equalsign, value = line.partition('=')\n # if no value given, assume stringified boolean true\n if not equalsign:\n value = 'true'\n flag = flag.strip()\n value = value.strip()\n while flag.startswith('-'):\n flag = flag[1:]\n dictionary[flag] = value\n return dictionary",
"def section_to_config(section):\n new_conf = ConfigObj()\n for (key, value) in section.items():\n new_conf[key] = value\n return new_conf",
"def getSection(self,index):\n addr = HopperLowLevel.getSectionAddress(self.__internal_segment_addr__, index)\n if addr == 0:\n return None\n return Section(addr)",
"def get_numbers_section(config, section):\n options = config.options(section)\n section_dict = {} \n for option in options:\n if option in [\"tc\", \"ntr\", \"depth\"]:\n section_dict[option] = config.getint(section, option)\n else:\n try:\n section_dict[option] = config.getfloat(section, option)\n except ValueError:\n opt_list = config.get(section, option).split(',')\n section_dict[option] = np.array([\n float(opt) for opt in opt_list])\n return section_dict",
"def get_section(cfg, section):\n section_lines = []\n is_append_section = False\n\n for line in cfg.splitlines():\n line = line.strip()\n\n if line.startswith('section') and not is_append_section:\n cfg_section = line.split('=', 1)[1].strip()\n if cfg_section == section:\n is_append_section = True\n elif line.startswith('section') and is_append_section:\n break # skip any subsequent sections\n\n if is_append_section:\n section_lines.append(line)\n\n return section_lines",
"def __getitem__(self, section_name):\n return self._toml[section_name]",
"def getSection(self):\n return self.getSegment().getSectionAtAddress(self.getEntryPoint())",
"def _make_section(self, section_header):\n name = self._get_section_name(section_header)\n sectype = section_header['sh_type']\n\n if sectype == 'SHT_STRTAB':\n return StringTableSection(section_header, name, self)\n elif sectype == 'SHT_NULL':\n return NullSection(section_header, name, self)\n elif sectype in ('SHT_SYMTAB', 'SHT_DYNSYM', 'SHT_SUNW_LDYNSYM'):\n return self._make_symbol_table_section(section_header, name)\n elif sectype == 'SHT_SUNW_syminfo':\n return self._make_sunwsyminfo_table_section(section_header, name)\n elif sectype == 'SHT_GNU_verneed':\n return self._make_gnu_verneed_section(section_header, name)\n elif sectype == 'SHT_GNU_verdef':\n return self._make_gnu_verdef_section(section_header, name)\n elif sectype == 'SHT_GNU_versym':\n return self._make_gnu_versym_section(section_header, name)\n elif sectype in ('SHT_REL', 'SHT_RELA'):\n return RelocationSection(section_header, name, self)\n elif sectype == 'SHT_DYNAMIC':\n return DynamicSection(section_header, name, self)\n elif sectype == 'SHT_NOTE':\n return NoteSection(section_header, name, self)\n elif sectype == 'SHT_PROGBITS' and name == '.stab':\n return StabSection(section_header, name, self)\n elif sectype == 'SHT_ARM_ATTRIBUTES':\n return ARMAttributesSection(section_header, name, self)\n else:\n return Section(section_header, name, self)",
"def create_section(section: str):\n if section == \"A\":\n return A\n elif section == \"B\":\n return B\n elif section == \"C\":\n return C\n elif section == \"1\":\n return [A[0], B[0], C[0]]\n elif section == \"2\":\n return [A[1], B[1], C[1]]\n elif section == \"3\":\n return [A[2], B[2], C[2]]\n elif section == '/':\n return [A[2], B[1], C[0]]\n elif section == \"\\\\\":\n return [A[0], B[1], C[2]]\n return None",
"def _parse_section(self, section):\n if '/' in section:\n return section.split('/')\n else:\n return ['main', section]",
"def get_section(section_index):\n return lp_start_end_data[section_index]",
"def _section(self, k):\r\n if k not in self._sections:\r\n self._sections[k] = ConfigSection(self, k)\r\n return self._sections[k]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test, whether the encoding of `stream` matches `encoding`. Returns | def check_encoding(stream, encoding):
try:
return codecs.lookup(stream.encoding) == codecs.lookup(encoding)
except (LookupError, AttributeError, TypeError):
return None | [
"def test_encoding_detection():\n \n url = 'http://lavr.github.io/python-emails/tests/requests/some-utf8-text.html'\n expected_content = u'我需要单间。' # Chinese is for example only. Any other non-european encodings broken too.\n\n r =\trequests.get(url)\n\n # Response.apparent_encoding is good\n assert r.apparent_encoding == 'utf-8'\n real_text = unicode(r.content, r.apparent_encoding)\n assert expected_content in real_text\n\n # but Response.text is broken\n # (the reason is: commit a0ae2e6)\n assert expected_content in r.text",
"def detect_encoding(readline):\n try:\n filename = readline.__self__.name\n except AttributeError:\n filename = None\n bom_found = False\n encoding = None\n default = 'ascii'\n\n def read_or_stop():\n try:\n return readline()\n except StopIteration:\n return b''\n\n def find_cookie(line):\n try:\n # Decode as ASCII, which is Python 2 default\n line_string = line.decode('ascii')\n except UnicodeDecodeError:\n msg = \"invalid or missing encoding declaration\"\n if filename is not None:\n msg = '{} for {!r}'.format(msg, filename)\n raise SyntaxError(msg)\n\n match = cookie_re.match(line_string)\n if not match:\n return None\n encoding = _get_normal_name(match.group(1))\n try:\n lookup(encoding)\n except LookupError:\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = \"unknown encoding: \" + encoding\n else:\n msg = \"unknown encoding for {!r}: {}\".format(filename,\n encoding)\n raise SyntaxError(msg)\n\n if bom_found:\n if encoding != 'utf-8':\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = 'encoding problem: utf-8'\n else:\n msg = 'encoding problem for {!r}: utf-8'\n msg = msg.format(filename)\n raise SyntaxError(msg)\n encoding += '-sig'\n return encoding\n\n first = read_or_stop()\n if first.startswith(BOM_UTF8):\n bom_found = True\n first = first[3:]\n default = 'utf-8-sig'\n if not first:\n return default, []\n\n encoding = find_cookie(first)\n if encoding:\n return encoding, [first]\n if not blank_re.match(first):\n return default, [first]\n\n second = read_or_stop()\n if not second:\n return default, [first]\n\n encoding = find_cookie(second)\n if encoding:\n return encoding, [first, second]\n\n return default, [first, second]",
"def detect_encoding(file_path: str) -> str:\n detector = UniversalDetector()\n\n with open(file_path, 'rb') as file:\n for line in file.readlines():\n detector.feed(line)\n if detector.done:\n break\n\n detector.close()\n\n encoding = detector.result['encoding']\n logger.debug(f'Detected encoding for file \"{file_path}\": {encoding}')\n\n return encoding",
"def is_known_charset(charset):\n try:\n codecs.lookup(charset)\n except LookupError:\n return False\n return True",
"def sniff_encoding(fh):\n sniff = sniff_file(fh)\n\n # WoS files typically include a BOM, which we want to strip from the actual\n # data. The encodings 'utf-8-sig' and 'utf-16' do this for UTF-8 and UTF-16\n # respectively. When dealing with files with BOM, avoid the encodings\n # 'utf-8' (which is fine for non-BOM UTF-8), 'utf-16-le', and 'utf-16-be'.\n # See e.g. http://stackoverflow.com/a/8827604\n encodings = {codecs.BOM_UTF16: 'utf-16',\n codecs.BOM_UTF8: 'utf-8-sig'}\n for bom, encoding in encodings.items():\n if sniff.startswith(bom):\n return encoding\n # WoS export files are either UTF-8 or UTF-16\n return 'utf-8'",
"def detect_file_encoding(self):\n\t\twith open(self.wq, 'r') as filehandle: # read in the file data\n\t\t\tfile_data = filehandle.read()\n\t\t\tself.detected_encoding = chardet.detect(file_data)['encoding']\n\n\t\tif self.detected_encoding == \"UTF-16\":\n\t\t\tself.detected_encoding = \"utf_16_le\" # we'll use this encoding in this case - if it detects UTF-16 off the YSI\n\t\t\t\t\t\t\t\t\t\t\t\t\t# then it's probably UCS-2 LE BOM, AKA UTF-16 LE BOM (sort of)",
"def detect_encoding(readline):\r\n bom_found = False\r\n encoding = None\r\n default = 'utf-8'\r\n def read_or_stop():\r\n try:\r\n return readline()\r\n except StopIteration:\r\n return bytes()\r\n\r\n def find_cookie(line):\r\n try:\r\n line_string = line.decode('ascii')\r\n except UnicodeDecodeError:\r\n return None\r\n\r\n matches = cookie_re.findall(line_string)\r\n if not matches:\r\n return None\r\n encoding = _get_normal_name(matches[0])\r\n try:\r\n codec = lookup(encoding)\r\n except LookupError:\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError(\"unknown encoding: \" + encoding)\r\n\r\n if bom_found:\r\n if codec.name != 'utf-8':\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError('encoding problem: utf-8')\r\n encoding += '-sig'\r\n return encoding\r\n\r\n first = read_or_stop()\r\n if first.startswith(BOM_UTF8):\r\n bom_found = True\r\n first = first[3:]\r\n default = 'utf-8-sig'\r\n if not first:\r\n return default, []\r\n\r\n encoding = find_cookie(first)\r\n if encoding:\r\n return encoding, [first]\r\n\r\n second = read_or_stop()\r\n if not second:\r\n return default, [first]\r\n\r\n encoding = find_cookie(second)\r\n if encoding:\r\n return encoding, [first, second]\r\n\r\n return default, [first, second]",
"def test_encoding_win(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'bad_codec.csv'))\n self.assertEqual(guessed_encoding.lower(), u'windows-1252')",
"def hasEncoding(dbname = None):\n\n # use default db if none passed\n if dbname is None:\n dbname = _defaultDB\n\n # get the db definition\n if dbname not in _databases:\n raise DBRuntimeException(\"Unknown database [%s]\" % dbname)\n dbDef = _databases[dbname]\n\n # get the loosetypes attribute (default to False)\n return dbDef[\"def\"].get(\"encoding\")",
"def check(video):\n\n\tif video.codec != TARGET_CODEC:\n\t\treturn True\n\n\tif video.metadata.get('encoder', None) in TODO_ENCODERS:\n\t\treturn True\n\n\treturn False",
"def decode(self):\n ce = self.headers.get_first(\"content-encoding\")\n if not self.body or ce not in encoding.ENCODINGS:\n return False\n data = encoding.decode(ce, self.body)\n if data is None:\n return False\n self.body = data\n del self.headers[\"content-encoding\"]\n return True",
"def is_valid_utf8(o):\n try:\n o.decode(\"utf-8\")\n except (UnicodeDecodeError, AttributeError):\n return False\n else:\n return True",
"def get_file_encoding(content):\r\n encoding = None\r\n try:\r\n lines_to_check = content.split(\"\\n\", 2)\r\n for index in range(2):\r\n if len(lines_to_check) > index:\r\n line_encoding = _search_coding_line(lines_to_check[index])\r\n if line_encoding:\r\n encoding = line_encoding\r\n break\r\n except UnicodeDecodeError as error:\r\n #add logger\r\n print(error)\r\n #if not encoding is set then use UTF-8 as default\r\n if encoding is None:\r\n encoding = \"UTF-8\"\r\n return encoding",
"def testInvalidContentEncoding(self):\n r = Request.blank(\"/\").get_response(filters.decode_filter(invalid_content_encoding_server))\n self.assert_(\"lying about it's encoding\" in r.text, r.body)",
"def is_exact_taint(stream) -> bool:\n # The fuzzer has to get 8 characters right. This may be a bit much,\n # however, when found it shows a high level of control over the data.\n if stream == 'FROMFUZZ':\n return True\n\n return False",
"def test_encoding_amazon_de_reviews_is_utf8(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon_de_reviews_200.csv'))\n self.assertEqual(guessed_encoding.lower(), u'utf-8')",
"def check_file_encodings(self, file: str = None, encoding: str = None, cycle_bit: int = 0) -> str: \n if not file:\n file = self.filename\n if not encoding:\n encoding = self.encoding \n \n if cycle_bit > 0:\n common_encodings = [\"UTF-8\",\"Latin-1\", \"UTF-16\", \"ascii\", \"cp037\", \"cp437\", \"UTF-32\"]\n return common_encodings[cycle_bit]\n \n else:\n try:\n with open(file, encoding = encoding) as f:\n f.seek(10000,0)\n f.readline()\n f.close()\n return encoding\n except:\n common_encodings = [\"Latin-1\", \"UTF-16\", \"ascii\", \"cp037\", \"cp437\", \"UTF-32\"]\n \n for codec in common_encodings:\n try:\n with open(file, encoding = codec) as f:\n f.readline()\n f.close()\n return codec\n except:\n continue\n print(\"Your file is an unusual type - can you specify the encoding for us?\")",
"def verifyContentType(self, desiredType, desiredCharset=\"\"):\n content_type = self.headers.get('Content-Type')\n \n if content_type == None:\n return False\n \n parts = content_type.split(\";\")\n mediatype = parts[0]\n charset = parts[1] if len(parts) > 1 else \"\"\n \n isDesiredMediaType = True if mediatype.lower() == desiredType.lower() else False\n\n if len(desiredCharset):\n isDesiredCharset = True if desiredCharset.lower() in charset.lower() else False\n else:\n isDesiredCharset = True\n \n return isDesiredMediaType and isDesiredCharset",
"def charset_exists(charset):\r\n import codecs\r\n try:\r\n codecs.lookup(charset)\r\n except LookupError:\r\n return False\r\n return True",
"def test_encoding_ascii(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon-sample-1000.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decode a string, `data`, heuristically. Raise UnicodeError if unsuccessful. The client application should call ``locale.setlocale`` at the | def decode(self, data):
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, unicode), (
'input encoding is "unicode" '
'but input is not a unicode object')
if isinstance(data, unicode):
# Accept unicode even if self.encoding != 'unicode'.
return data
if self.encoding:
# We believe the user/application when the encoding is
# explicitly given.
encodings = [self.encoding]
else:
data_encoding = self.determine_encoding_from_data(data)
if data_encoding:
# If the data declares its encoding (explicitly or via a BOM),
# we believe it.
encodings = [data_encoding]
else:
# Apply heuristics only if no encoding is explicitly given and
# no BOM found. Start with UTF-8, because that only matches
# data that *IS* UTF-8:
encodings = ['utf-8', 'latin-1']
if locale_encoding:
encodings.insert(1, locale_encoding)
for enc in encodings:
try:
decoded = unicode(data, enc, self.error_handler)
self.successful_encoding = enc
# Return decoded, removing BOMs.
return decoded.replace(u'\ufeff', u'')
except (UnicodeError, LookupError), err:
error = err # in Python 3, the <exception instance> is
# local to the except clause
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: '
'%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
ErrorString(error))) | [
"def smart_decode(data, charset):\n try:\n if isinstance(data, str):\n # It's already unicode so just return it\n return data\n else:\n return data.decode(charset, errors='strict')\n\n except UnicodeDecodeError: # PY3\n # Looks like the charset lies, try to detect it\n return guess_encoding_and_decode(data, claimed=charset)\n\n except LookupError:\n # They gave us a crap encoding\n return guess_encoding_and_decode(data, claimed=charset)",
"def decoder(data):\n\n def next_byte(_it, start, count):\n try:\n return next(_it)[1]\n except StopIteration:\n raise UnicodeDecodeError(\n NAME, data, start, start + count, \"incomplete byte sequence\"\n )\n\n it = iter(enumerate(data))\n for i, d in it:\n if d == 0x00: # 00000000\n raise UnicodeDecodeError(\n NAME, data, i, i + 1, \"embedded zero-byte not allowed\"\n )\n\n if d & 0x80: # 1xxxxxxx\n if d & 0x40: # 11xxxxxx\n if d & 0x20: # 111xxxxx\n if d & 0x10: # 1111xxxx\n raise UnicodeDecodeError(\n NAME, data, i, i + 1, \"invalid encoding character\"\n )\n\n if d == 0xED:\n value = 0\n for i1, dm in enumerate(DECODE_MAP[6]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 1110xxxx\n value = d & 0x0F\n for i1, dm in enumerate(DECODE_MAP[3]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 110xxxxx\n value = d & 0x1F\n for i1, dm in enumerate(DECODE_MAP[2]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 10xxxxxx\n raise UnicodeDecodeError(\n NAME, data, i, i + 1, \"misplaced continuation character\"\n )\n else: # 0xxxxxxx\n value = d\n # noinspection PyCompatibility\n yield mutf8_unichr(value)",
"def decode(self, data): # pragma: no cover\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')",
"def decode(self, s):\n\n if isinstance(s, unicode):\n return s\n for (name, decoder) in self.decoders:\n try:\n return decoder(s)[0]\n except ValueError:\n logger.verbose(\"Encoding '%s' failed for string %r\" % (name, s))\n\n if self.fallback_decoder is not None:\n (name, decoder) = self.fallback_decoder\n return decoder(s, 'replace')[0]\n else:\n raise UnicodeError()",
"def decode_utf8(self, text):\n try:\n return text.decode('utf-8', 'strict') if self.utf8 else text.decode(self.fallback, errors='replace')\n except UnicodeDecodeError:\n return text.decode(self.fallback, 'replace')",
"def urlDecode(self, data):\n # type: (Union[str, bytearray]) -> Union[str, bytearray]",
"def decode(cls, data):\n raise NotImplementedError()",
"def test_unicode_decode_errors(self):\n self.assertEqual(decode.decode('Why, %c', b'\\x01', True),\n 'Why, ' + error('%c ERROR', -1))\n\n self.assertEqual(\n decode.decode('%sXY%+ldxy%u', b'\\x83N\\x80!\\x01\\x02', True),\n '{}XY{}xy{}'.format(error('%s ERROR', \"'N\\\\x80!'\"),\n error('%+ld SKIPPED', -1),\n error('%u SKIPPED', 1)))\n\n self.assertEqual(\n decode.decode('%s%lld%9u', b'\\x82$\\x80\\x80', True),\n '{0}{1}{2}'.format(error(\"%s ERROR ('$\\\\x80')\"),\n error('%lld SKIPPED'), error('%9u SKIPPED')))\n\n self.assertEqual(decode.decode('%c', b'\\xff\\xff\\xff\\xff\\x0f', True),\n error('%c ERROR', -2147483648))",
"def decode(data):\n obj , data = _decode(data)\n if data and len(data) == 0:\n return obj",
"def decode(self, data):\n\t\traise NotImplementedError()",
"def decode_string(s):\n try:\n return s.decode('utf-8')\n except (UnicodeDecodeError, AttributeError):\n return s",
"def decode(self, text):\n # only decode byte strings into unicode if it hasn't already\n # been done by a subclass\n if isinstance(text, six.text_type):\n return text\n\n # empty text? nothing to decode\n if not text:\n return u''\n\n # use chardet to automatically detect the encoding text\n result = chardet.detect(text)\n return text.decode(result['encoding'])",
"def decode(data):\n try:\n return _decode(data)\n except VecBufEOB:\n raise DecoderError('Incomplete encoded data')",
"def decode(self, line):\n\n # loc = locale.getdefaultlocale()[1]\n\n try:\n line = line.decode(\"utf-8\")\n except Exception:\n pass\n return line",
"def decode_modified_utf8(data, errors=\"strict\"):\n value, length = \"\", 0\n it = iter(decoder(byte_to_int(d) for d in data))\n while True:\n try:\n value += next(it)\n length += 1\n except StopIteration:\n break\n except UnicodeDecodeError as e:\n if errors == \"strict\":\n raise e\n\n if errors == \"ignore\":\n pass\n elif errors == \"replace\":\n value += \"\\uFFFD\"\n length += 1\n return value, length",
"def decode_unicode_string(string):\n if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):\n return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])\n return string",
"def decode_bytes(data: bytes, default_encoding: str = 'utf-8') -> str:\n encoding = default_encoding\n if HAS_CHARDET:\n detected = chardet.detect(data) or {}\n confidence = detected.get('confidence') or 0\n if confidence >= 0.5:\n encoding = detected['encoding']\n logger.debug(\n \"Data encoding detected as '{}' \"\n \"with a confidence of {}\".format(encoding, confidence))\n\n try:\n return data.decode(encoding)\n except UnicodeDecodeError:\n raise ActivityFailed(\n \"Failed to decode bytes using encoding '{}'\".format(encoding))",
"def decode(byte_data):\n if byte_data is None:\n return None\n return byte_data.decode()",
"def decode(val):\n if isinstance(val, str):\n # it was an already decoded unicode object\n return val\n else:\n # assume it is an encoded bytes object\n return val.decode('utf-8')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
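The decode row above trusts an explicit or data-declared encoding and otherwise falls back through utf-8, the locale encoding, and latin-1. A minimal sketch of that fallback order in standalone form, under the assumption of a free function rather than a reader method (the name `heuristic_decode` and its exact candidate list are illustrative, not part of the dataset row):

    import locale

    # Assumed fallback order, mirroring the row above:
    # explicit encoding -> utf-8 -> locale encoding -> latin-1.
    def heuristic_decode(data, encoding=None):
        if encoding:
            candidates = [encoding]
        else:
            candidates = ['utf-8', locale.getpreferredencoding(False), 'latin-1']
        last_error = None
        for enc in candidates:
            try:
                # Strip any BOM that survives decoding.
                return data.decode(enc).replace('\ufeff', '')
            except (UnicodeError, LookupError) as err:
                last_error = err
        raise UnicodeError('unable to decode input data: %s' % last_error)

    print(heuristic_decode(b'caf\xc3\xa9'))  # 'café', decoded as utf-8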
Try to determine the encoding of `data` by looking in `data`. Check for a byte order mark (BOM) or an encoding declaration. | def determine_encoding_from_data(self, data):
# check for a byte order mark:
for start_bytes, encoding in self.byte_order_marks:
if data.startswith(start_bytes):
return encoding
# check for an encoding declaration pattern in first 2 lines of file:
for line in data.splitlines()[:2]:
match = self.coding_slug.search(line)
if match:
return match.group(1).decode('ascii')
return None | [
"def get_data_encoding():",
"def strip_byte_order_mark(cls, data):\n encoding = None\n if isinstance(data, str):\n return (\n data, encoding)\n else:\n if len(data) >= 4:\n if data[:2] == b'\\xfe\\xff':\n if data[2:4] != '\\x00\\x00':\n encoding = 'utf-16be'\n data = data[2:]\n if len(data) >= 4:\n if data[:2] == b'\\xff\\xfe':\n if data[2:4] != '\\x00\\x00':\n encoding = 'utf-16le'\n data = data[2:]\n if data[:3] == b'\\xef\\xbb\\xbf':\n encoding = 'utf-8'\n data = data[3:]\n else:\n if data[:4] == b'\\x00\\x00\\xfe\\xff':\n encoding = 'utf-32be'\n data = data[4:]\n else:\n if data[:4] == b'\\xff\\xfe\\x00\\x00':\n encoding = 'utf-32le'\n data = data[4:]\n return (\n data, encoding)",
"def AutoDetectEncoding(self, srcFile):\n srcFile.seek(0)\n magic = srcFile.read(4)\n while len(magic) < 4:\n magic = magic + 'Q'\n if magic[:2] == '\\xff\\xfe' or magic[:2] == '\\xfe\\xff':\n if magic[2:] != '\\x00\\x00':\n magic = magic[:2]\n elif magic[:3] == '\\xef\\xbb\\xbf':\n magic = mage[:3]\n self.encoding, seekPos, self.bom = self.MagicTable.get(\n magic, ('utf-8', 0, False))\n srcFile.seek(seekPos)",
"def detect_file_encoding(self):\n\t\twith open(self.wq, 'r') as filehandle: # read in the file data\n\t\t\tfile_data = filehandle.read()\n\t\t\tself.detected_encoding = chardet.detect(file_data)['encoding']\n\n\t\tif self.detected_encoding == \"UTF-16\":\n\t\t\tself.detected_encoding = \"utf_16_le\" # we'll use this encoding in this case - if it detects UTF-16 off the YSI\n\t\t\t\t\t\t\t\t\t\t\t\t\t# then it's probably UCS-2 LE BOM, AKA UTF-16 LE BOM (sort of)",
"def detect_encoding(readline):\n try:\n filename = readline.__self__.name\n except AttributeError:\n filename = None\n bom_found = False\n encoding = None\n default = 'ascii'\n\n def read_or_stop():\n try:\n return readline()\n except StopIteration:\n return b''\n\n def find_cookie(line):\n try:\n # Decode as ASCII, which is Python 2 default\n line_string = line.decode('ascii')\n except UnicodeDecodeError:\n msg = \"invalid or missing encoding declaration\"\n if filename is not None:\n msg = '{} for {!r}'.format(msg, filename)\n raise SyntaxError(msg)\n\n match = cookie_re.match(line_string)\n if not match:\n return None\n encoding = _get_normal_name(match.group(1))\n try:\n lookup(encoding)\n except LookupError:\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = \"unknown encoding: \" + encoding\n else:\n msg = \"unknown encoding for {!r}: {}\".format(filename,\n encoding)\n raise SyntaxError(msg)\n\n if bom_found:\n if encoding != 'utf-8':\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = 'encoding problem: utf-8'\n else:\n msg = 'encoding problem for {!r}: utf-8'\n msg = msg.format(filename)\n raise SyntaxError(msg)\n encoding += '-sig'\n return encoding\n\n first = read_or_stop()\n if first.startswith(BOM_UTF8):\n bom_found = True\n first = first[3:]\n default = 'utf-8-sig'\n if not first:\n return default, []\n\n encoding = find_cookie(first)\n if encoding:\n return encoding, [first]\n if not blank_re.match(first):\n return default, [first]\n\n second = read_or_stop()\n if not second:\n return default, [first]\n\n encoding = find_cookie(second)\n if encoding:\n return encoding, [first, second]\n\n return default, [first, second]",
"def get_file_encoding(content):\r\n encoding = None\r\n try:\r\n lines_to_check = content.split(\"\\n\", 2)\r\n for index in range(2):\r\n if len(lines_to_check) > index:\r\n line_encoding = _search_coding_line(lines_to_check[index])\r\n if line_encoding:\r\n encoding = line_encoding\r\n break\r\n except UnicodeDecodeError as error:\r\n #add logger\r\n print(error)\r\n #if not encoding is set then use UTF-8 as default\r\n if encoding is None:\r\n encoding = \"UTF-8\"\r\n return encoding",
"def test_encoding_win(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'bad_codec.csv'))\n self.assertEqual(guessed_encoding.lower(), u'windows-1252')",
"def test_encoding_amazon_de_reviews_is_utf8(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon_de_reviews_200.csv'))\n self.assertEqual(guessed_encoding.lower(), u'utf-8')",
"def test_encoding_empty(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'empty_file.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')",
"def decode(self, data):\r\n if self.encoding and self.encoding.lower() == 'unicode':\r\n assert isinstance(data, unicode), (\r\n 'input encoding is \"unicode\" '\r\n 'but input is not a unicode object')\r\n if isinstance(data, unicode):\r\n # Accept unicode even if self.encoding != 'unicode'.\r\n return data\r\n if self.encoding:\r\n # We believe the user/application when the encoding is\r\n # explicitly given.\r\n encodings = [self.encoding]\r\n else:\r\n data_encoding = self.determine_encoding_from_data(data)\r\n if data_encoding:\r\n # If the data declares its encoding (explicitly or via a BOM),\r\n # we believe it.\r\n encodings = [data_encoding]\r\n else:\r\n # Apply heuristics only if no encoding is explicitly given and\r\n # no BOM found. Start with UTF-8, because that only matches\r\n # data that *IS* UTF-8:\r\n encodings = ['utf-8', 'latin-1']\r\n if locale_encoding:\r\n encodings.insert(1, locale_encoding)\r\n for enc in encodings:\r\n try:\r\n decoded = unicode(data, enc, self.error_handler)\r\n self.successful_encoding = enc\r\n # Return decoded, removing BOMs.\r\n return decoded.replace(u'\\ufeff', u'')\r\n except (UnicodeError, LookupError), err:\r\n error = err # in Python 3, the <exception instance> is\r\n # local to the except clause\r\n raise UnicodeError(\r\n 'Unable to decode input data. Tried the following encodings: '\r\n '%s.\\n(%s)' % (', '.join([repr(enc) for enc in encodings]),\r\n ErrorString(error)))",
"def detect_encoding(readline):\r\n bom_found = False\r\n encoding = None\r\n default = 'utf-8'\r\n def read_or_stop():\r\n try:\r\n return readline()\r\n except StopIteration:\r\n return bytes()\r\n\r\n def find_cookie(line):\r\n try:\r\n line_string = line.decode('ascii')\r\n except UnicodeDecodeError:\r\n return None\r\n\r\n matches = cookie_re.findall(line_string)\r\n if not matches:\r\n return None\r\n encoding = _get_normal_name(matches[0])\r\n try:\r\n codec = lookup(encoding)\r\n except LookupError:\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError(\"unknown encoding: \" + encoding)\r\n\r\n if bom_found:\r\n if codec.name != 'utf-8':\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError('encoding problem: utf-8')\r\n encoding += '-sig'\r\n return encoding\r\n\r\n first = read_or_stop()\r\n if first.startswith(BOM_UTF8):\r\n bom_found = True\r\n first = first[3:]\r\n default = 'utf-8-sig'\r\n if not first:\r\n return default, []\r\n\r\n encoding = find_cookie(first)\r\n if encoding:\r\n return encoding, [first]\r\n\r\n second = read_or_stop()\r\n if not second:\r\n return default, [first]\r\n\r\n encoding = find_cookie(second)\r\n if encoding:\r\n return encoding, [first, second]\r\n\r\n return default, [first, second]",
"def _validate_charset(data, charset):\n if len(charset) > 1:\n charset_data_length = 0\n for symbol_charset in charset:\n if symbol_charset not in ('A', 'B', 'C'):\n raise Code128.CharsetError\n charset_data_length += 2 if symbol_charset is 'C' else 1\n if charset_data_length != len(data):\n raise Code128.CharsetLengthError\n elif len(charset) == 1:\n if charset not in ('A', 'B', 'C'):\n raise Code128.CharsetError\n elif charset is not None:\n raise Code128.CharsetError",
"def validUTF8(data):\n if data is None or len(data) == 0:\n return True\n numOfFiller = 0\n for byte in data:\n if numOfFiller > 0:\n tmp = verify_byte(byte, 5)\n numOfFiller = numOfFiller - 1\n if not tmp:\n return False\n else:\n if verify_byte(byte, 1):\n numOfFiller = 0\n elif verify_byte(byte, 2):\n numOfFiller = 1\n elif verify_byte(byte, 3):\n numOfFiller = 2\n elif verify_byte(byte, 4):\n numOfFiller = 3\n else:\n return False\n if numOfFiller > 0:\n return False\n return True",
"def get_encoding(byte_string):\n return detect(byte_string)['encoding']",
"def test_encoding_ascii(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon-sample-1000.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')",
"def validUTF8(data):\n # Use maks, to clean byte of anything beyond 8 least significant bits.\n cleanByte = [rawByte & 0b11111111 for rawByte in data]\n\n # Cast to byte type.\n byte = bytes(cleanByte)\n\n # Attempt to decode byte data.\n try:\n byte.decode()\n except UnicodeDecodeError:\n # If decoding fails, return False.\n return False\n\n return True",
"def sniff_encoding(fh):\n sniff = sniff_file(fh)\n\n # WoS files typically include a BOM, which we want to strip from the actual\n # data. The encodings 'utf-8-sig' and 'utf-16' do this for UTF-8 and UTF-16\n # respectively. When dealing with files with BOM, avoid the encodings\n # 'utf-8' (which is fine for non-BOM UTF-8), 'utf-16-le', and 'utf-16-be'.\n # See e.g. http://stackoverflow.com/a/8827604\n encodings = {codecs.BOM_UTF16: 'utf-16',\n codecs.BOM_UTF8: 'utf-8-sig'}\n for bom, encoding in encodings.items():\n if sniff.startswith(bom):\n return encoding\n # WoS export files are either UTF-8 or UTF-16\n return 'utf-8'",
"def smart_decode(data, charset):\n try:\n if isinstance(data, str):\n # It's already unicode so just return it\n return data\n else:\n return data.decode(charset, errors='strict')\n\n except UnicodeDecodeError: # PY3\n # Looks like the charset lies, try to detect it\n return guess_encoding_and_decode(data, claimed=charset)\n\n except LookupError:\n # They gave us a crap encoding\n return guess_encoding_and_decode(data, claimed=charset)",
"def guess_file_encoding(filename, default):\n try:\n f = open(filename, \"rb\")\n the_text = f.read()\n f.close()\n except Exception as details:\n warn(\"Error while trying to guess the encoding of file %s: %s\" \\\n % (filename, details))\n return default\n\n bomdict = { codecs.BOM_UTF8 : 'UTF8',\n codecs.BOM_UTF16_BE : 'UTF-16BE',\n codecs.BOM_UTF16_LE : 'UTF-16LE' }\n\n # check if there is Unicode signature\n for bom, encoding in bomdict.items():\n if the_text.startswith(bom):\n the_text = the_text[len(bom):]\n break\n else:\n bom = None\n encoding = None\n\n if encoding is None: # there was no BOM\n try:\n unicode_text, encoding = guess_encoding(the_text)\n except UnicodeError:\n warn(\"Can't work out the encoding of file '%s'.\" % filename)\n warn(\"Assuming the default encoding: %s\" % default)\n return default\n warn(\"Guessed encoding for file '%s': %s\" % (filename, encoding))\n return encoding",
"def guess_encoding(text: bytes, default: Encoding=DEFAULT_ENCODING) -> Encoding:\n result = chardet.detect(text)\n return normalize_result(result, default=default)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
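The BOM-and-declaration check described in the row above can be exercised on its own. A small sketch assuming a typical BOM table and a PEP 263-style coding regex (`BYTE_ORDER_MARKS`, `CODING_RE`, and `sniff_encoding` are illustrative names, not values taken from the row):

    import codecs
    import re

    # Assumed BOM table: (byte prefix, encoding name).
    BYTE_ORDER_MARKS = [
        (codecs.BOM_UTF8, 'utf-8'),
        (codecs.BOM_UTF16_BE, 'utf-16-be'),
        (codecs.BOM_UTF16_LE, 'utf-16-le'),
    ]
    # PEP 263-style declaration, searched in the first two lines only.
    CODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)')

    def sniff_encoding(data):
        for bom, encoding in BYTE_ORDER_MARKS:
            if data.startswith(bom):
                return encoding
        for line in data.splitlines()[:2]:
            match = CODING_RE.search(line)
            if match:
                return match.group(1).decode('ascii')
        return None

    assert sniff_encoding(codecs.BOM_UTF8 + b'text') == 'utf-8'
    assert sniff_encoding(b'# -*- coding: latin-1 -*-\nx = 1') == 'latin-1'
    assert sniff_encoding(b'plain ascii') is None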
Encode `data`, write it to a single file, and return it. With Python 3 or binary output mode, `data` is returned unchanged, except when specified encoding and output encoding differ. | def write(self, data):
if not self.opened:
self.open()
if ('b' not in self.mode and sys.version_info < (3,0)
or check_encoding(self.destination, self.encoding) is False
):
if sys.version_info >= (3,0) and os.linesep != '\n':
data = data.replace('\n', os.linesep) # fix endings
data = self.encode(data)
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
self.destination.write(data)
except TypeError, e:
if sys.version_info >= (3,0) and isinstance(data, bytes):
try:
self.destination.buffer.write(data)
except AttributeError:
if check_encoding(self.destination,
self.encoding) is False:
raise ValueError('Encoding of %s (%s) differs \n'
' from specified encoding (%s)' %
(self.destination_path or 'destination',
self.destination.encoding, self.encoding))
else:
raise e
except (UnicodeError, LookupError), err:
raise UnicodeError(
'Unable to encode output data. output-encoding is: '
'%s.\n(%s)' % (self.encoding, ErrorString(err)))
finally:
if self.autoclose:
self.close()
return data | [
"def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError as e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError) as err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data",
"def encode(input, output, encoding):\n if encoding == 'base64':\n import base64\n return base64.encode(input, output)\n if encoding == 'quoted-printable':\n import quopri\n return quopri.encode(input, output, 0)\n if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):\n import uu\n return uu.encode(input, output)\n if encoding in ('7bit', '8bit'):\n return output.write(input.read())\n if encoding in encodetab:\n pipethrough(input, encodetab[encoding], output)\n else:\n raise ValueError, \\\n 'unknown Content-Transfer-Encoding: %s' % encoding",
"def write_to_file(self, data):",
"def write_binary_file(output_path, data):\n with open(output_path, \"wb\") as f:\n f.write(data)",
"def compress(data, compression_level):\n buffer = cStringIO.StringIO()\n gz_file = GzipFile(None, 'wb', compression_level, buffer)\n if isinstance(data, unicode):\n data = data.encode(response.charset)\n gz_file.write(data)\n gz_file.close()\n return buffer.getvalue()",
"def encode(self, data):\n\t\traise NotImplementedError()",
"def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))",
"def data_encode(data, encoding=DEFAULT_ENCODING):\r\n # http://stackoverflow.com/questions/1254454/fastest-way-to-convert-a-dicts-keys-values-from-unicode-to-str\r\n if isinstance(data, compat.unicode_type):\r\n return data.encode(encoding)\r\n elif isinstance(data, dict):\r\n return dict(map(data_encode, compat.iteritems(data)))\r\n elif isinstance(data, list) or isinstance(data, tuple):\r\n return list(map(data_encode, data))\r\n else:\r\n return data",
"def write_to_output_file(output_dir, filename, data):\n\n if not output_dir or not prepare_output_dir(output_dir):\n return\n filename = os.path.join(output_dir, filename)\n try:\n with open(filename, 'w') as outfile:\n if isinstance(data, string_types):\n outfile.write(data)\n else:\n json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)\n # pylint: disable=broad-except; do not want serialization/write to break for any reason\n except Exception as exc:\n display.warning(\"Could not write output file {}: {}\".format(filename, exc))",
"def write_file(self, data) -> None:\n pass",
"def write(self, data):\r\n if self.stream is False:\r\n return\r\n if isinstance(data, Exception):\r\n data = unicode(SafeString(data, self.encoding,\r\n self.encoding_errors, self.decoding_errors))\r\n try:\r\n self.stream.write(data)\r\n except UnicodeEncodeError:\r\n self.stream.write(data.encode(self.encoding, self.encoding_errors))\r\n except TypeError: # in Python 3, stderr expects unicode\r\n if self.stream in (sys.stderr, sys.stdout):\r\n self.stream.buffer.write(data) # write bytes to raw stream\r\n else:\r\n self.stream.write(unicode(data, self.encoding,\r\n self.decoding_errors))",
"def write_bytes(self, data):\n # type-check for the buffer interface before truncating the file\n view = memoryview(data)\n with self.open(mode='wb') as f:\n return f.write(view)",
"def zip_compress(data):\n out = io.BytesIO()\n with zipfile.ZipFile(file=out, mode=\"w\") as z:\n with z.open(\"myfile\", \"w\") as zf:\n zf.write(data)\n out.seek(0)\n return out.read()",
"def write_raw_file(self, data: bytes) -> None:\n pass",
"def save_data(self, data):\n file = self.get_file()\n with open(file, \"w\") as f:\n f.write(data)",
"def _save_and_compress(self, filename = None, data = None):\n if os.path.exists(filename):\n os.remove(filename)\n \n fileContents = gzip.open(filename, 'wb', compresslevel = 3)\n pickle.dump(data, fileContents, protocol = pickle.HIGHEST_PROTOCOL)\n fileContents.close()",
"def write(object_data):\n output = pickle.dumps(object_data)\n return output",
"def _dumpPickle(self, data):\r\n \r\n return codecs.encode(cPickle.dumps(data,protocol=cPickle.HIGHEST_PROTOCOL), \"base64\").decode()",
"def write_bytes(out_data):\n if sys.version_info[0] >= 3:\n if isinstance(out_data, type(u'')):\n return out_data.encode('utf-8')\n elif isinstance(out_data, type(b'')):\n return out_data\n else:\n if isinstance(out_data, type(u'')):\n return out_data.encode('utf-8')\n elif isinstance(out_data, type(str(''))):\n return out_data\n msg = \"Invalid value for out_data neither unicode nor byte string: {}\".format(out_data)\n raise ValueError(msg)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Encode `data`, store it in `self.destination`, and return it. | def write(self, data):
self.destination = self.encode(data)
return self.destination | [
"def encode(self, data):\n\t\traise NotImplementedError()",
"def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError, e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError), err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data",
"def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError as e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError) as err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data",
"def urlEncode(self, data):\n # type: (Union[str, bytearray]) -> Union[str,bytearray]",
"def encode_data(self, data):\r\n if data:\r\n data = urlencode(data)\r\n\r\n return data",
"def _op_push_data(self, data):\n\n # expects data in hexadecimal characters and converts appropriately\n # TODO maybe, for convenience, also accept objects for public keys,\n # addresses, etc. and use isinstance and convert manually\n data_bytes = unhexlify(data)\n\n if len(data_bytes) < 0x4c:\n return chr(len(data_bytes)).encode() + data_bytes\n elif len(data_bytes) < 0xff:\n return b'\\x4c' + chr(len(data_bytes)).encode() + data_bytes\n elif len(data_bytes) < 0xffff:\n return b'\\x4d' + struct.pack('<H', len(data_bytes)) + data_bytes\n elif len(data_bytes) < 0xffffffff:\n return b'\\x4e' + struct.pack('<I', len(data_bytes)) + data_bytes\n else:\n raise ValueError(\"Data too large. Cannot push into script\")",
"def encode( data: JSONData ) -> bytes:\n\n try:\n s = json.dumps( data )\n return s.encode( _ENCODING )\n except UnicodeError as e:\n raise ConnectionError( f\"Failed to encode message: '{s}'\" ) from e",
"def base64Encode(self, data):\n # type: (Union[str, bytearray]) -> Union[str,bytearray]",
"def _writeSomeData(self, data):\n sent = self.transport._originalWriteSomeData(data)\n self.dataSentEvent(sent)\n return sent",
"def encode_data(self, data):\r\n return json.dumps(data)",
"def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))",
"def serialize(self, data):\n raise NotImplementedError()",
"def _dumpPickle(self, data):\r\n \r\n return codecs.encode(cPickle.dumps(data,protocol=cPickle.HIGHEST_PROTOCOL), \"base64\").decode()",
"def _encode_data(self, data, **kwargs):\n return json.dumps(data, cls=JSONEncoder, **kwargs)",
"def encode(self, data):\n return_list = [list() for _ in range(self.num_segments)]\n encoder = Encoder(self.min_segments, self.num_segments)\n for data_slice in _slice_generator(data, block_size):\n for segment_list, zfec_share in zip(\n return_list, encoder.encode(data_slice)\n ):\n segment_list.append(zfec_share)\n \n return [\"\".join(sublist) for sublist in return_list]",
"def serialize(self, data) -> str:\n pass",
"def sendToComparer(self, data):\n # type: (bytearray) -> ()",
"def encode(self, data_string):\r\n\r\n if type(data_string) is not bytes:\r\n raise ValueError('Must pass bytes to encode')\r\n\r\n binary_string = ''\r\n\r\n # Match ASCII to entries in the lookup table\r\n for byte in data_string:\r\n binary_string += self.huffman_table[byte]\r\n\r\n # Convert binary string into ASCII\r\n encoded_string = b'';\r\n for i in range(0, len(binary_string), 8):\r\n binary = binary_string[i:i+8]\r\n encoded_string += bytes([int(binary[::-1], 2)])\r\n\r\n # If the huffman-coded string is longer than the original\r\n # string, return the original string instead. Putting an\r\n # ASCII value 0xff where the padding bit should be signals to\r\n # the decoder that the message is not encoded.\r\n if len(data_string) <= len(encoded_string):\r\n return b'\\xff' + data_string\r\n\r\n # In the first byte, store the number of padding bits\r\n padding_value = (8 - (len(binary_string) % 8)) % 8\r\n encoded_string = bytes([padding_value]) + encoded_string\r\n\r\n return encoded_string",
"def encode(self) -> bytes:\n \n pass",
"def __bytes__(self):\n\n return bytes(self._data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an iterable containing self (if include_self is true) all descendants in tree traversal order (if descend is true) all siblings (if siblings is true) and their descendants (if also descend is true) the siblings of the parent (if ascend is true) and their descendants (if also descend is true), and so on If `condition` is not None, the iterable contains only nodes for which ``condition(node)`` is true. If `condition` is a node class ``cls``, it is equivalent to a function consisting of ``return isinstance(node, cls)``. If ascend is true, assume siblings to be true as well. | def traverse(self, condition=None, include_self=True, descend=True,
siblings=False, ascend=False):
if ascend:
siblings=True
# Check for special argument combinations that allow using an
# optimized version of traverse()
if include_self and descend and not siblings:
if condition is None:
return self._all_traverse()
elif isinstance(condition, (types.ClassType, type)):
return self._fast_traverse(condition)
# Check if `condition` is a class (check for TypeType for Python
# implementations that use only new-style classes, like PyPy).
if isinstance(condition, (types.ClassType, type)):
node_class = condition
def condition(node, node_class=node_class):
return isinstance(node, node_class)
r = []
if include_self and (condition is None or condition(self)):
r.append(self)
if descend and len(self.children):
for child in self:
r.extend(child.traverse(include_self=True, descend=True,
siblings=False, ascend=False,
condition=condition))
if siblings or ascend:
node = self
while node.parent:
index = node.parent.index(node)
for sibling in node.parent[index+1:]:
r.extend(sibling.traverse(include_self=True,
descend=descend,
siblings=False, ascend=False,
condition=condition))
if not ascend:
break
else:
node = node.parent
return r | [
"def traverse(self, condition=None, include_self=True, descend=True,\r\n siblings=False, ascend=False):\r\n if ascend:\r\n siblings=True\r\n # Check for special argument combinations that allow using an\r\n # optimized version of traverse()\r\n if include_self and descend and not siblings:\r\n if condition is None:\r\n return self._all_traverse()\r\n elif isinstance(condition, type):\r\n return self._fast_traverse(condition)\r\n # Check if `condition` is a class (check for TypeType for Python\r\n # implementations that use only new-style classes, like PyPy).\r\n if isinstance(condition, type):\r\n node_class = condition\r\n def condition(node, node_class=node_class):\r\n return isinstance(node, node_class)\r\n r = []\r\n if include_self and (condition is None or condition(self)):\r\n r.append(self)\r\n if descend and len(self.children):\r\n for child in self:\r\n r.extend(child.traverse(include_self=True, descend=True,\r\n siblings=False, ascend=False,\r\n condition=condition))\r\n if siblings or ascend:\r\n node = self\r\n while node.parent:\r\n index = node.parent.index(node)\r\n for sibling in node.parent[index+1:]:\r\n r.extend(sibling.traverse(include_self=True,\r\n descend=descend,\r\n siblings=False, ascend=False,\r\n condition=condition))\r\n if not ascend:\r\n break\r\n else:\r\n node = node.parent\r\n return r",
"def nodes(self, method='dfs', criteria=lambda x: True):\n if method == 'bfs':\n def bfs_iter():\n queue = [self]\n while True:\n try:\n n = queue.pop(0)\n except IndexError:\n raise StopIteration\n queue.extend(n._children)\n if criteria(n):\n yield n\n return bfs_iter() # call the generator\n elif method == 'dfs':\n def dfs_iter():\n stack = (self,)\n while True:\n try:\n n = stack[0]\n except IndexError:\n raise StopIteration\n # TODO check whether using tuple here is actually faster than list\n stack = tuple(n._children) + stack[1:] # prepend\n if criteria(n):\n yield n\n return dfs_iter() # call the generator",
"def iter_elements(self, condition):\n for elem in self.iter():\n if condition(elem):\n yield elem",
"def traverse(self, order=\"draw\", includeSelf=True, inclStmtComment=False):\n if order == 'pick':\n if inclStmtComment and hasattr(self, 'stmtComment'):\n yield self.stmtComment\n else:\n if includeSelf:\n yield self\n # For \"pick\" order to be the true opposite of \"draw\", this loop should run in\n # reverse, but child icons are not intended to overlap in a detectable way.\n for child in self.children():\n if child is None:\n print('icon has null child', self)\n yield from child.traverse(order)\n if order == \"pick\":\n if includeSelf:\n yield self\n else:\n if inclStmtComment and hasattr(self, 'stmtComment'):\n yield self.stmtComment",
"def traverse_depthwise(self, flag=None):\n queue = deque([self]) \n while len(queue) != 0:\n node = queue.popleft()\n if node.has_children():\n for child in node.get_children():\n if child is not None:\n queue.append(child)\n if flag is not None:\n if node.is_marked(flag):\n yield node\n else:\n yield node",
"def ancestor_finder(resource, predicate, include_self=False):\n resource = resource if include_self else getattr(resource, \"__parent__\", None)\n while resource is not None:\n if predicate(resource):\n yield resource\n resource = getattr(resource, \"__parent__\", None)",
"def nodes_where(self, conditions=None, data=False, **kwargs):\n conditions = conditions or {}\n conditions.update(kwargs)\n\n for key, attr in self.nodes(True):\n is_match = True\n attr = attr or {}\n\n for name, value in conditions.items():\n method = getattr(self, name, None)\n\n if callable(method):\n val = method(key)\n if isinstance(val, list):\n if value not in val:\n is_match = False\n break\n break\n if isinstance(value, (tuple, list)):\n minval, maxval = value\n if val < minval or val > maxval:\n is_match = False\n break\n else:\n if value != val:\n is_match = False\n break\n\n else:\n if name not in attr:\n is_match = False\n break\n if isinstance(attr[name], list):\n if value not in attr[name]:\n is_match = False\n break\n break\n if isinstance(value, (tuple, list)):\n minval, maxval = value\n if attr[name] < minval or attr[name] > maxval:\n is_match = False\n break\n else:\n if value != attr[name]:\n is_match = False\n break\n\n if is_match:\n if data:\n yield key, attr\n else:\n yield key",
"def setDescendantCondTypes(self):\n self.setConditionalType()\n for child in self.childList:\n child.setDescendantCondTypes()",
"def get_children(self, flag=None, reverse=False):\n \n if self.has_children(flag=flag):\n if not reverse:\n #\n # Go in usual order\n # \n for pos in self._child_positions:\n child = self.children[pos]\n if child is not None:\n if flag is None:\n yield child\n elif child.is_marked(flag):\n yield child\n else: \n #\n # Go in reverse order\n # \n for pos in reversed(self._child_positions):\n child = self.children[pos]\n if child is not None:\n if flag is None:\n yield child\n elif child.is_marked(flag):\n yield child",
"def descendants(self) -> Iterable[\"Type\"]:\n return self._hier.closure(self, lambda t: t.child_types)",
"def descendantGen(self):\n yield self\n for child in self.childList:\n for item in child.descendantGen():\n yield item",
"def iter_children(self):\n for child in self.children:\n if not child.is_null():\n yield child",
"def children(self):\r\n c = self.child\r\n while c:\r\n yield c\r\n c = c.nxt",
"def children_iter(self):\n for child in self.children:\n if child:\n yield child",
"def get_all_ancestors(node):\n return node.iterancestors()",
"def iter_siblings(self):\r\n if self._parent:\r\n for sibling in self._parent.iter_children():\r\n if sibling is not self:\r\n yield sibling\r\n else:\r\n raise StopIteration()",
"def walk_dependency_graph(self, reverse=False):\n if reverse:\n graph_name = 'reverse_dependencies'\n else:\n graph_name = 'dependencies'\n\n # self first\n yield self\n\n # Use Breadth First Search (BFS) algorithm\n vqueue = [self]\n discovered = set(vqueue)\n while vqueue:\n u = vqueue.pop()\n for v in getattr(u, graph_name):\n if v not in discovered:\n discovered.add(v)\n vqueue.append(v)\n yield v",
"def all(self):\n def walk(nodes):\n for node in nodes:\n yield node\n if self.recurse and node.is_container:\n for result in walk(node.children):\n yield result\n return Query(walk(self))",
"def children(self):\n node = self.first_child\n while node is not None:\n yield node\n node = node.next"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
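The traverse row above combines include_self, descend, siblings, and ascend; the common call patterns pass either a node class or a predicate as `condition`. A toy sketch of the predicate-driven, depth-first pre-order walk it defaults to (the `Node` class here is an illustrative stand-in, not the docutils implementation):

    class Node:
        """Illustrative stand-in for a document-tree node."""
        def __init__(self, name, children=()):
            self.name = name
            self.children = list(children)

        def traverse(self, condition=None):
            # Depth-first, pre-order, including self -- the common default.
            result = []
            if condition is None or condition(self):
                result.append(self)
            for child in self.children:
                result.extend(child.traverse(condition))
            return result

    tree = Node('root', [Node('a', [Node('b')]), Node('c')])
    assert [n.name for n in tree.traverse()] == ['root', 'a', 'b', 'c']
    leaves = tree.traverse(condition=lambda n: not n.children)
    assert [n.name for n in leaves] == ['b', 'c']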
Return the first node in the iterable returned by traverse(), or None if the iterable is empty. Parameter list is the same as of traverse. Note that include_self defaults to 0, though. | def next_node(self, condition=None, include_self=False, descend=True,
siblings=False, ascend=False):
iterable = self.traverse(condition=condition,
include_self=include_self, descend=descend,
siblings=siblings, ascend=ascend)
try:
return iterable[0]
except IndexError:
return None | [
"def start_node(self):\n if len(self._nodes) == 0:\n return None\n return self._nodes[0]",
"def get_first_node(self):\n return self._nodes[0]",
"def suggested_node(self):\n for _ in range(0, len(self.node.children)):\n if self.current_idx == len(self.node.children):\n self.current_idx = 0\n node = self.node.children[self.current_idx]\n if node:\n return node\n\n return None",
"def first(self):\n\t\tif self.is_empty():\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self._make_position(0) #position of first element",
"def _get_next_node(self) -> Optional[BaseInvocation]:\n g = self.execution_graph.nx_graph()\n\n # Depth-first search with pre-order traversal is a depth-first topological sort\n sorted_nodes = nx.dfs_preorder_nodes(g)\n\n next_node = next(\n (\n n\n for n in sorted_nodes\n if n not in self.executed # the node must not already be executed...\n and all((e[0] in self.executed for e in g.in_edges(n))) # ...and all its inputs must be executed\n ),\n None,\n )\n\n if next_node is None:\n return None\n\n return self.execution_graph.nodes[next_node]",
"def next(self) -> Optional[BaseInvocation]:\n\n # TODO: enable multiple nodes to execute simultaneously by tracking currently executing nodes\n # possibly with a timeout?\n\n # If there are no prepared nodes, prepare some nodes\n next_node = self._get_next_node()\n if next_node is None:\n prepared_id = self._prepare()\n\n # Prepare as many nodes as we can\n while prepared_id is not None:\n prepared_id = self._prepare()\n next_node = self._get_next_node()\n\n # Get values from edges\n if next_node is not None:\n self._prepare_inputs(next_node)\n\n # If next is still none, there's no next node, return None\n return next_node",
"def find_node(self, func):\n\n for obj in self.lst_hierobj:\n if func(obj):\n return obj\n return None",
"def get_leftmost_child(self) -> Node or None:\n if len(self.children) == 0: return None\n return self.children[0]",
"def next(self):\n if self.is_complete():\n return None\n return self.tree.children[self.dot]",
"def _first(self, node: etree._Entity, expr: str) -> etree._Entity | None:\n for entity in self.xpath(node, expr):\n return entity\n return None",
"def firstChild(self):\n # return self.currentNode.firstChild\n return traverseChildren(self, 'first')",
"def _get_node(self, index):\n if not (-self._length <= index <= self._length - 1):\n return None\n\n # Converts negative indexes to positive.\n index = index + self._length if index < 0 else index\n\n cur_node = self._first\n for i in range(index):\n cur_node = cur_node.next\n\n return cur_node",
"def getFirstChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[0]\n return None",
"def first(self, default=None):\n try:\n return iter(self).next()\n except StopIteration:\n return default",
"def getFirstTopLevelNode(self) -> retval:\n ...",
"def peek(self):\n # TODO: Return top item, if any\n \n if self.list.is_empty():\n return None \n\n # looks at top of ll which is top of stack and grabs the head.\n return self.list.head.data",
"def depth_first_traversal(self, start=None):\n if start is None:\n start = self.root\n traverse = []\n\n return traverse",
"def first(self, default=None):\r\n try:\r\n return next(iter(self))\r\n except StopIteration:\r\n return default",
"def first(self):\n return next(self)",
"def first(iterator):\n return next(iterator)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |