query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: sequence (19 to 20 items)
metadata: dict
Interpret filesystem path settings relative to the `base_path` given. Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from `OptionParser.relative_path_settings`.
def make_paths_absolute(pathdict, keys, base_path=None):
    if base_path is None:
        base_path = os.getcwd()  # type(base_path) == unicode
        # to allow combining non-ASCII cwd with unicode values in `pathdict`
    for key in keys:
        if key in pathdict:
            value = pathdict[key]
            if isinstance(value, list):
                value = [make_one_path_absolute(base_path, path)
                         for path in value]
            elif value:
                value = make_one_path_absolute(base_path, value)
            pathdict[key] = value
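A hedged usage sketch for the function above. `make_one_path_absolute` is not shown in the snippet, so a stand-in (join plus abspath, which is how docutils appears to define it) is assumed; the settings dict, key names, and POSIX paths are made up for illustration.

import os

def make_one_path_absolute(base_path, path):
    # assumed helper: resolve one relative path against base_path
    return os.path.abspath(os.path.join(base_path, path))

settings = {'stylesheet_path': ['minimal.css', 'plain.css'],
            'warning_stream': None,        # falsy values are left untouched
            'title': 'unrelated setting'}  # keys outside `keys` are ignored
make_paths_absolute(settings, keys=('stylesheet_path', 'warning_stream'),
                    base_path='/srv/docs')
print(settings['stylesheet_path'])
# ['/srv/docs/minimal.css', '/srv/docs/plain.css']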
[ "def make_paths_absolute(pathdict, keys, base_path=None):\r\n if base_path is None:\r\n base_path = os.getcwdu() # type(base_path) == unicode\r\n # to allow combining non-ASCII cwd with unicode values in `pathdict`\r\n for key in keys:\r\n if key in pathdict:\r\n value = pathdict[key]\r\n if isinstance(value, list):\r\n value = [make_one_path_absolute(base_path, path)\r\n for path in value]\r\n elif value:\r\n value = make_one_path_absolute(base_path, value)\r\n pathdict[key] = value", "def process_path_key(self, dirpath, filename, key_path, dictionary, keys, level, must_exist, can_have_subdict, default_val):\n # found the key_path, process values\n if level == len(keys) - 1:\n key = keys[level]\n # if a wildcard is specified at this level, that means we\n # should process all keys as path values\n if key == \"*\":\n for key, val in dictionary.items():\n dictionary[key] = self.process_path_value(dirpath, filename, key_path, val, must_exist, can_have_subdict)\n elif key in dictionary:\n dictionary[key] = self.process_path_value(dirpath, filename, key_path, dictionary[key], must_exist, can_have_subdict)\n # key was not found, but default value was set, so apply it\n elif default_val:\n dictionary[key] = self.relative_path(dirpath, filename, key_path, default_val, must_exist)\n # otherwise recurse deeper into the dict\n elif level < len(keys) - 1:\n key = keys[level]\n if key in dictionary:\n # if the key refers to a dictionary, recurse into it to go\n # further down the path key\n if isinstance(dictionary[key], dict):\n self.process_path_key(dirpath, filename, key_path, dictionary[key], keys, level + 1,\n must_exist, can_have_subdict, default_val)\n # if the key was not found, but a default value is specified,\n # drill down further to set the default value\n elif default_val:\n dictionary[key] = {}\n self.process_path_key(dirpath, filename, key_path, dictionary[key], keys, level + 1,\n must_exist, can_have_subdict, default_val)", "def test_paths_from_settings():\n import settings_bipype\n\n namespace = settings_bipype.__dict__\n \n variables = { key: namespace[key]\n for key in namespace\n if key.startswith('PATH') }\n \n for var in variables.values():\n assert path_exists(var)", "def get_relative_paths(original, relative_to):\n return {k: get_relative_path(v, relative_to) for k, v in original.items()}", "def get_settings_path() -> Path:\n with open(root_path / 'deeppavlov/paths.json', encoding='utf8') as f:\n paths = json.load(f)\n settings_paths = Path(paths['settings_path']).resolve() if paths['settings_path'][0] == '/' \\\n else root_path / paths['settings_path']\n return settings_paths", "def _GetPathValue(obj, paths, default_value=None):\n if not obj:\n return default_value\n for p in paths:\n if p in obj:\n obj = obj[p]\n else:\n return default_value\n return obj", "def process_path_value(self, dirpath, filename, key_path, val, must_exist, can_have_subdict):\n if isinstance(val, str):\n return self.relative_path(dirpath, filename, key_path, val, must_exist)\n elif isinstance(val, list):\n vals = []\n for entry in val:\n if can_have_subdict and isinstance(entry, dict):\n for subkey, subval in entry.items():\n vals.append({subkey: self.relative_path(dirpath, filename, key_path, subval, must_exist)})\n else:\n vals.append(self.relative_path(dirpath, filename, key_path, entry, must_exist))\n return vals", "def jsonpaths_in_dict(dic, path='$', *, notation='dot'):\n for k, v in dic.items():\n if notation == 'dot':\n json_path = f\"{path}.{k}\"\n elif notation == 'bracket':\n json_path = 
f\"{path}['{k}']\"\n else:\n json_path = None\n ValueError(f\"Notation: '{notation}' is not supported\")\n\n if isinstance(v, dict):\n for json_path_ in jsonpaths_in_dict(\n v, json_path, notation=notation):\n yield json_path_\n else:\n yield json_path", "def get_subdict(adict, path, sep=os.sep):\n return reduce(adict.__class__.get, [p for p in path.split(sep) if p], adict)", "def load_dataset_paths(basedir: str) -> Cord19Paths:\n basedir = Path(basedir)\n paths, filesdir = {}, []\n for p in basedir.iterdir():\n if p.suffix == '.csv':\n paths['metadata'] = p\n elif p.suffix == '.readme':\n paths['readme'] = p\n elif p.is_dir():\n dirdir = p.joinpath(p.name)\n if dirdir.is_dir():\n filesdir.append(dirdir)\n\n paths['dirs'] = filesdir\n for p in filesdir:\n paths[p.name] = p\n return Cord19Paths(**paths)", "def paths_import_all(self, path_fname):\n\t\twith open (path_fname, \"rb\") as paths_f:\n\t\t\tpaths_dict = pickle.load(paths_f)\n\n\t\t# keyed by environment name\n\t\tunscrambled_dict = {}\n\t\tfor key in paths_dict.keys():\n\t\t\tunscrambled_dict[key] = self.moveit_unscramble(paths_dict[key])\n\n\t\treturn unscrambled_dict", "def make_paths_absolute(dir_: str, cfg: Dict[str, Any]) -> Dict[str, Any]:\n for key in cfg.keys():\n if hasattr(key, \"endswith\") and key.endswith(\"_path\"):\n if cfg[key].startswith(\"~\"):\n cfg[key] = os.path.expanduser(cfg[key])\n else:\n cfg[key] = os.path.join(dir_, cfg[key])\n cfg[key] = os.path.abspath(cfg[key])\n if type(cfg[key]) is dict:\n cfg[key] = make_paths_absolute(dir_, cfg[key])\n return cfg", "def _lookup_paths_in_paths(client_dispatcher: IClientDispatcher, lookup_paths: List[str], target_paths: List[str]):\n client = client_dispatcher.current_client\n\n dirs = []\n files = set()\n\n for p in lookup_paths:\n path = Path(get_relative_paths(client.path, [p])[0])\n if path.is_dir():\n dirs.append(path)\n else:\n files.add(path)\n\n target_dirs = []\n target_files = set()\n\n for p in target_paths:\n path = Path(p)\n if path.is_dir():\n target_dirs.append(path)\n else:\n target_files.add(path)\n\n result = set()\n\n for target_file in target_files:\n if target_file in files or any(d in target_file.parents for d in dirs):\n result.add(str(target_file))\n\n for target_dir in target_dirs:\n if target_dir in dirs or any(target_dir in f.parents for f in files):\n result.add(str(target_dir))\n\n return result", "def common_paths():\n\tpath={}\n\tcurPath = os.path.dirname(os.path.realpath(__file__))\n\tpath[\"current\"] = curPath\n\tsharedPath = \"/usr/share/beadtracker\"\n\tpath[\"translation\"] = firstExistingPath(\n\t\t\t[os.path.join(p, \"lang\") for p in\n\t\t\t (curPath, sharedPath,)])\n\tpath[\"templates\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'templates') for p in\n\t\t\t (curPath, sharedPath,)])\n\n\tpath[\"splash\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'templates','splash.png') for p in\n\t\t\t (curPath, sharedPath,)])\n\tpath[\"themes\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'templates','themes') for p in\n\t\t\t (curPath, sharedPath,)])\n\n\tlang=str(QtCore.QLocale.system().name()) \n\tshortLang=lang[:2]\n\tpath[\"help\"] = firstExistingPath(\n\t\t\t[os.path.join(p,'HELP') for p in\n\t\t\t (os.path.join(curPath,\"help_\"+lang),\n\t\t\t os.path.join(sharedPath,\"help_\"+lang),\n\t\t\t os.path.join(curPath,\"help_\"+shortLang),\n\t\t\t os.path.join(sharedPath,\"help_\"+shortLang),\n\t\t\t os.path.join(curPath,\"help\"),\n\t\t\t os.path.join(sharedPath,\"help\"),\n\t\t\t )\n\t\t\t ])\n\treturn path", "def 
_dynamic_paths(path, options):\n path = RouteTrie.clean_path(path)\n possible_paths = set([path])\n\n # Check for string formatting.\n if not options or '{' not in path:\n return possible_paths\n\n for key in options:\n for option in options[key]:\n format_keys = {key: option}\n new_paths = []\n for possible_path in possible_paths:\n new_paths.append(utils.safe_format(possible_path, **format_keys))\n possible_paths = possible_paths.union(new_paths)\n\n paths = set()\n\n # Don't include paths that still have format symbols.\n for possible_path in possible_paths:\n if '{' not in possible_path:\n paths.add(possible_path)\n\n if not paths:\n raise MissingOptionError(path)\n\n return paths", "def sub_dict(d:dict, paths:list, *, compl=False):\n# k = keys[0]\n# assert type(k) in {list, tuple}\n# res = nested_dict(k, fsl.utils.data.get_item(d, k))\n res = {}\n if compl:\n pp = []\n for p in get_paths(d):\n for q in paths:\n if q == p[:len(q)]:\n break\n else:\n pp.append(p)\n else:\n pp = paths\n\n for k in pp:\n # assert type(k) in {list, tuple}\n setitem(res, k, getitem(d, k))\n return res", "def _configure_local_paths(local_paths):\n answer = copy(local_paths)\n\n # Ask the user for a repository root.\n while not answer.get('reporoot'):\n logger.info('First, we need to know where you store most code on your '\n 'local machine.')\n logger.info('Other paths (example: toolkit) will derive from this, '\n 'but most are individually configurable.')\n logger.info('The use of ${REPOROOT} in GAPIC YAMLs will point here.')\n logger.info('Note: Use of ~ is fine here.')\n answer['reporoot'] = six.moves.input('Local code path: ')\n answer['reporoot'] = answer['reporoot'].rstrip('/').strip()\n\n # Set up dependent directories.\n reporoot = answer['reporoot']\n for dep in ('api-client-staging', 'googleapis', 'toolkit'):\n location = six.moves.input(\n 'Path for {0} (default: {1}/{0}): '.format(dep, reporoot)\n ).rstrip('/').strip()\n if location:\n answer[dep.replace('-', '_')] = location\n\n # Done; return the answer.\n return answer", "def load(self, base_settings):\n is_valid_key = lambda k: k.isupper() and not k.startswith('_')\n\n # Base settings, including `LocalSetting`s, loaded from the\n # Django settings module.\n valid_keys = (k for k in base_settings if is_valid_key(k))\n base_settings = DottedAccessDict((k, base_settings[k]) for k in valid_keys)\n\n # Settings read from the settings file; values are unprocessed.\n settings_from_file = self.strategy.read_file(self.file_name, self.section)\n\n # The fully resolved settings.\n settings = Settings(base_settings)\n\n for name, value in settings_from_file.items():\n for prefix in ('PREPEND.', 'APPEND.', 'SWAP.'):\n if name.startswith(prefix):\n name = name[len(prefix):]\n name = '{prefix}({name})'.format(**locals())\n break\n\n settings.set_dotted(name, value)\n\n # See if this setting corresponds to a `LocalSetting`. If\n # so, note that the `LocalSetting` has a value by putting it\n # in the registry. 
This also makes it easy to retrieve the\n # `LocalSetting` later so its value can be set.\n current_value = base_settings.get_dotted(name, None)\n if isinstance(current_value, LocalSetting):\n self.registry[current_value] = name\n\n self._interpolate_values(settings, settings)\n self._interpolate_keys(settings, settings)\n self._prepend_extras(settings, settings.pop('PREPEND', None))\n self._append_extras(settings, settings.pop('APPEND', None))\n self._swap_list_items(settings, settings.pop('SWAP', None))\n self._import_from_string(settings, settings.pop('IMPORT_FROM_STRING', None))\n self._delete_settings(settings, settings.pop('DELETE', None))\n\n for local_setting, name in self.registry.items():\n local_setting.value = settings.get_dotted(name)\n\n return settings", "def expandVarsInPaths(repositories):\r\n os.environ[\"SUBUSERDIR\"] = _getSubuserDir()\r\n for reponame,info in repositories.iteritems():\r\n info[\"path\"] = os.path.expandvars(info[\"path\"])", "def _crawl(\n self, key_path: List[str], env_vars: Mapping[str, Sequence[str]]\n ) -> Dict[str, Any]:\n new_vars: Dict[str, List[str]] = {}\n obj = self._path_get(key_path)\n # Sub-dict -> recurse\n if (\n hasattr(obj, \"keys\")\n and callable(obj.keys)\n and hasattr(obj, \"__getitem__\")\n ):\n for key in obj.keys():\n merged_vars = dict(env_vars, **new_vars)\n merged_path = key_path + [key]\n crawled = self._crawl(merged_path, merged_vars)\n # Handle conflicts\n for key in crawled:\n if key in new_vars:\n err = \"Found >1 source for {}\"\n raise AmbiguousEnvVar(err.format(key))\n # Merge and continue\n new_vars.update(crawled)\n # Other -> is leaf, no recursion\n else:\n new_vars[self._to_env_var(key_path)] = key_path\n return new_vars" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a copy of `settings_spec` excluding/replacing some settings. `settings_spec` is a tuple of configuration settings with a structure described for docutils.SettingsSpec.settings_spec. Optional positional arguments are names of to-be-excluded settings. Keyword arguments are option specification replacements. (See the html4strict writer for an example.)
def filter_settings_spec(settings_spec, *exclude, **replace):
    settings = list(settings_spec)
    # every third item is a sequence of option tuples
    for i in range(2, len(settings), 3):
        newopts = []
        for opt_spec in settings[i]:
            # opt_spec is ("<help>", [<option strings>], {<keyword args>})
            opt_name = [opt_string[2:].replace('-', '_')
                        for opt_string in opt_spec[1]
                        if opt_string.startswith('--')][0]
            if opt_name in exclude:
                continue
            if opt_name in list(replace.keys()):
                newopts.append(replace[opt_name])
            else:
                newopts.append(opt_spec)
        settings[i] = tuple(newopts)
    return tuple(settings)
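A hedged usage sketch of filter_settings_spec as defined above, with a made-up settings_spec in the (title, description, option-tuples) layout the docstring describes; the option names are illustrative only.

spec = (
    'Example Options', None,
    (('Strip comments.', ['--strip-comments'], {'action': 'store_true'}),
     ('Set the output encoding.', ['--output-encoding'], {'default': 'utf-8'})),
)

filtered = filter_settings_spec(
    spec,
    'strip_comments',                               # exclude, by dest-style name
    output_encoding=('Set the output encoding.',    # replace the whole option spec
                     ['--output-encoding'], {'default': 'ascii'}))
print(filtered[2])
# (('Set the output encoding.', ['--output-encoding'], {'default': 'ascii'}),)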
[ "def filter_settings_spec(settings_spec, *exclude, **replace):\r\n settings = list(settings_spec)\r\n # every third item is a sequence of option tuples\r\n for i in range(2, len(settings), 3):\r\n newopts = []\r\n for opt_spec in settings[i]:\r\n # opt_spec is (\"<help>\", [<option strings>], {<keyword args>})\r\n opt_name = [opt_string[2:].replace('-', '_')\r\n for opt_string in opt_spec[1]\r\n if opt_string.startswith('--')\r\n ][0]\r\n if opt_name in exclude:\r\n continue\r\n if opt_name in replace.keys():\r\n newopts.append(replace[opt_name])\r\n else:\r\n newopts.append(opt_spec)\r\n settings[i] = tuple(newopts)\r\n return tuple(settings)", "def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]", "def get_settings_model(self):\n ignore = [\"layer\", \"layers_cladding\", \"cladding_offset\"]\n s = self.get_settings()\n [s.pop(i) for i in ignore]\n return s", "def exclude_from_setting(setting_name, items):\n settings_manager.add_action(\n 'exclude_from_setting',\n setting_name=setting_name,\n items=items\n )", "def avoid(self, excludes): \n if excludes is None or len(excludes) == 0: return self.shallow_copy(excludes=None)\n result = self.shallow_copy(excludes=excludes)\n if self.excludes is not None: result.excludes.extend(self.excludes)\n return result", "def get_non_optional_params(cls):\n non_optional_params = {}\n for setting_name, description in options['settings'].items():\n if len(description) == 2:\n non_optional_params[\n setting_name] = cls._normalize_desc(description[0],\n description[1])\n return OrderedDict(non_optional_params)", "def _kwargs_del(self, kwargs: Dict[str, Any],\r\n exclude: Union[str, Iterable[str]]) -> Dict[str, Any]:\r\n if isinstance(exclude, str):\r\n exclude = [exclude]\r\n \r\n return {k: kwargs[k] for k in kwargs.keys() if k not in exclude}", "def filter_options(\n args, # type: EnvironmentConfig\n argv, # type: t.List[str]\n exclude, # type: t.List[str]\n require, # type: t.List[str]\n): # type: (...) 
-> t.Iterable[str]\n replace: list[tuple[str, int, t.Optional[t.Union[bool, str, list[str]]]]] = [\n ('--docker-no-pull', 0, False),\n ('--truncate', 1, str(args.truncate)),\n ('--color', 1, 'yes' if args.color else 'no'),\n ('--redact', 0, False),\n ('--no-redact', 0, not args.redact),\n ('--host-path', 1, args.host_path),\n ]\n\n if isinstance(args, TestConfig):\n replace.extend([\n ('--changed', 0, False),\n ('--tracked', 0, False),\n ('--untracked', 0, False),\n ('--ignore-committed', 0, False),\n ('--ignore-staged', 0, False),\n ('--ignore-unstaged', 0, False),\n ('--changed-from', 1, False),\n ('--changed-path', 1, False),\n ('--metadata', 1, args.metadata_path),\n ('--exclude', 1, exclude),\n ('--require', 1, require),\n ('--base-branch', 1, args.base_branch or get_ci_provider().get_base_branch()),\n ])\n\n pass_through_args: list[str] = []\n\n for arg in filter_args(argv, {option: count for option, count, replacement in replace}):\n if arg == '--' or pass_through_args:\n pass_through_args.append(arg)\n continue\n\n yield arg\n\n for option, _count, replacement in replace:\n if not replacement:\n continue\n\n if isinstance(replacement, bool):\n yield option\n elif isinstance(replacement, str):\n yield from [option, replacement]\n elif isinstance(replacement, list):\n for item in replacement:\n yield from [option, item]\n\n yield from args.delegate_args\n yield from pass_through_args", "def get_other_options(cls, **options):\n return {option : value for option, value in options.items() if option not in cls.config_options}", "def _sanitize_settings(settings: dict) -> dict:\n resolved_settings = {}\n for k, v in settings.items():\n # Replace with k.lower().removeprefix(\"mongodb_\") when python 3.8 support ends.\n key = _get_name(k[8:]) if k.lower().startswith(\"mongodb_\") else _get_name(k)\n resolved_settings[key] = v\n\n return resolved_settings", "def test_from_settings_ignores_other_settings():\n expected = {'a': 1, 'b': 2}\n actual = from_settings({'DATABASE_A': 1, 'DATABASE_B': 2, 'OTHER': 3})\n assert actual == expected", "def filter(self, other):\n\n self.canonify()\n other.canonify()\n\n rej = self.__class__()\n rej.optlist = self.optlist.difference(other.optlist)\n self.optlist.difference_update(rej.optlist)\n for x in self.optdict.copy():\n if x not in other.optdict:\n self.optdict.pop(x)\n rej.optdict[x] = None\n\n return rej", "def set_exclude(self, exclude):\n self.exclude = exclude\n if exclude:\n log.info('Only considering tags without \"%s\"', exclude)\n return self", "def get_secret_setting_names(settings: dict) -> Set[str]:\n return {\n key for key in settings.keys()\n if AUTOFIND_SECRET_SETTINGS.match(key)\n and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED\n } | {\n key for key, value in settings['SETTINGS_DEFAULTS'].items()\n if value == PLACEHOLDER_FOR_SECRET\n and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED\n }", "def process_opt_ignores(self):\n for option in self.options.keys():\n if (option.startswith(\"ignore-\")\n and option[7:] in _COMPONENT_NAMES):\n values = self.options.pop(option)\n name = option.split(\"-\")[1]\n indices = [int(index) for index in values.split()]\n COMPARISON_SETTINGS[\"ignore_templates\"][name] = indices\n self.parent.reporter(\n \"Ignoring indices {0} of {1}\".format(indices, option[7:]))\n continue\n if option == \"ignore_missing\":\n value = self.options.pop(option)\n if value.lower() == \"true\":\n COMPARISON_SETTINGS[\"ignore_missing\"] = True\n self.parent.reporter(\"Ignoring positional header data\")", "def 
_warn_about_ignored_settings(_settings_type, _discussion_style):\n # TODO: Leverage the logger instead of warnings\n warn_msg = f\"Because the discussion style is '{_discussion_style}' all {_settings_type}-specific fields \" \\\n \"provided will be ignored.\"\n warnings.warn(warn_msg, UserWarning)", "def discard_settings(self):\n self._call(\"discardSettings\")", "def test_cfg_exclude_component_dict(self):\n # create the top level externals file\n desc = self.setup_dict_config()\n # Test an excluded repo\n external = create_externals_description(desc, model_format='dict',\n exclude=['simp_tag',\n 'simp_opt'])\n self.assertIsInstance(external, ExternalsDescriptionDict)\n self.assertFalse('simp_tag' in external)\n self.assertTrue('simp_branch' in external)\n self.assertFalse('simp_opt' in external)\n self.assertTrue('mixed_req' in external)", "def remove_simulation_kwargs(d: Dict[str, Any]) -> Dict[str, Any]:\n d = d.copy()\n d.pop(\"run\", None)\n d.pop(\"lazy_parallelism\", None)\n d.pop(\"overwrite\", None)\n d.pop(\"animate\", None)\n d.pop(\"wait_to_finish\", None)\n d.pop(\"cores\", None)\n d.pop(\"temp_dir\", None)\n d.pop(\"temp_file_str\", None)\n return d", "def set_html_exclusions(self, exclusions):\n excl = []\n for (tags,attrs) in exclusions:\n if len(tags)==1 and tags[0]==\"\":\n tags = []\n if len(attrs)==1 and attrs[0]==\"\":\n attrs = []\n excl.append((tags, attrs))\n self.html_exclusions = excl" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call the validator function on applicable settings and evaluate the 'overrides' option. Extends `optparse.Option.process`.
def process(self, opt, value, values, parser):
    result = optparse.Option.process(self, opt, value, values, parser)
    setting = self.dest
    if setting:
        if self.validator:
            value = getattr(values, setting)
            try:
                new_value = self.validator(setting, value, parser)
            except Exception as error:
                raise optparse.OptionValueError(
                    'Error in option "%s":\n    %s' % (opt, ErrorString(error)))
            setattr(values, setting, new_value)
        if self.overrides:
            setattr(values, self.overrides, None)
    return result
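The method above extends optparse.Option.process with a validator hook. The sketch below re-creates the same pattern with plain optparse so it runs standalone; the ValidatingOption class, the --tab-width option, and the validator are illustrative stand-ins, not the library originals.

import optparse

def validate_nonnegative_int(setting, value, option_parser):
    # validator contract: (setting, value, parser) -> converted value, or raise
    value = int(value)
    if value < 0:
        raise ValueError('negative value; must be positive or zero')
    return value

class ValidatingOption(optparse.Option):
    ATTRS = optparse.Option.ATTRS + ['validator']

    def process(self, opt, value, values, parser):
        result = optparse.Option.process(self, opt, value, values, parser)
        if self.dest and self.validator:
            raw = getattr(values, self.dest)
            try:
                setattr(values, self.dest,
                        self.validator(self.dest, raw, parser))
            except Exception as error:
                raise optparse.OptionValueError(
                    'Error in option "%s":\n    %s' % (opt, error))
        return result

parser = optparse.OptionParser(option_class=ValidatingOption)
parser.add_option('--tab-width', dest='tab_width', default=8,
                  validator=validate_nonnegative_int)
options, _ = parser.parse_args(['--tab-width', '4'])
print(options.tab_width)   # 4, validated and converted to int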
[ "def process(self, opt, value, values, parser):\r\n result = optparse.Option.process(self, opt, value, values, parser)\r\n setting = self.dest\r\n if setting:\r\n if self.validator:\r\n value = getattr(values, setting)\r\n try:\r\n new_value = self.validator(setting, value, parser)\r\n except Exception, error:\r\n raise (optparse.OptionValueError(\r\n 'Error in option \"%s\":\\n %s'\r\n % (opt, ErrorString(error))),\r\n None, sys.exc_info()[2])\r\n setattr(values, setting, new_value)\r\n if self.overrides:\r\n setattr(values, self.overrides, None)\r\n return result", "def ValidateOptions(self, opt, args):", "def option_override(options):\n if not options.config_file:\n _logger.warning('config file {0} not found'.format(options.config_file))\n return\n\n config = configparser.RawConfigParser()\n config.read(options.config_file)\n\n section = 'system'\n if config.has_section(section):\n try_update(config, options, section, 'budget')\n try_update(config, options, section, 'sys_area')\n try_update(config, options, section, 'sys_power')\n try_update(config, options, section, 'sys_bw')\n try_update(config, options, section, 'thru_core')\n\n section = 'app'\n if config.has_section(section):\n try_update(config, options, section, 'workload')\n try_update(config, options, section, 'kernels')\n\n section = 'explore-variables'\n if config.has_section(section):\n try_update(config, options, section, 'f_parallel')\n try_update(config, options, section, 'asic_cov')\n try_update(config, options, section, 'asic_perf')\n try_update(config, options, section, 'asic_alloc')\n\n section = 'analysis'\n if config.has_section(section):\n try_update(config, options, section, 'series')\n try_update(config, options, section, 'action')\n try_update(config, options, section, 'fmt')\n try_update(config, options, section, 'nprocs')", "def clean_and_validate_options(self):\n pass", "def processCommandLineOptions(self, args):\n del args\n msg = (\"processCommandLineOptions() not implemented, Config must be \"\n \"subclassed.\")\n raise NotImplementedError(msg)", "def _validate_overrides(cls, filled: Config, overrides: Dict[str, Any]):\n error_msg = \"Invalid override: config value doesn't exist\"\n errors = []\n for override_key in overrides.keys():\n if not cls._is_in_config(override_key, filled):\n errors.append({\"msg\": error_msg, \"loc\": [override_key]})\n if errors:\n raise ConfigValidationError(filled, errors)", "def opt_validate (optparser):\n (options,args) = optparser.parse_args()\n if not options.fqfilename:\n optparser.print_help()\n sys.exit(1)\n if not options.species:\n optparser.print_help()\n sys.exit(1)\n if not options.dirOut:\n optparser.print_help()\n sys.exit(1)\n return options", "def _general_argument_parser(self, args_group):\n parser_handler = {UIConsts.MANDATORY: self._parse_mandatory_arguments,\n UIConsts.BASIC_MODE: self._parse_basic_mode_arguments,\n UIConsts.REGEX_MODE: self._parse_regex_mode_arguments,\n UIConsts.CUSTOM_MODE: self._parse_custom_mode_arguments}\n while self.num_of_attempts > 0:\n is_valid = parser_handler[args_group]()\n if not is_valid:\n continue\n else:\n self.num_of_attempts = 3\n return True\n return False", "def check_options(opts):\n\n sections = baseoptions.keys()\n for s in sections:\n defaults = dict(baseoptions[s])\n for i in defaults:\n if i not in opts:\n opts[i] = defaults[i]\n return opts", "def test_override_of_mixed_set_of_options(self):\n config_file = \"%s/config_mixed_overrides_1.conf\" % self.test_data_path\n oconfig = 
ocelog.config.Config(config_file)\n self.assertEqual(oconfig.server.port, 7777) # override \"8888\" \n self.assertEqual(oconfig.server.host, \"localhost\") # default\n self.assertEqual(oconfig.message.default_facility, \"local3\") # override of \"user\"\n self.assertEqual(oconfig.message.default_priority, \"err\") # override of \"notice\"\n self.assertEqual(oconfig.syslog.enabled, False) # default\n self.assertEqual(oconfig.security.require_token, False) # default\n self.assertEqual(oconfig.security.shared_secret, \"fruitpunch\") # override of None", "def parse_option(self, option, block_name, *values):\n if option == 'run':\n option = 'start_' + option\n\n key = option.split('_', 1)[0]\n self.paths[key] = set(common.extract_app_paths(values))", "def completing_subcommand_option_util(self, option, words):\n # Example: Return True for: gh view 1 --pag\n if len(words) > 3:\n if option in words:\n return True\n return False", "def __set_options(self, options):\n for option, value in options.iteritems():\n if option in ('slave_okay', 'slaveok'):\n self.__slave_okay = validate_boolean(option, value)\n elif option == 'read_preference':\n self.__read_pref = validate_read_preference(option, value)\n elif option == 'safe':\n self.__safe = validate_boolean(option, value)\n elif option in SAFE_OPTIONS:\n if option == 'journal':\n self.__set_safe_option('j', value)\n elif option == 'wtimeoutms':\n self.__set_safe_option('wtimeout', value)\n else:\n self.__set_safe_option(option, value)", "def _do_option(self, line):\n if line.startswith('option verbosity'):\n self._verbosity = int(line[len('option verbosity '):])\n self._write('ok')\n else:\n self._write('unsupported')", "def flags(self, **kw):\n for k, v in kw.iteritems():\n FLAGS.set_override(k, v)\n self._overridden_opts.append(k)", "def update_override_settings(self, override_settings: dict) -> None:", "def genome_options(parser, user_option, prebuilt):\n\n # Checks for custom built genomes using rna-seek build\n if user_option.endswith('.json'):\n # Check file is readable or accessible\n permissions(parser, user_option, os.R_OK)\n # Checks against vaild pre-built options\n # TODO: makes this more dynamic in the future to have it check against\n # a list of genomes (files) in config/genomes/*.json\n elif not user_option in prebuilt:\n # User did NOT provide a vaild choice\n parser.error(\"\"\"provided invalid choice, '{}', to --genome argument!\\n\n Choose from one of the following pre-built genome options: \\n\n \\t{}\\n\n or supply a custom reference genome JSON file generated from rna-seek build.\n \"\"\".format(user_option, prebuilt))\n\n return user_option", "def filter_options(\n args, # type: EnvironmentConfig\n argv, # type: t.List[str]\n exclude, # type: t.List[str]\n require, # type: t.List[str]\n): # type: (...) 
-> t.Iterable[str]\n replace: list[tuple[str, int, t.Optional[t.Union[bool, str, list[str]]]]] = [\n ('--docker-no-pull', 0, False),\n ('--truncate', 1, str(args.truncate)),\n ('--color', 1, 'yes' if args.color else 'no'),\n ('--redact', 0, False),\n ('--no-redact', 0, not args.redact),\n ('--host-path', 1, args.host_path),\n ]\n\n if isinstance(args, TestConfig):\n replace.extend([\n ('--changed', 0, False),\n ('--tracked', 0, False),\n ('--untracked', 0, False),\n ('--ignore-committed', 0, False),\n ('--ignore-staged', 0, False),\n ('--ignore-unstaged', 0, False),\n ('--changed-from', 1, False),\n ('--changed-path', 1, False),\n ('--metadata', 1, args.metadata_path),\n ('--exclude', 1, exclude),\n ('--require', 1, require),\n ('--base-branch', 1, args.base_branch or get_ci_provider().get_base_branch()),\n ])\n\n pass_through_args: list[str] = []\n\n for arg in filter_args(argv, {option: count for option, count, replacement in replace}):\n if arg == '--' or pass_through_args:\n pass_through_args.append(arg)\n continue\n\n yield arg\n\n for option, _count, replacement in replace:\n if not replacement:\n continue\n\n if isinstance(replacement, bool):\n yield option\n elif isinstance(replacement, str):\n yield from [option, replacement]\n elif isinstance(replacement, list):\n for item in replacement:\n yield from [option, item]\n\n yield from args.delegate_args\n yield from pass_through_args", "def run_validators(self, value):\n if isinstance(value, dict):\n to_validate = self._read_only_defaults()\n to_validate.update(value)\n else:\n to_validate = value\n super().run_validators(to_validate)", "def setPosixCompliance(self, aFlag = 0):\n self.posixCompliance = aFlag\n self.needsParse = 1\n\n if self.posixCompliance:\n self.optionStartExpr = re.compile('(--|-)(?P<option>[A-Za-z0-9_-]+)(?P<arg>=.*)?')\n self.orderMixed = 0\n else:\n self.optionStartExpr = re.compile('(--|-|\\+)(?P<option>[A-Za-z0-9_-]+)(?P<arg>=.*)?')\n self.orderMixed = 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each component, first populate from the `SettingsSpec.settings_spec` structure, then from the `SettingsSpec.settings_defaults` dictionary. After all components have been processed, check for and populate from each component's `SettingsSpec.settings_default_overrides` dictionary.
def populate_from_components(self, components):
    for component in components:
        if component is None:
            continue
        settings_spec = component.settings_spec
        self.relative_path_settings.extend(
            component.relative_path_settings)
        for i in range(0, len(settings_spec), 3):
            title, description, option_spec = settings_spec[i:i+3]
            if title:
                group = optparse.OptionGroup(self, title, description)
                self.add_option_group(group)
            else:
                group = self        # single options
            for (help_text, option_strings, kwargs) in option_spec:
                option = group.add_option(help=help_text, *option_strings,
                                          **kwargs)
                if kwargs.get('action') == 'append':
                    self.lists[option.dest] = 1
        if component.settings_defaults:
            self.defaults.update(component.settings_defaults)
    for component in components:
        if component and component.settings_default_overrides:
            self.defaults.update(component.settings_default_overrides)
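Purely illustrative data: the shape of a SettingsSpec-style component that populate_from_components() walks. The class name, options, and defaults are invented; only the (title, description, option-tuples) grouping and the settings_defaults, settings_default_overrides, and relative_path_settings attributes mirror the interface the method reads.

class ExampleWriter:
    settings_spec = (
        'Example Writer Options',               # truthy title -> its own OptionGroup
        'Options specific to this writer.',     # group description
        (('Set the output encoding.',           # help text
          ['--output-encoding'],                # option strings
          {'dest': 'output_encoding', 'default': 'utf-8'}),
         ('Add a stylesheet path (may be repeated).',
          ['--stylesheet-path'],
          {'dest': 'stylesheet_path', 'action': 'append'})),
    )
    settings_defaults = {'traceback': False}
    settings_default_overrides = {'report_level': 3}
    relative_path_settings = ('stylesheet_path',)

With this spec, the 'append' action registers stylesheet_path in the parser's lists mapping, and settings_default_overrides is applied only after every component's own defaults have been merged.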
[ "def propagate_defaults(self, requiredvars, config, defaultsection=None):\n for option, infodic in requiredvars.items():\n if 'section' in infodic:\n section = infodic['section']\n else:\n section = defaultsection\n\n default = infodic['default']\n\n if not config.has_section(section):\n config.add_section(section)\n\n if not config.has_option(section, option):\n config.set(section, option, default)", "def update_defaults(self, default_configs: List[dict]) -> None:\n for c in default_configs:\n self.defaults = add_dicts(self.defaults, unpack(c))", "def _init_default_properties(self):\n for property_name in type(self).default_properties:\n if self.properties.get(property_name) is None:\n self.properties[property_name] = type(self).default_properties[property_name]", "def _process_default_values(data, specification, path, apply_defaults):\n\n for cur_key in specification:\n if (not _is_spec(cur_key)) and (cur_key not in data):\n default_value = specification[cur_key]\n default_value_from_spec = False\n if _is_spec(default_value):\n default_value = _instantiate_spec(default_value)\n if default_value.default is DEFAULT_NOT_SET:\n continue\n elif default_value.default is REQUIRED_VALUE:\n raise MakefileError(\"A value MUST be supplified for %r\"\n % (_path_to_str(path + (cur_key,))))\n default_value = default_value.default\n default_value_from_spec = True\n\n if apply_defaults \\\n and not isinstance(default_value, WithoutDefaults):\n if isinstance(default_value, dict):\n # Setting of values in the dict will be accomplished\n # in subsequent calls to _process_default_values\n default_value = {}\n elif isinstance(default_value, list):\n # Lists of specs defaults to empty lists\n if not default_value_from_spec:\n default_value = []\n\n # Prevent clobbering of values when re-using sub-specs\n data[cur_key] = copy.deepcopy(default_value)", "def set_all_defaults(self):\n for key, param in self.parameters.items():\n valdict = self.param_to_valdict(param)\n self.set_defaults(param, valdict)", "def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]", "def update_defaults(self):\r\n # setting names\r\n settings_names = (\"CMDSET_CHARACTER\", \"CMDSET_PLAYER\",\r\n \"BASE_PLAYER_TYPECLASS\", \"BASE_OBJECT_TYPECLASS\",\r\n \"BASE_CHARACTER_TYPECLASS\", \"BASE_ROOM_TYPECLASS\",\r\n \"BASE_EXIT_TYPECLASS\", \"BASE_SCRIPT_TYPECLASS\",\r\n \"BASE_CHANNEL_TYPECLASS\")\r\n # get previous and current settings so they can be compared\r\n settings_compare = zip([ServerConfig.objects.conf(name) for name in settings_names],\r\n [settings.__getattr__(name) for name in settings_names])\r\n mismatches = [i for i, tup in enumerate(settings_compare) if tup[0] and tup[1] and tup[0] != tup[1]]\r\n if len(mismatches): # can't use any() since mismatches may be [0] which reads as False for any()\r\n # we have a changed default. Import relevant objects and\r\n # run the update\r\n from src.objects.models import ObjectDB\r\n from src.comms.models import ChannelDB\r\n #from src.players.models import PlayerDB\r\n for i, prev, curr in ((i, tup[0], tup[1]) for i, tup in enumerate(settings_compare) if i in mismatches):\r\n # update the database\r\n print \" %s:\\n '%s' changed to '%s'. 
Updating unchanged entries in database ...\" % (settings_names[i], prev, curr)\r\n if i == 0:\r\n [obj.__setattr__(\"cmdset_storage\", curr) for obj in ObjectDB.objects.filter(db_cmdset_storage__exact=prev)]\r\n if i == 1:\r\n [ply.__setattr__(\"cmdset_storage\", curr) for ply in PlayerDB.objects.filter(db_cmdset_storage__exact=prev)]\r\n if i == 2:\r\n [ply.__setattr__(\"typeclass_path\", curr) for ply in PlayerDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i in (3, 4, 5, 6):\r\n [obj.__setattr__(\"typeclass_path\", curr) for obj in ObjectDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i == 7:\r\n [scr.__setattr__(\"typeclass_path\", curr) for scr in ScriptDB.objects.filter(db_typeclass_path__exact=prev)]\r\n if i == 8:\r\n [scr.__setattr__(\"typeclass_path\", curr) for scr in ChannelDB.objects.filter(db_typeclass_path__exact=prev)]\r\n # store the new default and clean caches\r\n ServerConfig.objects.conf(settings_names[i], curr)\r\n ObjectDB.flush_instance_cache()\r\n PlayerDB.flush_instance_cache()\r\n ScriptDB.flush_instance_cache()\r\n ChannelDB.flush_instance_cache()\r\n # if this is the first start we might not have a \"previous\"\r\n # setup saved. Store it now.\r\n [ServerConfig.objects.conf(settings_names[i], tup[1])\r\n for i, tup in enumerate(settings_compare) if not tup[0]]", "def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }", "def config_defaults(self):\n return {\n \"ingredients\": [data_ingredient, builder_ingredient],\n \"run_config\": copy(cd.run_config),\n \"loader_config\": copy(cd.loader_config),\n \"builder_config\": copy(cd.builder_config),\n \"tb_config\": copy(cd.tb_config),\n \"lr_config\": copy(cd.lr_config),\n }", "def dummy_config() -> ml_collections.ConfigDict:\n global_dict = {}\n for agent in get_implemented_agents():\n paper_agent = get_paper_agent(agent)\n global_dict.update(dataclasses.asdict(paper_agent.default))\n return ml_collections.ConfigDict(global_dict, type_safe=False)", "def _resolveEnvironments(self):\n configuration = self._raw['environments']\n\n default = configuration.get(DEFAULT, {})\n result = {}\n for name, data in configuration.items():\n if name == DEFAULT:\n continue\n new_data = default.copy()\n if isinstance(data, list):\n new_data['slaves'] = data\n else:\n new_data.update(data)\n result[name] = new_data\n\n return result", "def init_params(self, overrides: Dict[str, List[dict]] = {}) -> None:\n # TODO: Change overrides value type to a named tuple or something else\n # more appropriate than a free-form dict.\n for name, param in self._free_params.items():\n store = None\n for o in overrides.get(param.fqn, []):\n if path_matches_spec(self._fragment_path, o[\"path\"]):\n store = o[\"store\"]\n if not store:\n identity = (param.fqn, self._stringize_path())\n value = param.eval_default(self._get_dataset_or_set_default)\n store = param.make_store(identity, value)\n\n for handle in self._get_all_handles_for_param(name):\n handle.set_store(store)\n\n for s in self._subfragments:\n s.init_params(overrides)", "def check_options(opts):\n\n sections = baseoptions.keys()\n for s in sections:\n defaults = dict(baseoptions[s])\n for i in defaults:\n if i not in opts:\n opts[i] = defaults[i]\n return opts", "def load_defaults(defaults_file: list = []):\n cfg = Config(\"configs/default.yaml\")\n # cfg = cfg.update_config(Config(\"configs/dataset.yaml\"))\n for 
file in defaults_file:\n print(file)\n cfg = deep_update(cfg, Config(file))\n \n cfg = Opts(cfg).parse_args()\n \n cfg = load_enviroment_path(cfg)\n return cfg", "def test_read_config_found_defaults_in_sections(self):\n for k, v in self.config.items():\n for key in self.config_defaults.keys():\n self.assertTrue(key in v.keys())", "def _merge_configurations(self):\n m = dict()\n m.update(self._default)\n m.update(self._repo)\n m.update(self._user)\n return m", "def complete_dflt_vals(cfg):\n dflt = cfg['default_params'] # all default params\n for key, entries in cfg.items():\n if key not in _dict_fields:\n continue\n\n logger.debug(\"check for %s defaults\", key)\n dflts = dflt.get(key, {}) # default params for given section\n\n # if not dflts:\n # continue\n logger.info(\"set defaults for %s\", key)\n if dflts:\n logger.debug(\"defaults %s\", dflts)\n\n for name, entry in sorted(entries.items()):\n logger.debug(\"%s:%s\", key, name)\n\n if 'name' not in entry: # set name field if missing\n logger.debug(\"NAME = %r\", name)\n entry['name'] = name\n\n for dkey, dval in dflts.items():\n if dkey not in entry:\n entry[dkey] = dval\n logger.debug(\"%r = %r\", dkey, dval)", "def __parseAllHelper( self, parsed ):\n parsedDict = vars(parsed)\n for name, obj in vars(self).iteritems():\n if isinstance( obj, ConfigHelper ):\n for var in obj.getOptions():\n key = \"%s.%s\" %( name,var )\n if key in parsedDict:\n try:\n obj.setOption( var, parsedDict[key] )\n except RuntimeError as e:\n self._errorMessages.append( \"ERROR: %s \" % e )", "def _load_settings(self):\n with open(DEFAULT_PATH, 'rb') as file_:\n default_settings = yaml.load(file_)\n LOG.info('Loaded defaults: %s', default_settings)\n\n user_settings = {}\n if os.path.isfile(USERSETTINGS_PATH) and os.access(USERSETTINGS_PATH, os.R_OK):\n try:\n with open(USERSETTINGS_PATH, 'rb') as file_:\n user_settings = yaml.load(file_)\n LOG.info('Loaded user settings %s from path %s', user_settings,\n USERSETTINGS_PATH)\n except Exception:\n LOG.exception('Exception during loading of user settings')\n # FIXME check user_settings keys\n else:\n LOG.info('No user settings found, file %s does not exist or is not readable',\n USERSETTINGS_PATH)\n\n self.__class__.settings = ChainMap(user_settings, default_settings)\n self.__class__.settings_names = list(self.settings.keys())", "def get_component_definitions(\n pipeline_config: Dict[str, Any], overwrite_with_env_variables: bool = True\n) -> Dict[str, Dict[str, Any]]:\n component_definitions = {} # definitions of each component from the YAML.\n\n for raw_component_definition in pipeline_config[\"components\"]:\n name = raw_component_definition[\"name\"]\n # We perform a shallow copy here because of https://github.com/deepset-ai/haystack/issues/2568\n component_definition = {key: copy(value) for key, value in raw_component_definition.items() if key != \"name\"}\n component_definitions[name] = component_definition\n\n if overwrite_with_env_variables:\n for key, value in os.environ.items():\n env_prefix = f\"{name}_params_\".upper()\n if key.startswith(env_prefix):\n param_name = key.replace(env_prefix, \"\").lower()\n if \"params\" not in component_definition:\n component_definition[\"params\"] = {}\n component_definition[\"params\"][param_name] = value\n logger.info(\n \"Param '%s' of component '%s' overwritten with environment variable '%s' value '%s'.\",\n param_name,\n name,\n key,\n \"***\",\n )\n return component_definitions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of config files, from environment or standard.
def get_standard_config_files(self):
    try:
        config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
    except KeyError:
        config_files = self.standard_config_files

    # If 'HOME' is not set, expandvars() requires the 'pwd' module which is
    # not available under certain environments, for example, within
    # mod_python.  The publisher ends up in here, and we need to publish
    # from within mod_python.  Therefore we need to avoid expanding when we
    # are in those environments.
    expand = os.path.expanduser
    if 'HOME' not in os.environ:
        try:
            import pwd
        except ImportError:
            expand = lambda x: x
    return [expand(f) for f in config_files if f.strip()]
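A hedged usage sketch, assuming docutils is installed and that docutils.frontend.OptionParser can be instantiated with no arguments: DOCUTILSCONFIG entries are separated by os.pathsep, empty entries are dropped, and '~' is expanded when HOME is available. The file names are made up.

import os
from docutils.frontend import OptionParser

os.environ['DOCUTILSCONFIG'] = os.pathsep.join(
    ['~/site-docutils.conf', '', './docutils.conf'])   # empty entry is skipped
print(OptionParser().get_standard_config_files())
# e.g. ['/home/<user>/site-docutils.conf', './docutils.conf']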
[ "def get_config_files(self):\n if package.backend.FORMAT == \"rpm\":\n return [\"sysconfig/clamd.amavisd\", \"tmpfiles.d/clamd.amavisd.conf\"]\n return []", "def in_cwd():\n configs = []\n\n for filename in os.listdir(os.getcwd()):\n if filename.startswith('.tmuxp') and is_config_file(filename):\n configs.append(filename)\n\n return configs", "def all_configs():\n\n path = os.path.expanduser(TESTCONFIG['audits']['config_dir'])\n config_names = []\n for glop in ['*conf']:\n config_names.extend(\n os.path.basename(x)\n for x in glob.iglob(os.path.join(path, glop)))\n return config_names", "def _readStdConfigFiles(cls):\n\n # Default one first\n cls.readConfigFile(DEFAULT_CONFIG)\n\n # Site specific one can override properties defined in default\n cls.readConfigFile(USER_CONFIG)", "def get_configs():\n with open(CONFIG_PATH) as f:\n return json.load(f)", "def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/balanced_vae_study_v1/metric_configs/\"))", "def global_resources_files(config):\n # type: (dict) -> list\n try:\n files = config['global_resources']['files']\n if util.is_none_or_empty(files):\n raise KeyError()\n except KeyError:\n files = []\n return files", "def configFiles(component, required=True):\n # Get the config dir\n etc = getConfigDir()\n\n conf = []\n\n # First the default conf\n cfg = os.path.join(etc, \"{0}.conf\".format(component))\n # Is it readable?\n if os.access(cfg, os.R_OK):\n # Yes, we add it to config list\n conf.append(cfg)\n elif required:\n # Nope, and it's required so we raise an error\n raise ConfigError(\"Required config file for component '{0}' not \"\n \"found at {1}\".format(component, cfg))\n else:\n # The default component config was not found, so we do not even look for\n # a site local config.\n return conf\n\n # Check for a site local confi\n cfg = os.path.join(etc, \"{0}.site.conf\".format(component))\n # Is it readable?\n if os.access(cfg, os.R_OK):\n # Yes, we add it to config list\n conf.append(cfg)\n\n return conf", "def locations(self, exists=True):\n result = []\n for config_files in self.config_paths:\n if not config_files:\n continue\n if os.path.isdir(config_files):\n config_files = [os.path.join(config_files, i)\n for i in sorted(os.listdir(config_files))\n if i.endswith('.conf')]\n else:\n config_files = [config_files]\n for config_file in config_files:\n if not exists or os.path.exists(config_file):\n config_file = os.path.abspath(config_file)\n if config_file in result:\n result.remove(config_file)\n result.append(config_file)\n return result", "def in_dir(config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json', '.ini']):\n configs = []\n\n for filename in os.listdir(config_dir):\n if is_config_file(filename, extensions) and not filename.startswith('.'):\n configs.append(filename)\n\n return configs", "def find_default_config_files() -> Iterator[Path]:\n yield from _yield_default_files()\n\n try:\n yield from _find_project_config()\n except OSError:\n pass\n\n try:\n yield from _find_config_in_home_or_environment()\n except OSError:\n pass\n\n try:\n if os.path.isfile(\"/etc/pylintrc\"):\n yield Path(\"/etc/pylintrc\").resolve()\n except OSError:\n pass", "def get_configs() -> list:\n configs = sh.docker('config', 'ls', '--format', '{{ .Name }}')\n\n return configs.stdout.decode('utf8').splitlines()", "def _read_config_files(self, *, base: str = '') -> None:\n self.__used_config_files = frozenset(self.read(os.path.join(base, f) for f in reversed(self.CONFIG_FILES)))", "def 
get_conf_files_plus(config_file):\n l = get_conf_files()\n l.append(config_file)\n return l", "def read_config_file():\n file_found = 0\n filename = URLNET_CFG\n search_path=os.environ['PATH']\n paths = ['.',]\n # allow for the possibility that there is no HOME env variable\n home = None\n try:\n home = os.environ['HOME']\n except Exception, e:\n pass\n # \n if home != None and len(home) > 0:\n paths.append(home)\n paths = paths + split(search_path, pathsep)\n \n for path in paths:\n if exists(join(path, filename)):\n file_found = 1\n break\n if file_found:\n path = abspath(join(path, filename))\n try:\n fd = open(path)\n lines = fd.readlines()\n fd.close()\n return lines\n except Exception, e:\n return None\n else:\n return None", "def get_project_list(config):\r\n eggs_dir = config.get('eggs_dir', 'eggs')\r\n if os.path.exists(eggs_dir):\r\n projects = os.listdir(eggs_dir)\r\n else:\r\n projects = []\r\n try:\r\n projects += [x[0] for x in config.cp.items('settings')]\r\n except NoSectionError:\r\n pass\r\n return projects", "def apps() -> List[str]:\n with Configuration() as config:\n return config.get_apps()", "def get_eval_config_files(self):\n return list(resources.get_files_in_folder(\"config/unsupervised_study_v1/metric_configs/\"))", "def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/correlated_factors_study_ws_id2/metric_configs/\"))", "def _get_config_dirs():\n config_dirs = [\n USER_CONFIG_DIR,\n os.path.join(\"/\", \"etc\", \"rapport\"),\n os.path.abspath(os.path.join(\"rapport\", \"config\"))\n ]\n return config_dirs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an option by its dest. If you're supplying a dest which is shared by several options, it is undefined which option of those is returned. A KeyError is raised if there is no option with the supplied dest.
def get_option_by_dest(self, dest):
    for group in self.option_groups + [self]:
        for option in group.option_list:
            if option.dest == dest:
                return option
    raise KeyError('No option with dest == %r.' % dest)
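A standalone sketch of the same lookup using plain optparse, since the method only needs the parser's option_groups and option_list; the group and option names are made up.

import optparse

def get_option_by_dest(parser, dest):
    for group in parser.option_groups + [parser]:
        for option in group.option_list:
            if option.dest == dest:
                return option
    raise KeyError('No option with dest == %r.' % dest)

parser = optparse.OptionParser()
group = parser.add_option_group('Output')
group.add_option('--output-encoding', dest='output_encoding', default='utf-8')

print(get_option_by_dest(parser, 'output_encoding').get_opt_string())
# --output-encoding
try:
    get_option_by_dest(parser, 'missing')
except KeyError as err:
    print(err)   # KeyError message for an unknown dest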
[ "def get(self, opt, index=0):\n\t\ti = 0\n\t\tfor n, d in self.options:\n\t\t\tif n == opt:\n\t\t\t\tif i == index:\n\t\t\t\t\treturn d\n\t\t\t\ti += 1\n\t\treturn None", "def get_option(cfg, base, opt):\n if cfg.has_option(base, opt):\n return cfg.get(base, opt)\n else:\n return None", "def get_option(self, **attrs) -> Optional[Option]:\n return utils.get(self._options, **attrs)", "def get_option_by_varname(self, varname):\n option = self.options.get(varname, None)\n if option is None:\n raise KeyError(\n \"No option with the variable name '{}' could \"\n \"be found\".format(varname)\n )\n return option", "def get_option(self, opt_str):\n for opt in self._options.values():\n if opt_str in ('-' + opt.short, '--' + opt.long):\n return opt, False\n if opt_str == '--' + opt.inverse:\n return opt, True\n return None, None", "def get(self, *args, **kargs):\n return self.get_option(*args, **kargs)", "def get_plugin_option(self, plugin, key):\n if plugin in self.plugins:\n plugin = self.plugins[plugin]\n return plugin.get_option(key)", "def get_source_for_option(self, section: str, option: str) -> Optional[str]:", "def GetCommandOption(self, option, default = None):\n\n for opt, opt_arg in self.__command_options:\n if opt == option:\n return opt_arg\n return default", "def option(self, name):\n return self._options.get(name)", "def get_unique_option(self, option):\n for opt in self.options:\n if option == opt.__class__:\n return opt", "def get(self, id):\n return self._opts.setdefault(id, [None])[0]", "def getopt(settings, key, strict=False, copy=False):\n from . import defaults\n\n ns = namespace(settings)\n if ns.__contains__(key):\n return ns.__getitem__(key)\n\n # usually fallback to None\n args = [defaults, key, None]\n if strict:\n args.pop()\n default = getattr(*args)\n if copy:\n from copy import deepcopy\n default = deepcopy(default)\n\n return default", "def get(option, fallback=None):\n from vjezd import device as this_device\n\n o = Config.query.filter(\n # only rules valid for this device\n or_(Config.device == this_device.id,\n Config.device == None),\n # AND meeting the option name\n Config.option == option,\n ).order_by(Config.device.desc(), Config.id.desc()).first()\n\n if o:\n logger.debug('Read option from db: {}'.format(o))\n if o.value == None:\n return fallback\n return o.value\n\n return fallback", "def getOption(self,optionName):\n for opt in self.options:\n if opt[0]==optionName:\n return opt[1]\n return ''", "def GetGlobalOption(self, option, default=None):\n\n for opt, opt_arg in self.__global_options:\n if opt == option:\n return opt_arg\n return default", "def get(self,\n section,\n option):\n return self.__parser.get(section=section, option=option)", "def _get_opt(corpus, opt, def_val):\n if \"clean\" in corpus[\"transforms\"]:\n value = corpus.get(opt, def_val)\n clean = value\n else:\n clean = None\n return clean", "def get_option(self, option, keys=None):\n try:\n item = self._table.get_item(\n Key={\n self._store_key: self._store_name,\n self._option_key: option\n }\n )['Item']\n del item[self._store_key]\n del item[self._option_key]\n\n if keys:\n return {\n key: value\n for key, value in item.items()\n if key in keys\n }\n else:\n return {key: value for key, value in item.items()}\n\n except Exception:\n raise", "def __get_config_option(self, o: str) -> Any:\n try:\n return self.config.get('FAKE_SECTION', o)\n except configparser.NoOptionError:\n return self.defaults[o]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform '-' to '_' so the cmdline form of option names can be used.
def optionxform(self, optionstr):
    return optionstr.lower().replace('-', '_')
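A minimal sketch of where this transform helps: a ConfigParser subclass whose optionxform maps the command-line spelling ('Tab-Width') onto the settings attribute name ('tab_width'). The section and option names are made up.

import configparser

class SettingsConfigParser(configparser.RawConfigParser):
    def optionxform(self, optionstr):
        return optionstr.lower().replace('-', '_')

parser = SettingsConfigParser()
parser.read_string("[general]\nTab-Width = 4\noutput-encoding = utf-8\n")
print(dict(parser.items('general')))
# {'tab_width': '4', 'output_encoding': 'utf-8'}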
[ "def to_option(attr):\n return '--%s' % attr.lower().replace('_', '-')", "def __set_opt(option):\n return \"--\" + option", "def option_prefix(self, option):\n return \"--\"", "def attr_to_arg(attr):\n return '--{}'.format(attr.replace('_', '-'))", "def option_strings(self) -> List[str]:\n\n dashes: List[str] = [] # contains the leading dashes.\n options: List[str] = [] # contains the name following the dashes.\n\n dash = \"-\" if len(self.name) == 1 else \"--\"\n option = f\"{self.prefix}{self.name}\"\n\n if self.field.metadata.get(\"positional\"):\n # Can't be positional AND have flags at same time. Also, need dest to be be this and not just option.\n return [self.dest]\n\n dashes.append(dash)\n options.append(option)\n\n if dash == \"-\":\n # also add a double-dash option:\n dashes.append(\"--\")\n options.append(option)\n\n # add all the aliases that were passed to the `field` function.\n for alias in self.aliases:\n if alias.startswith(\"--\"):\n dash = \"--\"\n name = alias[2:]\n elif alias.startswith(\"-\"):\n dash = \"-\"\n name = alias[1:]\n else:\n dash = \"-\" if len(alias) == 1 else \"--\"\n name = alias\n option = f\"{self.prefix}{name}\"\n\n dashes.append(dash)\n options.append(option)\n\n # Additionally, add all name variants with the \"_\" replaced with \"-\".\n # For example, \"--no-cache\" will correctly set the `no_cache` attribute,\n # even if an alias isn't explicitly created.\n\n if FieldWrapper.add_dash_variants:\n additional_options = [\n option.replace(\"_\", \"-\") for option in options if \"_\" in option\n ]\n additional_dashes = [\n \"-\" if len(option) == 1 else \"--\" for option in additional_options\n ]\n options.extend(additional_options)\n dashes.extend(additional_dashes)\n\n if type(self).add_dest_to_option_strings:\n dashes.append(\"-\" if len(self.dest) == 1 else \"--\")\n options.append(self.dest)\n\n # remove duplicates by creating a set.\n option_strings = set(f\"{dash}{option}\" for dash, option in zip(dashes, options))\n # TODO: possibly sort the option strings, if argparse doesn't do it\n # already.\n return list(sorted(option_strings, key=len))", "def fix_sys_argv_quotes(self, cmd):\n # handle fixing quotes\n # case 1: \"--val\", \" -nlev 276 \"\n # case 2: \"-val\" , \" -nlev 276 \"\n # case 3: CAM_CONFIG_OPTS=\" -nlev 276 \"\n for i, item in enumerate(cmd):\n if re.match(\"[-]{1,2}val\", item) is not None:\n if i + 1 >= len(cmd):\n continue\n\n # only quote if value contains spaces\n if \" \" in cmd[i + 1]:\n cmd[i + 1] = f'\"{cmd[i + 1]}\"'\n else:\n m = re.search(\"([^=]*)=(.*)\", item)\n\n if m is None:\n continue\n\n g = m.groups()\n\n # only quote if value contains spaces\n if \" \" in g[1]:\n cmd[i] = f'{g[0]}=\"{g[1]}\"'\n\n return cmd", "def sanitize_options(options):\n sanitized_options = []\n for option in options:\n if isinstance(option, str):\n option = os.path.basename(option)\n sanitized_options.append(option)\n return sanitized_options", "def build_cli_extra(optargs):\n\n def render(k, v):\n if not isinstance(k, str):\n raise TypeError(\n \"Option name isn't a string: {} ({})\".format(k, type(k)))\n if v is None:\n return k\n if is_collection_like(v):\n v = \" \".join(map(str, v))\n return \"{} {}\".format(k, v)\n\n try:\n data_iter = optargs.items()\n except AttributeError:\n data_iter = optargs\n\n return \" \".join(render(*kv) for kv in data_iter)", "def convert_name(value: str) -> str:\n return \"--\" + value.replace(\"_\", \"-\")", "def option_maker(self):\n pass", "def get_attr_name (self, long_option):\r\n return 
string.translate(long_option, longopt_xlate)", "def _createMenuPathName(self, name):\n # hide anything between brackets\n name = re.sub(\"\\(.*\\)\", \"\", name)\n # replace invalid chars\n name = name.replace(\" \", \"_\")\n if name and name[0] in \"0123456789_\":\n name = \"_\" + name\n name = re.sub(\"[^a-zA-z_0-9]\", \"\", name)\n return name.lower()", "def format_options_name(operation):\n operation = operation.split('#')[-1]\n op_class, op_function = operation.split('.')\n op_class = operations_name(op_class)\n return f\"{op_class}_{op_function}_options\"", "def _command_name_normalized(cls, command: str) -> str:\n return command.lower().replace(\"-\", \"_\")", "def quote_names(self):\n return \"\"\"--quote-names\"\"\"", "def InternalArgNameFrom(arg_external_name):\n return arg_external_name.replace('-', '_')", "def ExternalArgNameFrom(arg_internal_name):\n return arg_internal_name.replace('_', '-')", "def optSetStrNr(*args):\n return _optcc.optSetStrNr(*args)", "def option_group_name(self) -> str:\n ...", "def skip_opt(self):\n return \"\"\"--skip-opt\"\"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether the encoding of `stream` matches `encoding`. Returns ``True`` if both resolve to the same codec, ``False`` if they differ, and ``None`` if `encoding` or `stream.encoding` is missing or not a valid encoding argument.
def check_encoding(stream, encoding):
    try:
        return codecs.lookup(stream.encoding) == codecs.lookup(encoding)
    except (LookupError, AttributeError, TypeError):
        return None
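A hedged usage sketch, re-stating the function above so it runs standalone: encoding aliases resolve to the same codec object, and byte streams without an .encoding attribute yield None.

import codecs
import io

def check_encoding(stream, encoding):
    try:
        return codecs.lookup(stream.encoding) == codecs.lookup(encoding)
    except (LookupError, AttributeError, TypeError):
        return None

utf8_stream = io.TextIOWrapper(io.BytesIO(), encoding='utf-8')
print(check_encoding(utf8_stream, 'UTF8'))     # True  -- alias of utf-8
print(check_encoding(utf8_stream, 'latin-1'))  # False -- different codec
print(check_encoding(io.BytesIO(), 'utf-8'))   # None  -- no .encoding attribute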
[ "def test_encoding_detection():\n \n url = 'http://lavr.github.io/python-emails/tests/requests/some-utf8-text.html'\n expected_content = u'我需要单间。' # Chinese is for example only. Any other non-european encodings broken too.\n\n r =\trequests.get(url)\n\n # Response.apparent_encoding is good\n assert r.apparent_encoding == 'utf-8'\n real_text = unicode(r.content, r.apparent_encoding)\n assert expected_content in real_text\n\n # but Response.text is broken\n # (the reason is: commit a0ae2e6)\n assert expected_content in r.text", "def detect_encoding(readline):\n try:\n filename = readline.__self__.name\n except AttributeError:\n filename = None\n bom_found = False\n encoding = None\n default = 'ascii'\n\n def read_or_stop():\n try:\n return readline()\n except StopIteration:\n return b''\n\n def find_cookie(line):\n try:\n # Decode as ASCII, which is Python 2 default\n line_string = line.decode('ascii')\n except UnicodeDecodeError:\n msg = \"invalid or missing encoding declaration\"\n if filename is not None:\n msg = '{} for {!r}'.format(msg, filename)\n raise SyntaxError(msg)\n\n match = cookie_re.match(line_string)\n if not match:\n return None\n encoding = _get_normal_name(match.group(1))\n try:\n lookup(encoding)\n except LookupError:\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = \"unknown encoding: \" + encoding\n else:\n msg = \"unknown encoding for {!r}: {}\".format(filename,\n encoding)\n raise SyntaxError(msg)\n\n if bom_found:\n if encoding != 'utf-8':\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = 'encoding problem: utf-8'\n else:\n msg = 'encoding problem for {!r}: utf-8'\n msg = msg.format(filename)\n raise SyntaxError(msg)\n encoding += '-sig'\n return encoding\n\n first = read_or_stop()\n if first.startswith(BOM_UTF8):\n bom_found = True\n first = first[3:]\n default = 'utf-8-sig'\n if not first:\n return default, []\n\n encoding = find_cookie(first)\n if encoding:\n return encoding, [first]\n if not blank_re.match(first):\n return default, [first]\n\n second = read_or_stop()\n if not second:\n return default, [first]\n\n encoding = find_cookie(second)\n if encoding:\n return encoding, [first, second]\n\n return default, [first, second]", "def detect_encoding(file_path: str) -> str:\n detector = UniversalDetector()\n\n with open(file_path, 'rb') as file:\n for line in file.readlines():\n detector.feed(line)\n if detector.done:\n break\n\n detector.close()\n\n encoding = detector.result['encoding']\n logger.debug(f'Detected encoding for file \"{file_path}\": {encoding}')\n\n return encoding", "def is_known_charset(charset):\n try:\n codecs.lookup(charset)\n except LookupError:\n return False\n return True", "def sniff_encoding(fh):\n sniff = sniff_file(fh)\n\n # WoS files typically include a BOM, which we want to strip from the actual\n # data. The encodings 'utf-8-sig' and 'utf-16' do this for UTF-8 and UTF-16\n # respectively. When dealing with files with BOM, avoid the encodings\n # 'utf-8' (which is fine for non-BOM UTF-8), 'utf-16-le', and 'utf-16-be'.\n # See e.g. 
http://stackoverflow.com/a/8827604\n encodings = {codecs.BOM_UTF16: 'utf-16',\n codecs.BOM_UTF8: 'utf-8-sig'}\n for bom, encoding in encodings.items():\n if sniff.startswith(bom):\n return encoding\n # WoS export files are either UTF-8 or UTF-16\n return 'utf-8'", "def detect_file_encoding(self):\n\t\twith open(self.wq, 'r') as filehandle: # read in the file data\n\t\t\tfile_data = filehandle.read()\n\t\t\tself.detected_encoding = chardet.detect(file_data)['encoding']\n\n\t\tif self.detected_encoding == \"UTF-16\":\n\t\t\tself.detected_encoding = \"utf_16_le\" # we'll use this encoding in this case - if it detects UTF-16 off the YSI\n\t\t\t\t\t\t\t\t\t\t\t\t\t# then it's probably UCS-2 LE BOM, AKA UTF-16 LE BOM (sort of)", "def detect_encoding(readline):\r\n bom_found = False\r\n encoding = None\r\n default = 'utf-8'\r\n def read_or_stop():\r\n try:\r\n return readline()\r\n except StopIteration:\r\n return bytes()\r\n\r\n def find_cookie(line):\r\n try:\r\n line_string = line.decode('ascii')\r\n except UnicodeDecodeError:\r\n return None\r\n\r\n matches = cookie_re.findall(line_string)\r\n if not matches:\r\n return None\r\n encoding = _get_normal_name(matches[0])\r\n try:\r\n codec = lookup(encoding)\r\n except LookupError:\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError(\"unknown encoding: \" + encoding)\r\n\r\n if bom_found:\r\n if codec.name != 'utf-8':\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError('encoding problem: utf-8')\r\n encoding += '-sig'\r\n return encoding\r\n\r\n first = read_or_stop()\r\n if first.startswith(BOM_UTF8):\r\n bom_found = True\r\n first = first[3:]\r\n default = 'utf-8-sig'\r\n if not first:\r\n return default, []\r\n\r\n encoding = find_cookie(first)\r\n if encoding:\r\n return encoding, [first]\r\n\r\n second = read_or_stop()\r\n if not second:\r\n return default, [first]\r\n\r\n encoding = find_cookie(second)\r\n if encoding:\r\n return encoding, [first, second]\r\n\r\n return default, [first, second]", "def test_encoding_win(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'bad_codec.csv'))\n self.assertEqual(guessed_encoding.lower(), u'windows-1252')", "def hasEncoding(dbname = None):\n\n # use default db if none passed\n if dbname is None:\n dbname = _defaultDB\n\n # get the db definition\n if dbname not in _databases:\n raise DBRuntimeException(\"Unknown database [%s]\" % dbname)\n dbDef = _databases[dbname]\n\n # get the loosetypes attribute (default to False)\n return dbDef[\"def\"].get(\"encoding\")", "def check(video):\n\n\tif video.codec != TARGET_CODEC:\n\t\treturn True\n\n\tif video.metadata.get('encoder', None) in TODO_ENCODERS:\n\t\treturn True\n\n\treturn False", "def decode(self):\n ce = self.headers.get_first(\"content-encoding\")\n if not self.body or ce not in encoding.ENCODINGS:\n return False\n data = encoding.decode(ce, self.body)\n if data is None:\n return False\n self.body = data\n del self.headers[\"content-encoding\"]\n return True", "def is_valid_utf8(o):\n try:\n o.decode(\"utf-8\")\n except (UnicodeDecodeError, AttributeError):\n return False\n else:\n return True", "def get_file_encoding(content):\r\n encoding = None\r\n try:\r\n lines_to_check = content.split(\"\\n\", 2)\r\n for index in range(2):\r\n if len(lines_to_check) > index:\r\n line_encoding = _search_coding_line(lines_to_check[index])\r\n if line_encoding:\r\n encoding = line_encoding\r\n break\r\n except UnicodeDecodeError as error:\r\n #add logger\r\n print(error)\r\n #if not encoding is 
set then use UTF-8 as default\r\n if encoding is None:\r\n encoding = \"UTF-8\"\r\n return encoding", "def testInvalidContentEncoding(self):\n r = Request.blank(\"/\").get_response(filters.decode_filter(invalid_content_encoding_server))\n self.assert_(\"lying about it's encoding\" in r.text, r.body)", "def is_exact_taint(stream) -> bool:\n # The fuzzer has to get 8 characters right. This may be a bit much,\n # however, when found it shows a high level of control over the data.\n if stream == 'FROMFUZZ':\n return True\n\n return False", "def test_encoding_amazon_de_reviews_is_utf8(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon_de_reviews_200.csv'))\n self.assertEqual(guessed_encoding.lower(), u'utf-8')", "def check_file_encodings(self, file: str = None, encoding: str = None, cycle_bit: int = 0) -> str: \n if not file:\n file = self.filename\n if not encoding:\n encoding = self.encoding \n \n if cycle_bit > 0:\n common_encodings = [\"UTF-8\",\"Latin-1\", \"UTF-16\", \"ascii\", \"cp037\", \"cp437\", \"UTF-32\"]\n return common_encodings[cycle_bit]\n \n else:\n try:\n with open(file, encoding = encoding) as f:\n f.seek(10000,0)\n f.readline()\n f.close()\n return encoding\n except:\n common_encodings = [\"Latin-1\", \"UTF-16\", \"ascii\", \"cp037\", \"cp437\", \"UTF-32\"]\n \n for codec in common_encodings:\n try:\n with open(file, encoding = codec) as f:\n f.readline()\n f.close()\n return codec\n except:\n continue\n print(\"Your file is an unusual type - can you specify the encoding for us?\")", "def verifyContentType(self, desiredType, desiredCharset=\"\"):\n content_type = self.headers.get('Content-Type')\n \n if content_type == None:\n return False\n \n parts = content_type.split(\";\")\n mediatype = parts[0]\n charset = parts[1] if len(parts) > 1 else \"\"\n \n isDesiredMediaType = True if mediatype.lower() == desiredType.lower() else False\n\n if len(desiredCharset):\n isDesiredCharset = True if desiredCharset.lower() in charset.lower() else False\n else:\n isDesiredCharset = True\n \n return isDesiredMediaType and isDesiredCharset", "def charset_exists(charset):\r\n import codecs\r\n try:\r\n codecs.lookup(charset)\r\n except LookupError:\r\n return False\r\n return True", "def test_encoding_ascii(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon-sample-1000.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decode a string, `data`, heuristically. Raise UnicodeError if unsuccessful. The client application should call ``locale.setlocale`` at the
def decode(self, data): if self.encoding and self.encoding.lower() == 'unicode': assert isinstance(data, str), ( 'input encoding is "unicode" ' 'but input is not a unicode object') if isinstance(data, str): # Accept unicode even if self.encoding != 'unicode'. return data if self.encoding: # We believe the user/application when the encoding is # explicitly given. encodings = [self.encoding] else: data_encoding = self.determine_encoding_from_data(data) if data_encoding: # If the data declares its encoding (explicitly or via a BOM), # we believe it. encodings = [data_encoding] else: # Apply heuristics only if no encoding is explicitly given and # no BOM found. Start with UTF-8, because that only matches # data that *IS* UTF-8: encodings = ['utf-8', 'latin-1'] if locale_encoding: encodings.insert(1, locale_encoding) for enc in encodings: try: decoded = str(data, enc, self.error_handler) self.successful_encoding = enc # Return decoded, removing BOMs. return decoded.replace('\ufeff', '') except (UnicodeError, LookupError) as err: error = err # in Python 3, the <exception instance> is # local to the except clause raise UnicodeError( 'Unable to decode input data. Tried the following encodings: ' '%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]), ErrorString(error)))
[ "def decode(self, data):\r\n if self.encoding and self.encoding.lower() == 'unicode':\r\n assert isinstance(data, unicode), (\r\n 'input encoding is \"unicode\" '\r\n 'but input is not a unicode object')\r\n if isinstance(data, unicode):\r\n # Accept unicode even if self.encoding != 'unicode'.\r\n return data\r\n if self.encoding:\r\n # We believe the user/application when the encoding is\r\n # explicitly given.\r\n encodings = [self.encoding]\r\n else:\r\n data_encoding = self.determine_encoding_from_data(data)\r\n if data_encoding:\r\n # If the data declares its encoding (explicitly or via a BOM),\r\n # we believe it.\r\n encodings = [data_encoding]\r\n else:\r\n # Apply heuristics only if no encoding is explicitly given and\r\n # no BOM found. Start with UTF-8, because that only matches\r\n # data that *IS* UTF-8:\r\n encodings = ['utf-8', 'latin-1']\r\n if locale_encoding:\r\n encodings.insert(1, locale_encoding)\r\n for enc in encodings:\r\n try:\r\n decoded = unicode(data, enc, self.error_handler)\r\n self.successful_encoding = enc\r\n # Return decoded, removing BOMs.\r\n return decoded.replace(u'\\ufeff', u'')\r\n except (UnicodeError, LookupError), err:\r\n error = err # in Python 3, the <exception instance> is\r\n # local to the except clause\r\n raise UnicodeError(\r\n 'Unable to decode input data. Tried the following encodings: '\r\n '%s.\\n(%s)' % (', '.join([repr(enc) for enc in encodings]),\r\n ErrorString(error)))", "def smart_decode(data, charset):\n try:\n if isinstance(data, str):\n # It's already unicode so just return it\n return data\n else:\n return data.decode(charset, errors='strict')\n\n except UnicodeDecodeError: # PY3\n # Looks like the charset lies, try to detect it\n return guess_encoding_and_decode(data, claimed=charset)\n\n except LookupError:\n # They gave us a crap encoding\n return guess_encoding_and_decode(data, claimed=charset)", "def decoder(data):\n\n def next_byte(_it, start, count):\n try:\n return next(_it)[1]\n except StopIteration:\n raise UnicodeDecodeError(\n NAME, data, start, start + count, \"incomplete byte sequence\"\n )\n\n it = iter(enumerate(data))\n for i, d in it:\n if d == 0x00: # 00000000\n raise UnicodeDecodeError(\n NAME, data, i, i + 1, \"embedded zero-byte not allowed\"\n )\n\n if d & 0x80: # 1xxxxxxx\n if d & 0x40: # 11xxxxxx\n if d & 0x20: # 111xxxxx\n if d & 0x10: # 1111xxxx\n raise UnicodeDecodeError(\n NAME, data, i, i + 1, \"invalid encoding character\"\n )\n\n if d == 0xED:\n value = 0\n for i1, dm in enumerate(DECODE_MAP[6]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 1110xxxx\n value = d & 0x0F\n for i1, dm in enumerate(DECODE_MAP[3]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 110xxxxx\n value = d & 0x1F\n for i1, dm in enumerate(DECODE_MAP[2]):\n d1 = next_byte(it, i, i1 + 1)\n value = dm.apply(d1, value, data, i, i1 + 1)\n else: # 10xxxxxx\n raise UnicodeDecodeError(\n NAME, data, i, i + 1, \"misplaced continuation character\"\n )\n else: # 0xxxxxxx\n value = d\n # noinspection PyCompatibility\n yield mutf8_unichr(value)", "def decode(self, data): # pragma: no cover\n encoding = getattr(self, 'encoding', 'ascii')\n return data.decode(encoding, 'ignore')", "def decode(self, s):\n\n if isinstance(s, unicode):\n return s\n for (name, decoder) in self.decoders:\n try:\n return decoder(s)[0]\n except ValueError:\n logger.verbose(\"Encoding '%s' failed for string %r\" % (name, s))\n\n if self.fallback_decoder is not None:\n (name, 
decoder) = self.fallback_decoder\n return decoder(s, 'replace')[0]\n else:\n raise UnicodeError()", "def decode_utf8(self, text):\n try:\n return text.decode('utf-8', 'strict') if self.utf8 else text.decode(self.fallback, errors='replace')\n except UnicodeDecodeError:\n return text.decode(self.fallback, 'replace')", "def urlDecode(self, data):\n # type: (Union[str, bytearray]) -> Union[str, bytearray]", "def decode(cls, data):\n raise NotImplementedError()", "def test_unicode_decode_errors(self):\n self.assertEqual(decode.decode('Why, %c', b'\\x01', True),\n 'Why, ' + error('%c ERROR', -1))\n\n self.assertEqual(\n decode.decode('%sXY%+ldxy%u', b'\\x83N\\x80!\\x01\\x02', True),\n '{}XY{}xy{}'.format(error('%s ERROR', \"'N\\\\x80!'\"),\n error('%+ld SKIPPED', -1),\n error('%u SKIPPED', 1)))\n\n self.assertEqual(\n decode.decode('%s%lld%9u', b'\\x82$\\x80\\x80', True),\n '{0}{1}{2}'.format(error(\"%s ERROR ('$\\\\x80')\"),\n error('%lld SKIPPED'), error('%9u SKIPPED')))\n\n self.assertEqual(decode.decode('%c', b'\\xff\\xff\\xff\\xff\\x0f', True),\n error('%c ERROR', -2147483648))", "def decode(data):\n obj , data = _decode(data)\n if data and len(data) == 0:\n return obj", "def decode(self, data):\n\t\traise NotImplementedError()", "def decode_string(s):\n try:\n return s.decode('utf-8')\n except (UnicodeDecodeError, AttributeError):\n return s", "def decode(self, text):\n # only decode byte strings into unicode if it hasn't already\n # been done by a subclass\n if isinstance(text, six.text_type):\n return text\n\n # empty text? nothing to decode\n if not text:\n return u''\n\n # use chardet to automatically detect the encoding text\n result = chardet.detect(text)\n return text.decode(result['encoding'])", "def decode(data):\n try:\n return _decode(data)\n except VecBufEOB:\n raise DecoderError('Incomplete encoded data')", "def decode(self, line):\n\n # loc = locale.getdefaultlocale()[1]\n\n try:\n line = line.decode(\"utf-8\")\n except Exception:\n pass\n return line", "def decode_modified_utf8(data, errors=\"strict\"):\n value, length = \"\", 0\n it = iter(decoder(byte_to_int(d) for d in data))\n while True:\n try:\n value += next(it)\n length += 1\n except StopIteration:\n break\n except UnicodeDecodeError as e:\n if errors == \"strict\":\n raise e\n\n if errors == \"ignore\":\n pass\n elif errors == \"replace\":\n value += \"\\uFFFD\"\n length += 1\n return value, length", "def decode_unicode_string(string):\n if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):\n return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])\n return string", "def decode_bytes(data: bytes, default_encoding: str = 'utf-8') -> str:\n encoding = default_encoding\n if HAS_CHARDET:\n detected = chardet.detect(data) or {}\n confidence = detected.get('confidence') or 0\n if confidence >= 0.5:\n encoding = detected['encoding']\n logger.debug(\n \"Data encoding detected as '{}' \"\n \"with a confidence of {}\".format(encoding, confidence))\n\n try:\n return data.decode(encoding)\n except UnicodeDecodeError:\n raise ActivityFailed(\n \"Failed to decode bytes using encoding '{}'\".format(encoding))", "def decode(byte_data):\n if byte_data is None:\n return None\n return byte_data.decode()", "def decode(val):\n if isinstance(val, str):\n # it was an already decoded unicode object\n return val\n else:\n # assume it is an encoded bytes object\n return val.decode('utf-8')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to determine the encoding of `data` by looking in `data`. Check for a byte order mark (BOM) or an encoding declaration.
def determine_encoding_from_data(self, data): # check for a byte order mark: for start_bytes, encoding in self.byte_order_marks: if data.startswith(start_bytes): return encoding # check for an encoding declaration pattern in first 2 lines of file: for line in data.splitlines()[:2]: match = self.coding_slug.search(line) if match: return match.group(1).decode('ascii') return None
[ "def get_data_encoding():", "def strip_byte_order_mark(cls, data):\n encoding = None\n if isinstance(data, str):\n return (\n data, encoding)\n else:\n if len(data) >= 4:\n if data[:2] == b'\\xfe\\xff':\n if data[2:4] != '\\x00\\x00':\n encoding = 'utf-16be'\n data = data[2:]\n if len(data) >= 4:\n if data[:2] == b'\\xff\\xfe':\n if data[2:4] != '\\x00\\x00':\n encoding = 'utf-16le'\n data = data[2:]\n if data[:3] == b'\\xef\\xbb\\xbf':\n encoding = 'utf-8'\n data = data[3:]\n else:\n if data[:4] == b'\\x00\\x00\\xfe\\xff':\n encoding = 'utf-32be'\n data = data[4:]\n else:\n if data[:4] == b'\\xff\\xfe\\x00\\x00':\n encoding = 'utf-32le'\n data = data[4:]\n return (\n data, encoding)", "def AutoDetectEncoding(self, srcFile):\n srcFile.seek(0)\n magic = srcFile.read(4)\n while len(magic) < 4:\n magic = magic + 'Q'\n if magic[:2] == '\\xff\\xfe' or magic[:2] == '\\xfe\\xff':\n if magic[2:] != '\\x00\\x00':\n magic = magic[:2]\n elif magic[:3] == '\\xef\\xbb\\xbf':\n magic = mage[:3]\n self.encoding, seekPos, self.bom = self.MagicTable.get(\n magic, ('utf-8', 0, False))\n srcFile.seek(seekPos)", "def detect_file_encoding(self):\n\t\twith open(self.wq, 'r') as filehandle: # read in the file data\n\t\t\tfile_data = filehandle.read()\n\t\t\tself.detected_encoding = chardet.detect(file_data)['encoding']\n\n\t\tif self.detected_encoding == \"UTF-16\":\n\t\t\tself.detected_encoding = \"utf_16_le\" # we'll use this encoding in this case - if it detects UTF-16 off the YSI\n\t\t\t\t\t\t\t\t\t\t\t\t\t# then it's probably UCS-2 LE BOM, AKA UTF-16 LE BOM (sort of)", "def detect_encoding(readline):\n try:\n filename = readline.__self__.name\n except AttributeError:\n filename = None\n bom_found = False\n encoding = None\n default = 'ascii'\n\n def read_or_stop():\n try:\n return readline()\n except StopIteration:\n return b''\n\n def find_cookie(line):\n try:\n # Decode as ASCII, which is Python 2 default\n line_string = line.decode('ascii')\n except UnicodeDecodeError:\n msg = \"invalid or missing encoding declaration\"\n if filename is not None:\n msg = '{} for {!r}'.format(msg, filename)\n raise SyntaxError(msg)\n\n match = cookie_re.match(line_string)\n if not match:\n return None\n encoding = _get_normal_name(match.group(1))\n try:\n lookup(encoding)\n except LookupError:\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = \"unknown encoding: \" + encoding\n else:\n msg = \"unknown encoding for {!r}: {}\".format(filename,\n encoding)\n raise SyntaxError(msg)\n\n if bom_found:\n if encoding != 'utf-8':\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = 'encoding problem: utf-8'\n else:\n msg = 'encoding problem for {!r}: utf-8'\n msg = msg.format(filename)\n raise SyntaxError(msg)\n encoding += '-sig'\n return encoding\n\n first = read_or_stop()\n if first.startswith(BOM_UTF8):\n bom_found = True\n first = first[3:]\n default = 'utf-8-sig'\n if not first:\n return default, []\n\n encoding = find_cookie(first)\n if encoding:\n return encoding, [first]\n if not blank_re.match(first):\n return default, [first]\n\n second = read_or_stop()\n if not second:\n return default, [first]\n\n encoding = find_cookie(second)\n if encoding:\n return encoding, [first, second]\n\n return default, [first, second]", "def get_file_encoding(content):\r\n encoding = None\r\n try:\r\n lines_to_check = content.split(\"\\n\", 2)\r\n for index in range(2):\r\n if len(lines_to_check) > index:\r\n line_encoding = _search_coding_line(lines_to_check[index])\r\n if 
line_encoding:\r\n encoding = line_encoding\r\n break\r\n except UnicodeDecodeError as error:\r\n #add logger\r\n print(error)\r\n #if not encoding is set then use UTF-8 as default\r\n if encoding is None:\r\n encoding = \"UTF-8\"\r\n return encoding", "def test_encoding_win(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'bad_codec.csv'))\n self.assertEqual(guessed_encoding.lower(), u'windows-1252')", "def test_encoding_amazon_de_reviews_is_utf8(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon_de_reviews_200.csv'))\n self.assertEqual(guessed_encoding.lower(), u'utf-8')", "def test_encoding_empty(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'empty_file.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')", "def decode(self, data):\r\n if self.encoding and self.encoding.lower() == 'unicode':\r\n assert isinstance(data, unicode), (\r\n 'input encoding is \"unicode\" '\r\n 'but input is not a unicode object')\r\n if isinstance(data, unicode):\r\n # Accept unicode even if self.encoding != 'unicode'.\r\n return data\r\n if self.encoding:\r\n # We believe the user/application when the encoding is\r\n # explicitly given.\r\n encodings = [self.encoding]\r\n else:\r\n data_encoding = self.determine_encoding_from_data(data)\r\n if data_encoding:\r\n # If the data declares its encoding (explicitly or via a BOM),\r\n # we believe it.\r\n encodings = [data_encoding]\r\n else:\r\n # Apply heuristics only if no encoding is explicitly given and\r\n # no BOM found. Start with UTF-8, because that only matches\r\n # data that *IS* UTF-8:\r\n encodings = ['utf-8', 'latin-1']\r\n if locale_encoding:\r\n encodings.insert(1, locale_encoding)\r\n for enc in encodings:\r\n try:\r\n decoded = unicode(data, enc, self.error_handler)\r\n self.successful_encoding = enc\r\n # Return decoded, removing BOMs.\r\n return decoded.replace(u'\\ufeff', u'')\r\n except (UnicodeError, LookupError), err:\r\n error = err # in Python 3, the <exception instance> is\r\n # local to the except clause\r\n raise UnicodeError(\r\n 'Unable to decode input data. 
Tried the following encodings: '\r\n '%s.\\n(%s)' % (', '.join([repr(enc) for enc in encodings]),\r\n ErrorString(error)))", "def detect_encoding(readline):\r\n bom_found = False\r\n encoding = None\r\n default = 'utf-8'\r\n def read_or_stop():\r\n try:\r\n return readline()\r\n except StopIteration:\r\n return bytes()\r\n\r\n def find_cookie(line):\r\n try:\r\n line_string = line.decode('ascii')\r\n except UnicodeDecodeError:\r\n return None\r\n\r\n matches = cookie_re.findall(line_string)\r\n if not matches:\r\n return None\r\n encoding = _get_normal_name(matches[0])\r\n try:\r\n codec = lookup(encoding)\r\n except LookupError:\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError(\"unknown encoding: \" + encoding)\r\n\r\n if bom_found:\r\n if codec.name != 'utf-8':\r\n # This behaviour mimics the Python interpreter\r\n raise SyntaxError('encoding problem: utf-8')\r\n encoding += '-sig'\r\n return encoding\r\n\r\n first = read_or_stop()\r\n if first.startswith(BOM_UTF8):\r\n bom_found = True\r\n first = first[3:]\r\n default = 'utf-8-sig'\r\n if not first:\r\n return default, []\r\n\r\n encoding = find_cookie(first)\r\n if encoding:\r\n return encoding, [first]\r\n\r\n second = read_or_stop()\r\n if not second:\r\n return default, [first]\r\n\r\n encoding = find_cookie(second)\r\n if encoding:\r\n return encoding, [first, second]\r\n\r\n return default, [first, second]", "def _validate_charset(data, charset):\n if len(charset) > 1:\n charset_data_length = 0\n for symbol_charset in charset:\n if symbol_charset not in ('A', 'B', 'C'):\n raise Code128.CharsetError\n charset_data_length += 2 if symbol_charset is 'C' else 1\n if charset_data_length != len(data):\n raise Code128.CharsetLengthError\n elif len(charset) == 1:\n if charset not in ('A', 'B', 'C'):\n raise Code128.CharsetError\n elif charset is not None:\n raise Code128.CharsetError", "def validUTF8(data):\n if data is None or len(data) == 0:\n return True\n numOfFiller = 0\n for byte in data:\n if numOfFiller > 0:\n tmp = verify_byte(byte, 5)\n numOfFiller = numOfFiller - 1\n if not tmp:\n return False\n else:\n if verify_byte(byte, 1):\n numOfFiller = 0\n elif verify_byte(byte, 2):\n numOfFiller = 1\n elif verify_byte(byte, 3):\n numOfFiller = 2\n elif verify_byte(byte, 4):\n numOfFiller = 3\n else:\n return False\n if numOfFiller > 0:\n return False\n return True", "def get_encoding(byte_string):\n return detect(byte_string)['encoding']", "def test_encoding_ascii(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon-sample-1000.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')", "def validUTF8(data):\n # Use maks, to clean byte of anything beyond 8 least significant bits.\n cleanByte = [rawByte & 0b11111111 for rawByte in data]\n\n # Cast to byte type.\n byte = bytes(cleanByte)\n\n # Attempt to decode byte data.\n try:\n byte.decode()\n except UnicodeDecodeError:\n # If decoding fails, return False.\n return False\n\n return True", "def sniff_encoding(fh):\n sniff = sniff_file(fh)\n\n # WoS files typically include a BOM, which we want to strip from the actual\n # data. The encodings 'utf-8-sig' and 'utf-16' do this for UTF-8 and UTF-16\n # respectively. When dealing with files with BOM, avoid the encodings\n # 'utf-8' (which is fine for non-BOM UTF-8), 'utf-16-le', and 'utf-16-be'.\n # See e.g. 
http://stackoverflow.com/a/8827604\n encodings = {codecs.BOM_UTF16: 'utf-16',\n codecs.BOM_UTF8: 'utf-8-sig'}\n for bom, encoding in encodings.items():\n if sniff.startswith(bom):\n return encoding\n # WoS export files are either UTF-8 or UTF-16\n return 'utf-8'", "def smart_decode(data, charset):\n try:\n if isinstance(data, str):\n # It's already unicode so just return it\n return data\n else:\n return data.decode(charset, errors='strict')\n\n except UnicodeDecodeError: # PY3\n # Looks like the charset lies, try to detect it\n return guess_encoding_and_decode(data, claimed=charset)\n\n except LookupError:\n # They gave us a crap encoding\n return guess_encoding_and_decode(data, claimed=charset)", "def guess_file_encoding(filename, default):\n try:\n f = open(filename, \"rb\")\n the_text = f.read()\n f.close()\n except Exception as details:\n warn(\"Error while trying to guess the encoding of file %s: %s\" \\\n % (filename, details))\n return default\n\n bomdict = { codecs.BOM_UTF8 : 'UTF8',\n codecs.BOM_UTF16_BE : 'UTF-16BE',\n codecs.BOM_UTF16_LE : 'UTF-16LE' }\n\n # check if there is Unicode signature\n for bom, encoding in bomdict.items():\n if the_text.startswith(bom):\n the_text = the_text[len(bom):]\n break\n else:\n bom = None\n encoding = None\n\n if encoding is None: # there was no BOM\n try:\n unicode_text, encoding = guess_encoding(the_text)\n except UnicodeError:\n warn(\"Can't work out the encoding of file '%s'.\" % filename)\n warn(\"Assuming the default encoding: %s\" % default)\n return default\n warn(\"Guessed encoding for file '%s': %s\" % (filename, encoding))\n return encoding", "def guess_encoding(text: bytes, default: Encoding=DEFAULT_ENCODING) -> Encoding:\n result = chardet.detect(text)\n return normalize_result(result, default=default)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode `data`, write it to a single file, and return it. With Python 3 or binary output mode, `data` is returned unchanged, except when specified encoding and output encoding differ.
def write(self, data): if not self.opened: self.open() if ('b' not in self.mode and sys.version_info < (3,0) or check_encoding(self.destination, self.encoding) is False ): if sys.version_info >= (3,0) and os.linesep != '\n': data = data.replace('\n', os.linesep) # fix endings data = self.encode(data) try: # In Python < 2.5, try...except has to be nested in try...finally. try: self.destination.write(data) except TypeError as e: if sys.version_info >= (3,0) and isinstance(data, bytes): try: self.destination.buffer.write(data) except AttributeError: if check_encoding(self.destination, self.encoding) is False: raise ValueError('Encoding of %s (%s) differs \n' ' from specified encoding (%s)' % (self.destination_path or 'destination', self.destination.encoding, self.encoding)) else: raise e except (UnicodeError, LookupError) as err: raise UnicodeError( 'Unable to encode output data. output-encoding is: ' '%s.\n(%s)' % (self.encoding, ErrorString(err))) finally: if self.autoclose: self.close() return data
[ "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError, e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError), err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data", "def encode(input, output, encoding):\n if encoding == 'base64':\n import base64\n return base64.encode(input, output)\n if encoding == 'quoted-printable':\n import quopri\n return quopri.encode(input, output, 0)\n if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):\n import uu\n return uu.encode(input, output)\n if encoding in ('7bit', '8bit'):\n return output.write(input.read())\n if encoding in encodetab:\n pipethrough(input, encodetab[encoding], output)\n else:\n raise ValueError, \\\n 'unknown Content-Transfer-Encoding: %s' % encoding", "def write_to_file(self, data):", "def write_binary_file(output_path, data):\n with open(output_path, \"wb\") as f:\n f.write(data)", "def compress(data, compression_level):\n buffer = cStringIO.StringIO()\n gz_file = GzipFile(None, 'wb', compression_level, buffer)\n if isinstance(data, unicode):\n data = data.encode(response.charset)\n gz_file.write(data)\n gz_file.close()\n return buffer.getvalue()", "def encode(self, data):\n\t\traise NotImplementedError()", "def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))", "def data_encode(data, encoding=DEFAULT_ENCODING):\r\n # http://stackoverflow.com/questions/1254454/fastest-way-to-convert-a-dicts-keys-values-from-unicode-to-str\r\n if isinstance(data, compat.unicode_type):\r\n return data.encode(encoding)\r\n elif isinstance(data, dict):\r\n return dict(map(data_encode, compat.iteritems(data)))\r\n elif isinstance(data, list) or isinstance(data, tuple):\r\n return list(map(data_encode, data))\r\n else:\r\n return data", "def write_to_output_file(output_dir, filename, data):\n\n if not output_dir or not prepare_output_dir(output_dir):\n return\n filename = os.path.join(output_dir, filename)\n try:\n with open(filename, 'w') as outfile:\n if isinstance(data, string_types):\n outfile.write(data)\n else:\n json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)\n # pylint: disable=broad-except; do not want serialization/write to break for any reason\n except Exception as exc:\n display.warning(\"Could not write output file {}: {}\".format(filename, exc))", "def write_file(self, data) -> None:\n pass", "def write(self, data):\r\n if self.stream is False:\r\n return\r\n if isinstance(data, Exception):\r\n data = unicode(SafeString(data, self.encoding,\r\n 
self.encoding_errors, self.decoding_errors))\r\n try:\r\n self.stream.write(data)\r\n except UnicodeEncodeError:\r\n self.stream.write(data.encode(self.encoding, self.encoding_errors))\r\n except TypeError: # in Python 3, stderr expects unicode\r\n if self.stream in (sys.stderr, sys.stdout):\r\n self.stream.buffer.write(data) # write bytes to raw stream\r\n else:\r\n self.stream.write(unicode(data, self.encoding,\r\n self.decoding_errors))", "def write_bytes(self, data):\n # type-check for the buffer interface before truncating the file\n view = memoryview(data)\n with self.open(mode='wb') as f:\n return f.write(view)", "def zip_compress(data):\n out = io.BytesIO()\n with zipfile.ZipFile(file=out, mode=\"w\") as z:\n with z.open(\"myfile\", \"w\") as zf:\n zf.write(data)\n out.seek(0)\n return out.read()", "def write_raw_file(self, data: bytes) -> None:\n pass", "def save_data(self, data):\n file = self.get_file()\n with open(file, \"w\") as f:\n f.write(data)", "def _save_and_compress(self, filename = None, data = None):\n if os.path.exists(filename):\n os.remove(filename)\n \n fileContents = gzip.open(filename, 'wb', compresslevel = 3)\n pickle.dump(data, fileContents, protocol = pickle.HIGHEST_PROTOCOL)\n fileContents.close()", "def write(object_data):\n output = pickle.dumps(object_data)\n return output", "def _dumpPickle(self, data):\r\n \r\n return codecs.encode(cPickle.dumps(data,protocol=cPickle.HIGHEST_PROTOCOL), \"base64\").decode()", "def write_bytes(out_data):\n if sys.version_info[0] >= 3:\n if isinstance(out_data, type(u'')):\n return out_data.encode('utf-8')\n elif isinstance(out_data, type(b'')):\n return out_data\n else:\n if isinstance(out_data, type(u'')):\n return out_data.encode('utf-8')\n elif isinstance(out_data, type(str(''))):\n return out_data\n msg = \"Invalid value for out_data neither unicode nor byte string: {}\".format(out_data)\n raise ValueError(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encode `data`, store it in `self.destination`, and return it.
def write(self, data): self.destination = self.encode(data) return self.destination
[ "def encode(self, data):\n\t\traise NotImplementedError()", "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError, e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError), err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data", "def write(self, data):\r\n if not self.opened:\r\n self.open()\r\n if ('b' not in self.mode and sys.version_info < (3,0)\r\n or check_encoding(self.destination, self.encoding) is False\r\n ):\r\n if sys.version_info >= (3,0) and os.linesep != '\\n':\r\n data = data.replace('\\n', os.linesep) # fix endings\r\n data = self.encode(data)\r\n\r\n try: # In Python < 2.5, try...except has to be nested in try...finally.\r\n try:\r\n self.destination.write(data)\r\n except TypeError as e:\r\n if sys.version_info >= (3,0) and isinstance(data, bytes):\r\n try:\r\n self.destination.buffer.write(data)\r\n except AttributeError:\r\n if check_encoding(self.destination, \r\n self.encoding) is False:\r\n raise ValueError('Encoding of %s (%s) differs \\n'\r\n ' from specified encoding (%s)' %\r\n (self.destination_path or 'destination',\r\n self.destination.encoding, self.encoding))\r\n else:\r\n raise e\r\n except (UnicodeError, LookupError) as err:\r\n raise UnicodeError(\r\n 'Unable to encode output data. output-encoding is: '\r\n '%s.\\n(%s)' % (self.encoding, ErrorString(err)))\r\n finally:\r\n if self.autoclose:\r\n self.close()\r\n return data", "def urlEncode(self, data):\n # type: (Union[str, bytearray]) -> Union[str,bytearray]", "def encode_data(self, data):\r\n if data:\r\n data = urlencode(data)\r\n\r\n return data", "def _op_push_data(self, data):\n\n # expects data in hexadecimal characters and converts appropriately\n # TODO maybe, for convenience, also accept objects for public keys,\n # addresses, etc. and use isinstance and convert manually\n data_bytes = unhexlify(data)\n\n if len(data_bytes) < 0x4c:\n return chr(len(data_bytes)).encode() + data_bytes\n elif len(data_bytes) < 0xff:\n return b'\\x4c' + chr(len(data_bytes)).encode() + data_bytes\n elif len(data_bytes) < 0xffff:\n return b'\\x4d' + struct.pack('<H', len(data_bytes)) + data_bytes\n elif len(data_bytes) < 0xffffffff:\n return b'\\x4e' + struct.pack('<I', len(data_bytes)) + data_bytes\n else:\n raise ValueError(\"Data too large. 
Cannot push into script\")", "def encode( data: JSONData ) -> bytes:\n\n try:\n s = json.dumps( data )\n return s.encode( _ENCODING )\n except UnicodeError as e:\n raise ConnectionError( f\"Failed to encode message: '{s}'\" ) from e", "def base64Encode(self, data):\n # type: (Union[str, bytearray]) -> Union[str,bytearray]", "def _writeSomeData(self, data):\n sent = self.transport._originalWriteSomeData(data)\n self.dataSentEvent(sent)\n return sent", "def encode_data(self, data):\r\n return json.dumps(data)", "def _encode_to_stream(self, output_stream, data, options=None, **kwargs):\n output_stream.write(self._encode(data, options=options, **kwargs))", "def serialize(self, data):\n raise NotImplementedError()", "def _dumpPickle(self, data):\r\n \r\n return codecs.encode(cPickle.dumps(data,protocol=cPickle.HIGHEST_PROTOCOL), \"base64\").decode()", "def _encode_data(self, data, **kwargs):\n return json.dumps(data, cls=JSONEncoder, **kwargs)", "def encode(self, data):\n return_list = [list() for _ in range(self.num_segments)]\n encoder = Encoder(self.min_segments, self.num_segments)\n for data_slice in _slice_generator(data, block_size):\n for segment_list, zfec_share in zip(\n return_list, encoder.encode(data_slice)\n ):\n segment_list.append(zfec_share)\n \n return [\"\".join(sublist) for sublist in return_list]", "def serialize(self, data) -> str:\n pass", "def sendToComparer(self, data):\n # type: (bytearray) -> ()", "def encode(self, data_string):\r\n\r\n if type(data_string) is not bytes:\r\n raise ValueError('Must pass bytes to encode')\r\n\r\n binary_string = ''\r\n\r\n # Match ASCII to entries in the lookup table\r\n for byte in data_string:\r\n binary_string += self.huffman_table[byte]\r\n\r\n # Convert binary string into ASCII\r\n encoded_string = b'';\r\n for i in range(0, len(binary_string), 8):\r\n binary = binary_string[i:i+8]\r\n encoded_string += bytes([int(binary[::-1], 2)])\r\n\r\n # If the huffman-coded string is longer than the original\r\n # string, return the original string instead. Putting an\r\n # ASCII value 0xff where the padding bit should be signals to\r\n # the decoder that the message is not encoded.\r\n if len(data_string) <= len(encoded_string):\r\n return b'\\xff' + data_string\r\n\r\n # In the first byte, store the number of padding bits\r\n padding_value = (8 - (len(binary_string) % 8)) % 8\r\n encoded_string = bytes([padding_value]) + encoded_string\r\n\r\n return encoded_string", "def encode(self) -> bytes:\n \n pass", "def __bytes__(self):\n\n return bytes(self._data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterable containing: self (if include_self is true); all descendants in tree traversal order (if descend is true); all siblings (if siblings is true) and their descendants (if also descend is true); the siblings of the parent (if ascend is true) and their descendants (if also descend is true); and so on. If `condition` is not None, the iterable contains only nodes for which ``condition(node)`` is true. If `condition` is a node class ``cls``, it is equivalent to a function consisting of ``return isinstance(node, cls)``. If ascend is true, assume siblings to be true as well.
def traverse(self, condition=None, include_self=True, descend=True, siblings=False, ascend=False): if ascend: siblings=True # Check for special argument combinations that allow using an # optimized version of traverse() if include_self and descend and not siblings: if condition is None: return self._all_traverse() elif isinstance(condition, type): return self._fast_traverse(condition) # Check if `condition` is a class (check for TypeType for Python # implementations that use only new-style classes, like PyPy). if isinstance(condition, type): node_class = condition def condition(node, node_class=node_class): return isinstance(node, node_class) r = [] if include_self and (condition is None or condition(self)): r.append(self) if descend and len(self.children): for child in self: r.extend(child.traverse(include_self=True, descend=True, siblings=False, ascend=False, condition=condition)) if siblings or ascend: node = self while node.parent: index = node.parent.index(node) for sibling in node.parent[index+1:]: r.extend(sibling.traverse(include_self=True, descend=descend, siblings=False, ascend=False, condition=condition)) if not ascend: break else: node = node.parent return r
[ "def traverse(self, condition=None, include_self=True, descend=True,\r\n siblings=False, ascend=False):\r\n if ascend:\r\n siblings=True\r\n # Check for special argument combinations that allow using an\r\n # optimized version of traverse()\r\n if include_self and descend and not siblings:\r\n if condition is None:\r\n return self._all_traverse()\r\n elif isinstance(condition, (types.ClassType, type)):\r\n return self._fast_traverse(condition)\r\n # Check if `condition` is a class (check for TypeType for Python\r\n # implementations that use only new-style classes, like PyPy).\r\n if isinstance(condition, (types.ClassType, type)):\r\n node_class = condition\r\n def condition(node, node_class=node_class):\r\n return isinstance(node, node_class)\r\n r = []\r\n if include_self and (condition is None or condition(self)):\r\n r.append(self)\r\n if descend and len(self.children):\r\n for child in self:\r\n r.extend(child.traverse(include_self=True, descend=True,\r\n siblings=False, ascend=False,\r\n condition=condition))\r\n if siblings or ascend:\r\n node = self\r\n while node.parent:\r\n index = node.parent.index(node)\r\n for sibling in node.parent[index+1:]:\r\n r.extend(sibling.traverse(include_self=True,\r\n descend=descend,\r\n siblings=False, ascend=False,\r\n condition=condition))\r\n if not ascend:\r\n break\r\n else:\r\n node = node.parent\r\n return r", "def nodes(self, method='dfs', criteria=lambda x: True):\n if method == 'bfs':\n def bfs_iter():\n queue = [self]\n while True:\n try:\n n = queue.pop(0)\n except IndexError:\n raise StopIteration\n queue.extend(n._children)\n if criteria(n):\n yield n\n return bfs_iter() # call the generator\n elif method == 'dfs':\n def dfs_iter():\n stack = (self,)\n while True:\n try:\n n = stack[0]\n except IndexError:\n raise StopIteration\n # TODO check whether using tuple here is actually faster than list\n stack = tuple(n._children) + stack[1:] # prepend\n if criteria(n):\n yield n\n return dfs_iter() # call the generator", "def iter_elements(self, condition):\n for elem in self.iter():\n if condition(elem):\n yield elem", "def traverse(self, order=\"draw\", includeSelf=True, inclStmtComment=False):\n if order == 'pick':\n if inclStmtComment and hasattr(self, 'stmtComment'):\n yield self.stmtComment\n else:\n if includeSelf:\n yield self\n # For \"pick\" order to be the true opposite of \"draw\", this loop should run in\n # reverse, but child icons are not intended to overlap in a detectable way.\n for child in self.children():\n if child is None:\n print('icon has null child', self)\n yield from child.traverse(order)\n if order == \"pick\":\n if includeSelf:\n yield self\n else:\n if inclStmtComment and hasattr(self, 'stmtComment'):\n yield self.stmtComment", "def traverse_depthwise(self, flag=None):\n queue = deque([self]) \n while len(queue) != 0:\n node = queue.popleft()\n if node.has_children():\n for child in node.get_children():\n if child is not None:\n queue.append(child)\n if flag is not None:\n if node.is_marked(flag):\n yield node\n else:\n yield node", "def ancestor_finder(resource, predicate, include_self=False):\n resource = resource if include_self else getattr(resource, \"__parent__\", None)\n while resource is not None:\n if predicate(resource):\n yield resource\n resource = getattr(resource, \"__parent__\", None)", "def nodes_where(self, conditions=None, data=False, **kwargs):\n conditions = conditions or {}\n conditions.update(kwargs)\n\n for key, attr in self.nodes(True):\n is_match = True\n attr = attr or {}\n\n for 
name, value in conditions.items():\n method = getattr(self, name, None)\n\n if callable(method):\n val = method(key)\n if isinstance(val, list):\n if value not in val:\n is_match = False\n break\n break\n if isinstance(value, (tuple, list)):\n minval, maxval = value\n if val < minval or val > maxval:\n is_match = False\n break\n else:\n if value != val:\n is_match = False\n break\n\n else:\n if name not in attr:\n is_match = False\n break\n if isinstance(attr[name], list):\n if value not in attr[name]:\n is_match = False\n break\n break\n if isinstance(value, (tuple, list)):\n minval, maxval = value\n if attr[name] < minval or attr[name] > maxval:\n is_match = False\n break\n else:\n if value != attr[name]:\n is_match = False\n break\n\n if is_match:\n if data:\n yield key, attr\n else:\n yield key", "def setDescendantCondTypes(self):\n self.setConditionalType()\n for child in self.childList:\n child.setDescendantCondTypes()", "def get_children(self, flag=None, reverse=False):\n \n if self.has_children(flag=flag):\n if not reverse:\n #\n # Go in usual order\n # \n for pos in self._child_positions:\n child = self.children[pos]\n if child is not None:\n if flag is None:\n yield child\n elif child.is_marked(flag):\n yield child\n else: \n #\n # Go in reverse order\n # \n for pos in reversed(self._child_positions):\n child = self.children[pos]\n if child is not None:\n if flag is None:\n yield child\n elif child.is_marked(flag):\n yield child", "def descendants(self) -> Iterable[\"Type\"]:\n return self._hier.closure(self, lambda t: t.child_types)", "def descendantGen(self):\n yield self\n for child in self.childList:\n for item in child.descendantGen():\n yield item", "def iter_children(self):\n for child in self.children:\n if not child.is_null():\n yield child", "def children(self):\r\n c = self.child\r\n while c:\r\n yield c\r\n c = c.nxt", "def children_iter(self):\n for child in self.children:\n if child:\n yield child", "def get_all_ancestors(node):\n return node.iterancestors()", "def iter_siblings(self):\r\n if self._parent:\r\n for sibling in self._parent.iter_children():\r\n if sibling is not self:\r\n yield sibling\r\n else:\r\n raise StopIteration()", "def walk_dependency_graph(self, reverse=False):\n if reverse:\n graph_name = 'reverse_dependencies'\n else:\n graph_name = 'dependencies'\n\n # self first\n yield self\n\n # Use Breadth First Search (BFS) algorithm\n vqueue = [self]\n discovered = set(vqueue)\n while vqueue:\n u = vqueue.pop()\n for v in getattr(u, graph_name):\n if v not in discovered:\n discovered.add(v)\n vqueue.append(v)\n yield v", "def all(self):\n def walk(nodes):\n for node in nodes:\n yield node\n if self.recurse and node.is_container:\n for result in walk(node.children):\n yield result\n return Query(walk(self))", "def children(self):\n node = self.first_child\n while node is not None:\n yield node\n node = node.next" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the first node in the iterable returned by traverse(), or None if the iterable is empty. Parameter list is the same as of traverse. Note that include_self defaults to 0, though.
def next_node(self, condition=None, include_self=False, descend=True, siblings=False, ascend=False): iterable = self.traverse(condition=condition, include_self=include_self, descend=descend, siblings=siblings, ascend=ascend) try: return iterable[0] except IndexError: return None
[ "def start_node(self):\n if len(self._nodes) == 0:\n return None\n return self._nodes[0]", "def get_first_node(self):\n return self._nodes[0]", "def suggested_node(self):\n for _ in range(0, len(self.node.children)):\n if self.current_idx == len(self.node.children):\n self.current_idx = 0\n node = self.node.children[self.current_idx]\n if node:\n return node\n\n return None", "def first(self):\n\t\tif self.is_empty():\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self._make_position(0) #position of first element", "def _get_next_node(self) -> Optional[BaseInvocation]:\n g = self.execution_graph.nx_graph()\n\n # Depth-first search with pre-order traversal is a depth-first topological sort\n sorted_nodes = nx.dfs_preorder_nodes(g)\n\n next_node = next(\n (\n n\n for n in sorted_nodes\n if n not in self.executed # the node must not already be executed...\n and all((e[0] in self.executed for e in g.in_edges(n))) # ...and all its inputs must be executed\n ),\n None,\n )\n\n if next_node is None:\n return None\n\n return self.execution_graph.nodes[next_node]", "def next(self) -> Optional[BaseInvocation]:\n\n # TODO: enable multiple nodes to execute simultaneously by tracking currently executing nodes\n # possibly with a timeout?\n\n # If there are no prepared nodes, prepare some nodes\n next_node = self._get_next_node()\n if next_node is None:\n prepared_id = self._prepare()\n\n # Prepare as many nodes as we can\n while prepared_id is not None:\n prepared_id = self._prepare()\n next_node = self._get_next_node()\n\n # Get values from edges\n if next_node is not None:\n self._prepare_inputs(next_node)\n\n # If next is still none, there's no next node, return None\n return next_node", "def find_node(self, func):\n\n for obj in self.lst_hierobj:\n if func(obj):\n return obj\n return None", "def get_leftmost_child(self) -> Node or None:\n if len(self.children) == 0: return None\n return self.children[0]", "def next(self):\n if self.is_complete():\n return None\n return self.tree.children[self.dot]", "def _first(self, node: etree._Entity, expr: str) -> etree._Entity | None:\n for entity in self.xpath(node, expr):\n return entity\n return None", "def firstChild(self):\n # return self.currentNode.firstChild\n return traverseChildren(self, 'first')", "def _get_node(self, index):\n if not (-self._length <= index <= self._length - 1):\n return None\n\n # Converts negative indexes to positive.\n index = index + self._length if index < 0 else index\n\n cur_node = self._first\n for i in range(index):\n cur_node = cur_node.next\n\n return cur_node", "def getFirstChild(self):\n children = self.getChildNodes()\n if children:\n return children._data[0]\n return None", "def first(self, default=None):\n try:\n return iter(self).next()\n except StopIteration:\n return default", "def getFirstTopLevelNode(self) -> retval:\n ...", "def peek(self):\n # TODO: Return top item, if any\n \n if self.list.is_empty():\n return None \n\n # looks at top of ll which is top of stack and grabs the head.\n return self.list.head.data", "def depth_first_traversal(self, start=None):\n if start is None:\n start = self.root\n traverse = []\n\n return traverse", "def first(self, default=None):\r\n try:\r\n return next(iter(self))\r\n except StopIteration:\r\n return default", "def first(self):\n return next(self)", "def first(iterator):\n return next(iterator)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return node's language tag. Look iteratively in self and parents for a class argument starting with ``language`` and return the remainder of it (which should be a `BCP49` language tag) or the `fallback`.
def get_language_code(self, fallback=''): for cls in self.get('classes', []): if cls.startswith('language-'): return cls[9:] try: return self.parent.get_language(fallback) except AttributeError: return fallback
[ "def language(element: ET.Element) -> Optional[str]:\n classes = element.get('class', '').split()\n # Return the first one that matches.\n for css_class in classes:\n match = re.match(r'(lang|language)-(.*)$', css_class)\n if match is not None:\n prefix, language = match.groups()\n return language", "def scanLanguageDirectives(self, p: Position) -> str:\n c = self.c\n language = g.getLanguageFromAncestorAtFileNode(p)\n return language or c.target_language", "def _getTree(self, language):\n\n try:\n return self._words[language]\n except KeyError:\n raise LexiconError('Unsupported language: %s' % language)", "def getLanguage(cls, code):\n cls.initialize()\n return None if code is None else cls.languageIndex.get(code, None)", "def _language(self, item):\n\n response = item['spider_response'].body\n try:\n root = html.fromstring(response)\n except ValueError:\n root = html.fromstring(response.encode(\"utf-8\"))\n\n # Check for lang-attributes\n lang = root.get('lang')\n\n if lang is None:\n lang = root.get('xml:lang')\n\n # Check for general meta tags\n if lang is None:\n meta = root.cssselect('meta[name=\"language\"]')\n if len(meta) > 0:\n lang = meta[0].get('content')\n\n # Check for open graph tags\n if lang is None:\n meta = root.cssselect('meta[property=\"og:locale\"]')\n if len(meta) > 0:\n lang = meta[0].get('content')\n\n # Look for <article> elements and inspect the one with the largest payload with langdetect\n if lang is None:\n article_list = []\n for article in root.xpath('//article'):\n article_list.append(re.sub(r'\\s+', ' ', article.text_content().strip()))\n longest_articles = sorted(article_list, key=lambda article: len(article), reverse=True)\n for article in longest_articles:\n try:\n lang = detect(article)\n except LangDetectException:\n continue\n else:\n break\n\n # Analyze the whole body with langdetect\n if lang is None:\n try:\n lang = detect(root.text_content().strip())\n except LangDetectException:\n pass\n\n # Try to normalize output\n if lang is not None:\n # First search for suitable locale in the original output\n matches = self.langcode_pattern.search(lang)\n if matches is not None:\n lang = matches.group(0)\n else:\n # If no match was found, normalize the original output and search again\n normalized = locale.normalize(re.split(r'\\s|;|,', lang.strip())[0])\n matches = self.langcode_pattern.search(normalized)\n if matches is not None:\n lang = matches.group(0)\n\n return lang", "def get_language_code(tag_008=None):\n try:\n return tag_008[35:38]\n except TypeError:\n return None\n except IndexError:\n return None", "def detect_language(self, language=None):\n log.info('Detecting language for %s', self.fname)\n\n if language:\n self.lang = language\n\n else:\n ext = os.path.splitext(self.fname)[1]\n self.lang = languages.get_by_ext(ext)\n\n self.ms = self.lang['multistart']\n self.me = self.lang['multiend']\n self.multi_re = re.compile('%s.*?%s' % (self.me, self.ms))\n log.debug('Detected %s for %s', self.lang['name'], self.fname)", "def language(self):\n pass", "def scanColorDirectives(self, p: Position) -> str:\n c = self.c\n root = p.copy()\n for p in root.self_and_parents(copy=False):\n language = g.findFirstValidAtLanguageDirective(p.b)\n if language:\n return language\n # Get the language from the nearest ancestor @<file> node.\n language = g.getLanguageFromAncestorAtFileNode(root) or c.target_language\n return language", "def get_language(self):\n return self._get_option('language')", "def language_code(self):\n return 
self._book_dict[\"language_code\"]", "def volume_get_language(self, volume):\n return self.request( \"volume-get-language\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'language-code': [ basestring, False ],\n 'nfs-character-set': [ basestring, False ],\n 'oem-character-set': [ basestring, False ],\n 'language': [ basestring, False ],\n } )", "def get_language(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'language', identifier, category_details)", "def get_language(entry):\n index_url = entry.url.replace(\"robots.txt\", \"\")\n\n # hack around some issues here,\n if entry.domain in KNOWN_LANGUAGES:\n language = KNOWN_LANGUAGES.get(entry.domain)\n\n else:\n try:\n page = requests.get(index_url)\n try:\n languages = cld2.detect(page.content, isPlainText=False,\n hintTopLevelDomain=entry.domain.split('.')[-1])\n except:\n languages = cld2.detect(page.text.encode(\"utf8\"), isPlainText=False,\n hintTopLevelDomain=entry.domain.split('.')[-1])\n\n # ignoring 'is_reliable' flag here, set on baidu.com etc (even though detects\n # language appropiately\n language = languages.details[0].language_name if languages.details else 'Unknown'\n index_url = page.url\n\n except Exception as e:\n log.exception(\"Failed to analyze language for '%s'\", entry.domain)\n language = 'Failed'\n\n language = language.title()\n # traditional chinese -> chinese\n if language == 'Chineset':\n language = 'Chinese'\n return language, not urlparse(index_url).netloc.endswith(entry.domain)", "def get_source_language(resources):\r\n return resources[0].source_language", "def language_name(language_code):\n return get_language_name(language_code)", "def get(self, language: str) -> str:\n value = None\n\n try:\n # Get specified language\n value = self[language]\n\n # Default to english\n if value is None:\n value = self['en']\n except KeyError:\n # Default to the first property\n for language in self.keys():\n if language in self:\n value = self[language]\n break\n\n return value", "def language(self):\n if self.service:\n if self.service.supports_single_language:\n self._language = self.service.supported_languages.all()[0]\n elif self.user and self.user.language in self.service.supported_languages.all():\n self._language = self.user.language\n elif self._language and not self._language in self.service.supported_languages.all():\n self._language = None\n else:\n self._language = None\n\n self.save()\n return self._language", "def getLanguageName(self, language):\n\n return (language['name']\n if ((len(language) > 0) and ('name' in language.keys()) and (language['name'] is not None))\n else \"\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') from node or dictionary `dict_`.
def update_basic_atts(self, dict_):
    if isinstance(dict_, Node):
        dict_ = dict_.attributes
    for att in self.basic_attributes:
        self.append_attr_list(att, dict_.get(att, []))
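These appear to be methods of docutils' Element/Node classes (the references to Node, basic_attributes and dupnames suggest as much). Assuming that, a minimal sketch of how update_basic_atts merges the basic attributes of one node into another; the node types and attribute values are invented for illustration and docutils must be installed:

from docutils import nodes

target = nodes.paragraph()
target['ids'] = ['intro']
target['classes'] = ['lead']

source = nodes.paragraph()
source['ids'] = ['intro', 'first']

# Basic attributes ('ids', 'names', 'classes', 'dupnames') are appended
# without duplicates; 'source' and non-basic attributes are left alone.
target.update_basic_atts(source)
print(target['ids'])      # ['intro', 'first']
print(target['classes'])  # ['lead'] -- source had no classes to add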
[ "def update_all_atts(self, dict_, update_fun = copy_attr_consistent,\r\n replace = True, and_source = False):\r\n if isinstance(dict_, Node):\r\n dict_ = dict_.attributes\r\n\r\n # Include the source attribute when copying?\r\n if and_source:\r\n filter_fun = self.is_not_list_attribute\r\n else:\r\n filter_fun = self.is_not_known_attribute\r\n\r\n # Copy the basic attributes\r\n self.update_basic_atts(dict_)\r\n\r\n # Grab other attributes in dict_ not in self except the\r\n # (All basic attributes should be copied already)\r\n for att in filter(filter_fun, dict_):\r\n update_fun(self, att, dict_[att], replace)", "def update(self, dict):\n self.attr.update(dict)\n return self", "def copy_attributes(self, parent_dict, child_dict, attrs):\n for attr in attrs:\n has_attr = parent_dict.get(attr)\n if has_attr is not None:\n child_dict[attr] = has_attr", "def update(self, given_dict):\n self.__dict__.update(given_dict)", "def update(self, dictionary):\n for key, value in dictionary.items():\n if is_stringlike(key):\n setattr(self, key, value)\n else:\n self[Tag(key)] = value", "def update(self, data):\n for field in self.ATTR_FIELDS:\n if field in data:\n setattr(self, field, data[field])", "def update_attributes(self, override: Dict):\n self.additional_attributes.update(override)", "def set_node_attributes(self, node_attributes: dict) -> None:\n\n node_attribute_map = self._create_node_attributes(node_attributes)\n\n for col in node_attribute_map.keys():\n\n if col in self.node_columns:\n\n for node in node_attribute_map[col]:\n\n if node in self.node_map.map.keys():\n\n self.node_map.map[node]['attributes'].update(node_attribute_map[col][node])", "def update_svg_from_dict():\n doc = minidom.parse(TOPOLOGY_FILENAME)\n with open(RELATIONS_FILENAME) as fp:\n content = json.load(fp)\n relations, data = content[\"relations\"], content[\"data\"]\n\n for node in doc.getElementsByTagName(\"tspan\"):\n if not node.hasAttribute(RELATIONS_KEY):\n continue\n\n b_id = str(get_block_id(node))\n node.setAttribute(\"type\", data[b_id][\"type\"])\n node.setAttribute(RELATIONS_KEY, str(list(map(int, relations[b_id]))))\n node.setAttribute(ADDITIONAL_DATA_KEY, str({\n k: v for k, v in data[b_id].items() if k != \"type\"\n }))\n\n with open(TOPOLOGY_FILENAME, \"w\") as fp:\n doc.writexml(fp)", "def update_with_attributes(obj, attributes):\n for key, val in attributes.items():\n setattr(obj, key, val)", "def update(self):\n for dynamic_attr in self.dynamic_attrs.itervalues():\n dynamic_attr.clear_overloads()\n \n self.update_children()\n \n for modifier in self.modifiers:\n self.apply_modifier(modifier)", "def update_attributes_instability(attrs_inst: Dict[Attribute, float]):\n for attribute, attribute_instability in attrs_inst.items():\n attributes_instability[attribute] = attribute_instability", "def _copy_attr(o, attr, adict, key=None):\n if hasattr(o, attr):\n adict[key or attr] = getattr(o, attr)", "def update_from_dict(instance, attrs, commit):\n\n field_names = list(map(lambda f: f.name, instance._meta.get_fields()))\n for attr, val in attrs.items():\n if attr in field_names:\n setattr(instance, attr, val)\n\n if commit:\n instance.save()", "def _dict_to_hdf5_attrs(hdf5_dataset_object, dictionary, base_path = ''):\n\n\tfor key, value in dictionary.items():\n\n\t\thdf5_dataset_object.attrs[os.path.join(base_path,key)] = value\n\n\treturn", "def set_attributes_randomly(self) -> None:\n for f in self.attributes:\n self.data[f.name] = f.random_value()", "def change(self, new_dict):\n self.dict = new_dict", 
"def _set_netcdf_attributes(root, attrs):\n for key, val in attrs.items():\n setattr(root, key, val)", "def add_additional_attributes(self, attribs: dict):\n for k, v in attribs.items():\n if k not in self.__dict__:\n setattr(self, k, v)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each element in values, if it does not exist in self[attr], append it.
def append_attr_list(self, attr, values):
    # List Concatenation
    for value in values:
        if not value in self[attr]:
            self[attr].append(value)
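A short sketch of append_attr_list's duplicate-skipping behaviour, again assuming these are docutils.nodes.Element methods; the attribute values are made up:

from docutils import nodes

node = nodes.section()
node['names'] = ['overview']

# 'overview' is already present and is skipped; 'summary' is appended.
node.append_attr_list('names', ['overview', 'summary'])
print(node['names'])  # ['overview', 'summary']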
[ "def copy_attr_concatenate(self, attr, value, replace):\r\n if self.get(attr) is not value:\r\n if isinstance(self.get(attr), list) and \\\r\n isinstance(value, list):\r\n self.append_attr_list(attr, value)\r\n else:\r\n self.replace_attr(attr, value, replace)", "def coerce_append_attr_list(self, attr, value):\r\n # List Concatenation\r\n if not isinstance(self.get(attr), list):\r\n self[attr] = [self[attr]]\r\n if not isinstance(value, list):\r\n value = [value]\r\n self.append_attr_list(attr, value)", "def __or__(self, attrs):\r\n remove = set([an for an, av in attrs if av is None])\r\n replace = dict([(an, av) for an, av in attrs\r\n if an in self and av is not None])\r\n return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self\r\n if sn not in remove] +\r\n [(an, av) for an, av in attrs\r\n if an not in self and an not in remove])", "def merge(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value, set value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n if value not in current_value:\n _set(environment, attr, current_value + \" \" + value)\n # do nothing if value in current value\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(\n environment,\n attr,\n CompleteExpression(merge_sublist(list(current_value), list(value))),\n )\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, merge_sublist(current_value, value))\n else:\n raise TypeError(\n \"current value is of unsupported type\"\n f\"'{type(current_value)}' for the 'append' action\"\n )", "def addVals( self, vals ):\n for x in vals: self.add( x )", "def addVals( self, vals ):\n for x in vals: self.add( x )", "def append(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n _set(environment, attr, current_value + \" \" + value)\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(environment, attr, (current_value + value).complete())\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, current_value + value)\n else:\n raise TypeError(\n \"current value and new value are of unsupported types\"\n f\"'{type(current_value)}' and '{type(value)}' for the 'append' action\"\n )", "def add_to_set(value, values):\n if value:\n values.add(value)\n return values", "def applyAttrVals(self, obj, vals):\n [setattr(obj, key, val) for key, val in vals.iteritems()]", "def add_attr(self, attr_name, samplet_id, attr_value):\n\n if attr_name is not None:\n\n if attr_name not in self._attr:\n self._attr[attr_name] = dict()\n self._attr_dtype[attr_name] = None\n\n if is_iterable_but_not_str(samplet_id):\n if not isinstance(attr_value, (Sequence, np.ndarray, np.generic)):\n raise TypeError('When samplet_id is a list, attr_value must '\n 'also be a list')\n if len(samplet_id) != len(attr_value):\n raise ValueError('Number of attribute values provided 
do not '\n 'match the number of samplet IDs')\n\n for sid, val in zip(samplet_id, attr_value):\n self.__add_single_attr(attr_name, sid, val)\n\n else:\n if is_iterable_but_not_str(attr_value):\n raise TypeError('When samplet_id is not a list, attr_value also '\n 'must not be a list')\n\n self.__add_single_attr(attr_name, samplet_id, attr_value)\n\n else:\n raise ValueError('Attribute name can not be None!')", "def by_attribute(self, schema_field, att_value, is_lookup=False):\n\n clone = self.prepare_attribute_qs()\n real_name = str(schema_field.real_name)\n if not isinstance(att_value, (list, tuple)):\n att_value = [att_value]\n if is_lookup:\n att_value = Lookup.objects.filter(schema_field__id=schema_field.id, code__in=att_value)\n if not att_value:\n # If the lookup values don't exist, then there aren't any\n # NewsItems with this attribute value. Note that we aren't\n # using QuerySet.none() here, because we want the result to\n # be a NewsItemQuerySet, and none() returns a normal QuerySet.\n clone = clone.extra(where=('1=0',))\n return clone\n att_value = [val.id for val in att_value]\n if schema_field.is_many_to_many_lookup():\n # We have to use a regular expression search to look for all rows\n # with the given att_value *somewhere* in the column. The [[:<:]]\n # thing is a word boundary.\n for value in att_value:\n if not str(value).isdigit():\n raise ValueError('Only integer strings allowed for att_value in many-to-many SchemaFields')\n clone = clone.extra(where=(\"db_attribute.%s ~ '[[:<:]]%s[[:>:]]'\" % (real_name, '|'.join([str(val) for val in att_value])),))\n elif None in att_value:\n if att_value != [None]:\n raise ValueError('by_attribute() att_value list cannot have more than one element if it includes None')\n clone = clone.extra(where=(\"db_attribute.%s IS NULL\" % real_name,))\n else:\n clone = clone.extra(where=(\"db_attribute.%s IN (%s)\" % (real_name, ','.join(['%s' for val in att_value])),),\n params=tuple(att_value))\n return clone", "def append_all(x, val):\n for el in x:\n el.append(val)", "def attr_jar_append(cls, value):\n cls.attr_jar.append(value.encode('ascii', 'replace'))", "def append_val(self, key, val, extra_data):\n raise NotImplementedError", "def get_values(data, attribute):\n return data[attribute].unique()", "def get_attribute_values(self, object_dn, key, vals):\n\n r = re.compile(\"^([^;]+);range=(\\d+)-(\\d+|\\*)$\")\n\n m = r.match(key)\n if m is None:\n # no range, just return the values\n return vals\n\n attr = m.group(1)\n hi = int(m.group(3))\n\n # get additional values in a loop\n # until we get a response with '*' at the end\n while True:\n\n n = \"%s;range=%d-*\" % (attr, hi + 1)\n res = self.ldb.search(base=object_dn, scope=SCOPE_BASE, attrs=[n])\n assert len(res) == 1\n res = dict(res[0])\n del res[\"dn\"]\n\n fm = None\n fvals = None\n\n for key in res.keys():\n m = r.match(key)\n\n if m is None:\n continue\n\n if m.group(1) != attr:\n continue\n\n fm = m\n fvals = list(res[key])\n break\n\n if fm is None:\n break\n\n vals.extend(fvals)\n if fm.group(3) == \"*\":\n # if we got \"*\" we're done\n break\n\n assert int(fm.group(2)) == hi + 1\n hi = int(fm.group(3))\n\n return vals", "def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml", "def change_attr(el, attr, values):\n v = el.attrib.get(attr, '')\n changed = False\n for value in values.split(';'):\n k, newv = split2(value, \"Each 
value must be in the form x:y\", \":\")\n v = replace_key(v, k, newv)\n if v == '': # there were no such yet\n v = \"%s:%s\" % (k, newv)\n #print \"Changing %s : %s, got %s\" % (attr, values, str(v))\n el.attrib[attr] = v", "def set(self, attr, val, strict=True):\n if val is None:\n return\n if isinstance(val, basestring):\n if not len(val): return False\n s = re.sub(\"[\\n\\t\\r ]+\", \"\", val)\n if not len(s): return False\n if val is self:\n raise RRSDatabaseValueError(\"Avoid infinite recursion: Cannot insert itself as a value.\")\n\n # Check type. If it is some new attribute, the type cannot be determined\n if attr in self.__types__:\n if self.__types__[attr] is not _UnknownType:\n self._check_type(attr, val, self.__types__[attr])\n else:\n try:\n if type(self.__dict__[attr]) is not list:\n self.__types__[attr] = _UnknownType\n except KeyError, e:\n if strict:\n cls = str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n raise KeyError(\"Object %s has no attribute %s.\" % (cls, e))\n else:\n self.__types__[attr] = _UnknownType\n # no semanics checking needed, so insert value into object\n setattr(self, attr, val)\n return\n\n # Check semantics\n self._check_semantics(attr, val)\n\n # Insert\n if type(self.__dict__[attr]) == list:\n if not isinstance(val, _RRSDbEntityRelationship):\n raise RRSDatabaseValueError(\"Relationship between entities has to\" \\\n \"be represented by any instance of subclass of _RRSDbEntityRelationship\")\n val._parent = self # set pointer to parent element\n self.__dict__[attr].append(val)\n else:\n setattr(self, attr, val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
First, convert both self[attr] and value to a nonstring sequence type; if either is not already a sequence, convert it to a list of one element. Then call append_attr_list.
def coerce_append_attr_list(self, attr, value):
    # List Concatenation
    if not isinstance(self.get(attr), list):
        self[attr] = [self[attr]]
    if not isinstance(value, list):
        value = [value]
    self.append_attr_list(attr, value)
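Sketch of the coercion step: a scalar attribute and a scalar value are both wrapped in one-element lists before the append (docutils assumed; the attribute name is illustrative):

from docutils import nodes

node = nodes.paragraph()
node['align'] = 'left'                      # a plain string, not a list
node.coerce_append_attr_list('align', 'center')
print(node['align'])  # ['left', 'center'] -- both sides were coerced to lists first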
[ "def copy_attr_concatenate(self, attr, value, replace):\r\n if self.get(attr) is not value:\r\n if isinstance(self.get(attr), list) and \\\r\n isinstance(value, list):\r\n self.append_attr_list(attr, value)\r\n else:\r\n self.replace_attr(attr, value, replace)", "def append_attr_list(self, attr, values):\r\n # List Concatenation\r\n for value in values:\r\n if not value in self[attr]:\r\n self[attr].append(value)", "def _set_log_attrs(\n self,\n attr_map: dict,\n attr_type: str,\n value: t.Union[str, t.List[str]],\n ):\n if not hasattr(self, \"_LOG_ATTRS\"):\n self._LOG_ATTRS = {\"response\": [], \"request\": []}\n\n value = [x.lower().strip() for x in listify(value) if isinstance(x, str)]\n\n if not value:\n self._LOG_ATTRS[attr_type] = []\n return\n\n log_attrs = self._LOG_ATTRS[attr_type]\n\n if \"all\" in value:\n for k, v in attr_map.items():\n entry = f\"{k}={v}\"\n if entry not in log_attrs:\n log_attrs.append(entry)\n return\n\n for item in value:\n if item in attr_map:\n value = attr_map[item]\n entry = f\"{item}={value}\"\n if entry not in log_attrs:\n log_attrs.append(entry)", "def attrsToList(self, attrs):\n return [g.Bunch(name=name, val=attrs.getValue(name))\n for name in attrs.getNames()]", "def __set_list_value(self, prop, val):\n\t\tif isinstance(val, str):\n\t\t\tif val != \"\":\n\t\t\t\tprop.append(val)\n\t\telif isinstance(val, list):\n\t\t\tif val:\n\t\t\t\tprop.extend([x.strip() for x in val])\n\t\telse:\n\t\t\traise TypeError(\"Expected string, got %r instead\" % type(val))", "def _convert_param_attr_to_list(param_attr, n):\n if isinstance(param_attr, (list, tuple)):\n assert len(param_attr) == n, (\n \"length of param_attr should be %d when it is a list/tuple\" % n\n )\n param_attrs = []\n for attr in param_attr:\n if isinstance(attr, bool):\n if attr:\n param_attrs.append(ParamAttr._to_attr(None))\n else:\n param_attrs.append(False)\n else:\n param_attrs.append(ParamAttr._to_attr(attr))\n # param_attrs = [ParamAttr._to_attr(attr) for attr in param_attr]\n elif isinstance(param_attr, bool):\n param_attrs = []\n if param_attr:\n param_attrs = [ParamAttr._to_attr(None) for i in range(n)]\n else:\n param_attrs = [False] * n\n else:\n param_attrs = []\n attr = ParamAttr._to_attr(param_attr)\n for i in range(n):\n attr_i = copy.deepcopy(attr)\n if attr.name:\n attr_i.name = attr_i.name + \"_\" + str(i)\n param_attrs.append(attr_i)\n return param_attrs", "def _parse_attr_proto(self, attributes, node):\n for attr in attributes:\n if sys.getsizeof(attr['value']) > self.MAX_NODE_ATTRIBUTE_VALUE_BYTES:\n message = f\"The attribute value of node({node.name}) \" \\\n f\"is over {self.MAX_NODE_ATTRIBUTE_VALUE_BYTES} Bytes, will ignore.\"\n logger.warning(message)\n continue\n\n if attr['name'] == 'gen_strategy':\n # The gen_strategy value is equal in_strategy value, so we only need to show one strategy value in attr\n continue\n\n value = self._parse_value_proto(attr['value'])\n node.add_attr({attr['name']: str(value)})", "def get_sequentialAttrDict(self,attr = None):\n\t#log.debug(\">>> %s.get_sequentialAttrDict(attr = '%s') >> \"%(self.p_nameShort,attr) + \"=\"*75) \t\t\n\tuserAttrs = self.getUserAttrsAsDict()\n\td_attrList = {}\n\tfor key in userAttrs.keys():\n\t if '_' in key:\n\t\t_split = key.split('_')\n\t\t_int_ = _split[-1]\n\t\t_str_ = ('_').join(_split[:-1])\n\t\tif \"%s\"%attr == _str_:\n\t\t try:\n\t\t\td_attrList[int(_int_)] = key\n\t\t\t#log.debug(\"match: '%s'\"%key)\n\t\t except:log.warning(\"%s failed to int | int: %s\"%(key,_int_))\n\t\t 
\n\t#log.debug(\"-\"*100) \t \t\n\treturn d_attrList", "def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml", "def uuids_as_list(attrname):\n return (lambda self, value: [operator.attrgetter('id')(obj)\n for obj in operator.attrgetter(\n attrname)(self)])", "def attr_value_proto(value):\n value_type = str(type(value))\n op_attr_str = str(value)\n value_attr = AttrValue(s=op_attr_str.encode(encoding='utf_8'))\n\n if value_type == \"<class 'int'>\" or value_type == \"<type 'int'>\":\n value_attr = AttrValue(i=value)\n elif value_type == \"<class 'float'>\" or value_type == \"<type 'float'>\":\n value_attr = AttrValue(f=value)\n elif value_type == \"<class 'bool'>\" or value_type == \"<type 'bool'>\":\n value_attr = AttrValue(b=value)\n elif value_type == \"<class 'list'>\" or value_type == \"<type 'list'>\":\n if len(value) > 0:\n value_list_dtype = str(type(value[0]))\n if value_list_dtype == \"<class 'int'>\" or value_list_dtype == \"<type 'int'>\":\n value_attr = AttrValue(list=AttrValue.ListValue(i=value))\n elif value_list_dtype == \"<class 'float'>\" or value_list_dtype == \"<type 'float'>\":\n value_attr = AttrValue(list=AttrValue.ListValue(f=value))\n elif value_list_dtype == \"<class 'bool'>\" or value_list_dtype == \"<type 'bool'>\":\n value_attr = AttrValue(list=AttrValue.ListValue(b=value))\n\n return value_attr", "def add_attr(self, attr_name, samplet_id, attr_value):\n\n if attr_name is not None:\n\n if attr_name not in self._attr:\n self._attr[attr_name] = dict()\n self._attr_dtype[attr_name] = None\n\n if is_iterable_but_not_str(samplet_id):\n if not isinstance(attr_value, (Sequence, np.ndarray, np.generic)):\n raise TypeError('When samplet_id is a list, attr_value must '\n 'also be a list')\n if len(samplet_id) != len(attr_value):\n raise ValueError('Number of attribute values provided do not '\n 'match the number of samplet IDs')\n\n for sid, val in zip(samplet_id, attr_value):\n self.__add_single_attr(attr_name, sid, val)\n\n else:\n if is_iterable_but_not_str(attr_value):\n raise TypeError('When samplet_id is not a list, attr_value also '\n 'must not be a list')\n\n self.__add_single_attr(attr_name, samplet_id, attr_value)\n\n else:\n raise ValueError('Attribute name can not be None!')", "def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)", "def attr_jar_append(cls, value):\n cls.attr_jar.append(value.encode('ascii', 'replace'))", "def get_all_attr(self, attribute: str): # -> list[tuple[SimpleConstraint, typing.Any]]:\n value = getattr(self, attribute)\n if value is not None:\n return [(self, value)]\n return []", "def _get_log_attrs(self, attr_type: str) -> t.List[str]:\n return getattr(self, \"_LOG_ATTRS\", {}).get(attr_type, [])", "def by_attribute(self, schema_field, att_value, is_lookup=False):\n\n clone = self.prepare_attribute_qs()\n real_name = str(schema_field.real_name)\n if not isinstance(att_value, (list, tuple)):\n att_value = [att_value]\n if is_lookup:\n att_value = Lookup.objects.filter(schema_field__id=schema_field.id, code__in=att_value)\n if not att_value:\n # If the lookup values don't exist, then there aren't any\n # NewsItems with this attribute value. 
Note that we aren't\n # using QuerySet.none() here, because we want the result to\n # be a NewsItemQuerySet, and none() returns a normal QuerySet.\n clone = clone.extra(where=('1=0',))\n return clone\n att_value = [val.id for val in att_value]\n if schema_field.is_many_to_many_lookup():\n # We have to use a regular expression search to look for all rows\n # with the given att_value *somewhere* in the column. The [[:<:]]\n # thing is a word boundary.\n for value in att_value:\n if not str(value).isdigit():\n raise ValueError('Only integer strings allowed for att_value in many-to-many SchemaFields')\n clone = clone.extra(where=(\"db_attribute.%s ~ '[[:<:]]%s[[:>:]]'\" % (real_name, '|'.join([str(val) for val in att_value])),))\n elif None in att_value:\n if att_value != [None]:\n raise ValueError('by_attribute() att_value list cannot have more than one element if it includes None')\n clone = clone.extra(where=(\"db_attribute.%s IS NULL\" % real_name,))\n else:\n clone = clone.extra(where=(\"db_attribute.%s IN (%s)\" % (real_name, ','.join(['%s' for val in att_value])),),\n params=tuple(att_value))\n return clone", "def _attr_from_documents(self, attr):\n elements = [getattr(doc, attr) for doc in self.documents]\n return elements", "def get_valid_attribute_values(self, attr, buf, pos):\n node = buf.xml_node_at_pos(pos)\n if node is None: return None\n handlerclass = buf.xml_tree_handler(node)\n values = handlerclass.values(attr, buf.xml_tree, node)\n if not values:\n return None\n values.sort()\n return values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If self[attr] does not exist or force is True or omitted, set self[attr] to value, otherwise do nothing.
def replace_attr(self, attr, value, force = True):
    # One or the other
    if force or self.get(attr) is None:
        self[attr] = value
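Illustration of the force flag, under the same docutils assumption; attribute names and values are invented:

from docutils import nodes

node = nodes.image()
node.replace_attr('uri', 'logo.png')                  # attribute missing: always set
node.replace_attr('uri', 'banner.png', force=False)   # already set and force is False: kept
print(node['uri'])  # 'logo.png'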
[ "def set(self, attr, val):\n if not hasattr(self, attr):\n logger.error('model: set: The attribute \"{0}\" is undefined'.format(attr))\n sys.exit(1)\n setattr(self, attr, val)", "def set_attribute(self,att,val):\r\n self.attributes[att] = val", "def set(self, attr, val, strict=True):\n if val is None:\n return\n if isinstance(val, basestring):\n if not len(val): return False\n s = re.sub(\"[\\n\\t\\r ]+\", \"\", val)\n if not len(s): return False\n if val is self:\n raise RRSDatabaseValueError(\"Avoid infinite recursion: Cannot insert itself as a value.\")\n\n # Check type. If it is some new attribute, the type cannot be determined\n if attr in self.__types__:\n if self.__types__[attr] is not _UnknownType:\n self._check_type(attr, val, self.__types__[attr])\n else:\n try:\n if type(self.__dict__[attr]) is not list:\n self.__types__[attr] = _UnknownType\n except KeyError, e:\n if strict:\n cls = str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n raise KeyError(\"Object %s has no attribute %s.\" % (cls, e))\n else:\n self.__types__[attr] = _UnknownType\n # no semanics checking needed, so insert value into object\n setattr(self, attr, val)\n return\n\n # Check semantics\n self._check_semantics(attr, val)\n\n # Insert\n if type(self.__dict__[attr]) == list:\n if not isinstance(val, _RRSDbEntityRelationship):\n raise RRSDatabaseValueError(\"Relationship between entities has to\" \\\n \"be represented by any instance of subclass of _RRSDbEntityRelationship\")\n val._parent = self # set pointer to parent element\n self.__dict__[attr].append(val)\n else:\n setattr(self, attr, val)", "def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)", "def set_attr(self,n,attr,val=None):\n\t\tself.realopen()\n\t\ta=loads(self.bdb.get(dumps(n,-1),txn=self.txn))\n\t\tif isinstance(attr,dict) :\n\t\t\ta.update(attr)\n\t\telse: a[attr]=val\n\t\tself[n]=a", "def setOptionalAttribute(self, name, value):\n if value is not None:\n self.setAttribute(name, value)", "def _apply_value(self, value):\n\n setattr(self._obj, self._attr, value)", "def onSetAttr(self, attr, vals, opts):\n pass", "def set_attribute(self,attr,value,add = None):\n\t\tif (add is None):\n\t\t\tadd = False \n\t\tif (attr is None):\n\t\t\traise ValueError(\"You must specify an attribute\")\n\t\tif (value is None):\n\t\t\traise ValueError(\"You must specify a value\")\n\t\tif ((not add) and (attr not in self._Attributes)):\n\t\t\traise ValueError(\"Attribute \" + attr + \" unrecognized\")\n\t\tself._Attributes[attr] = value", "def set(value,force=False):", "def SetAttr(self, attr, value):\n self.__article[attr] = value", "def set(self, prop, val):\n if prop == 'num_released':\n raise AttributeError(\"cannot set attribute\")\n\n # we don't want to add an attribute that doesn't already exist\n # first check to see that the attribute exists, then change it else\n # raise error\n if hasattr(self.release, prop):\n setattr(self.release, prop, val)\n elif hasattr(self.element_type, prop):\n setattr(self.element_type, prop, val)\n else:\n for init in self.element_type.initializers.values():\n if hasattr(init, prop):\n setattr(init, prop, val)\n break\n else:\n raise AttributeError('{0} attribute does not exist '\n 'in element_type '\n 'or release object'.format(prop))", "def set_attr(self, node: str, value: dict):\n\n if node in list(self.graph.keys()):\n self.graph[node][self._ATTR] = 
value\n else:\n raise NodeDoesNotExist(node)", "def update_attribute(self, instance, name, field, value):\n field_setter = getattr(self, f\"set_{name}\", None)\n if field_setter:\n field_setter(instance, name, field, value)\n else:\n setattr(instance, name, value)", "def set_attr(self, user, key, value):\n query1 = \"\"\"SELECT attr_value FROM attributes WHERE attr_uid = ?\n AND attr_key = ?\"\"\"\n query2 = \"INSERT INTO attributes VALUES (?, ?, ?)\"\n query3 = \"\"\"UPDATE attributes SET attr_value = ? WHERE attr_uid = ?\n AND attr_key = ?\"\"\"\n with self._db_access_lock, sqlite.connect(self._dbfile) as conn:\n if conn.execute(query1, (user, key)).fetchone():\n conn.execute(query3, (value, user, key))\n else:\n conn.execute(query2, (user, key, value))\n try:\n self._attributes[user][key] = value\n except KeyError:\n self.attributes[user] = {key: value}", "def set(self, attr_name, attr_value, overwrite=False):\n aname = 'lgt.' + attr_name\n if attr_name in self.lgtattrs:\n if not overwrite:\n log.warn('LGT attribute \"%s\" exists but overwrite is False.' % aname)\n return \n self.lgtattrs.union(set(attr_name))\n self._obj.attrs[aname] = attr_value", "def _attribute_inverter(obj, name, value):\n setattr(obj, name, value)\n return True", "def SetAttribute(self, name, value):\n aMap = self._AMap()\n if name in aMap:\n attrName, decode, vType = aMap[name]\n if vType is ListType:\n if value is None:\n value = []\n else:\n value = value.split()\n setattr(self, attrName, map(decode, value))\n elif vType is DictType:\n if value is None:\n value = []\n else:\n value = value.split()\n dValue = {}\n for iv in map(decode, value):\n dValue[iv] = dValue.get(iv, 0) + 1\n setattr(self, attrName, dValue)\n else:\n x = getattr(self, attrName, None)\n if type(x) in (ListType, DictType):\n print \"Problem setting %s in %s: single value will overwrite List or Dict\" % (repr(name), repr(self.__class__.__name__))\n # print self.GetDocument()\n if value is None:\n setattr(self, attrName, None)\n else:\n setattr(self, attrName, decode(value))\n elif hasattr(self.__class__, 'ID') and name == self.__class__.ID:\n self.SetID(value)\n else:\n if value is None:\n if name in self._attrs:\n del self._attrs[name]\n else:\n self._attrs[name] = value", "def update_attr(field, attr, value):\n\n field.widget.attrs.update({\n attr: value\n })" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If attr is an attribute of self and both self[attr] and value are lists, concatenate the two sequences, setting the result to self[attr]. If either self[attr] or value are nonsequences and replace is True or self[attr] is None, replace self[attr] with value. Otherwise, do nothing.
def copy_attr_concatenate(self, attr, value, replace):
    if self.get(attr) is not value:
        if isinstance(self.get(attr), list) and \
           isinstance(value, list):
            self.append_attr_list(attr, value)
        else:
            self.replace_attr(attr, value, replace)
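A sketch showing both branches, list-to-list concatenation versus plain replacement, assuming these are the docutils element methods; values are illustrative:

from docutils import nodes

node = nodes.container()
node['classes'] = ['wide']

node.copy_attr_concatenate('classes', ['wide', 'shaded'], replace=True)
print(node['classes'])  # ['wide', 'shaded'] -- both are lists, so they are merged

node.copy_attr_concatenate('align', 'center', replace=True)
print(node['align'])    # 'center' -- non-list value, so it simply replaces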
[ "def replace_attr(self, attr, value, force = True):\r\n # One or the other\r\n if force or self.get(attr) is None:\r\n self[attr] = value", "def append_attr_list(self, attr, values):\r\n # List Concatenation\r\n for value in values:\r\n if not value in self[attr]:\r\n self[attr].append(value)", "def __or__(self, attrs):\r\n remove = set([an for an, av in attrs if av is None])\r\n replace = dict([(an, av) for an, av in attrs\r\n if an in self and av is not None])\r\n return Attrs([(sn, replace.get(sn, sv)) for sn, sv in self\r\n if sn not in remove] +\r\n [(an, av) for an, av in attrs\r\n if an not in self and an not in remove])", "def merge(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value, set value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n if value not in current_value:\n _set(environment, attr, current_value + \" \" + value)\n # do nothing if value in current value\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(\n environment,\n attr,\n CompleteExpression(merge_sublist(list(current_value), list(value))),\n )\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, merge_sublist(current_value, value))\n else:\n raise TypeError(\n \"current value is of unsupported type\"\n f\"'{type(current_value)}' for the 'append' action\"\n )", "def append(\n environment: MutableMapping[str, Any],\n attr: str,\n value: Union[str, List[Any], Expression],\n) -> None:\n # no current value\n if not _has(environment, attr) or _get(environment, attr) == MISSING:\n replace(environment, attr, value)\n return\n # has current value\n current_value = _get(environment, attr)\n if isinstance(current_value, str) and isinstance(value, str):\n _set(environment, attr, current_value + \" \" + value)\n elif isinstance(current_value, Expression) and isinstance(value, Expression):\n # force CompleteExpression's\n _set(environment, attr, (current_value + value).complete())\n elif isinstance(current_value, List) and isinstance(value, List):\n _set(environment, attr, current_value + value)\n else:\n raise TypeError(\n \"current value and new value are of unsupported types\"\n f\"'{type(current_value)}' and '{type(value)}' for the 'append' action\"\n )", "def coerce_append_attr_list(self, attr, value):\r\n # List Concatenation\r\n if not isinstance(self.get(attr), list):\r\n self[attr] = [self[attr]]\r\n if not isinstance(value, list):\r\n value = [value]\r\n self.append_attr_list(attr, value)", "def set(self, attr, val, strict=True):\n if val is None:\n return\n if isinstance(val, basestring):\n if not len(val): return False\n s = re.sub(\"[\\n\\t\\r ]+\", \"\", val)\n if not len(s): return False\n if val is self:\n raise RRSDatabaseValueError(\"Avoid infinite recursion: Cannot insert itself as a value.\")\n\n # Check type. 
If it is some new attribute, the type cannot be determined\n if attr in self.__types__:\n if self.__types__[attr] is not _UnknownType:\n self._check_type(attr, val, self.__types__[attr])\n else:\n try:\n if type(self.__dict__[attr]) is not list:\n self.__types__[attr] = _UnknownType\n except KeyError, e:\n if strict:\n cls = str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n raise KeyError(\"Object %s has no attribute %s.\" % (cls, e))\n else:\n self.__types__[attr] = _UnknownType\n # no semanics checking needed, so insert value into object\n setattr(self, attr, val)\n return\n\n # Check semantics\n self._check_semantics(attr, val)\n\n # Insert\n if type(self.__dict__[attr]) == list:\n if not isinstance(val, _RRSDbEntityRelationship):\n raise RRSDatabaseValueError(\"Relationship between entities has to\" \\\n \"be represented by any instance of subclass of _RRSDbEntityRelationship\")\n val._parent = self # set pointer to parent element\n self.__dict__[attr].append(val)\n else:\n setattr(self, attr, val)", "def change_attr(el, attr, values):\n v = el.attrib.get(attr, '')\n changed = False\n for value in values.split(';'):\n k, newv = split2(value, \"Each value must be in the form x:y\", \":\")\n v = replace_key(v, k, newv)\n if v == '': # there were no such yet\n v = \"%s:%s\" % (k, newv)\n #print \"Changing %s : %s, got %s\" % (attr, values, str(v))\n el.attrib[attr] = v", "def _combine_attribute(attribute, other_attribute, separator=', '):\n if (other_attribute and attribute != other_attribute and\n attribute not in other_attribute and other_attribute not in attribute):\n return '{}{}{}'.format(attribute, separator, other_attribute)\n return attribute", "def autoval(attr, value):\n val = AssignmentValue(None, self)\n val.identifier = attr\n setattr(val, attr, value)\n self.values[attr] = val\n if self.value is None:\n self.value = [attr]\n else:\n self.value.append(attr)", "def concat_attribute_values(l, r, delimiter):\n if not l:\n return r\n if not r:\n return l\n return l + delimiter + r", "def _remove_attr(self, ml, attr):\n\t\tfor m in ml:\n\t\t\tif m[0] == attr:\n\t\t\t\tml.remove(m)\n\t\tif self.oldattr.get(attr, []):\n\t\t\tml.insert(0, (attr, self.oldattr.get(attr, []), ''))\n\t\treturn ml", "def set_attributes(self, model_1, obj_1, obj_2, overwrite=True):\n for (\n attr\n ) in (\n obj_2.traits()\n ): # Iterate through all attributes in obj_2. 
These should be the same traits as obj_1 assuming the precondition\n class_name = str(type(obj_2.traits()[attr])).strip(\"<>'\").split(\".\")[-1]\n # TODO: check for reactance tuples: str(obj_2.traits()[attr]._trait.klass).strip(\"<>'\").split('.')[-1] != (Int,Int,Int):\n\n if class_name == \"List\":\n phase_order = {\n \"A\": 0,\n \"B\": 1,\n \"C\": 2,\n \"N\": 3,\n } # Should only have to deal with 3 phases.\n #\n # BUG WARNING: The order of objects in the list is important and is used to determine the changes that are made\n # Try to ensure that phases are specified to avoid this problem\n # If number of elements in obj_1 is 0, all elements of obj_2 are added\n # If number of elements is the same, they are modified with a 1-1 comparison\n # If number of elements in obj_2 is < obj_1, set the first values of obj_1 as obj_2\n # If number of elements in obj_2 is > obj_1, set the all the values in obj_1 in the order they'r in obj_2 and append the extras\n # This will fail if obj_1 is (A, B, C) and obj_2 is (A, C), as it'll assign phase C to phase B.\n # This will also fail if obj_1 is (C) and obj_2 is (A,B,C) as C will have A assigned to it.\n # This will also fail if obj_1 is (A,B) and obj_2 is (A,C) as B will have C assigned to it.\n list_1 = getattr(obj_1, attr)\n list_2 = getattr(obj_2, attr)\n if list_1 is None or len(list_1) == 0:\n result_list = []\n for element in list_2:\n result_list.append(self.copy(model_1, element))\n setattr(obj_1, attr, result_list)\n continue\n elif list_2 is None or len(list_2) == 0:\n continue\n\n # Almost all Lists are of objects which have phases. Exceptions being windings, reactances and positions\n # Require the phases to be specified in both systems to modify based on phase\n has_phases = True\n for i in range(len(list_1)):\n if not (\n hasattr(list_1[0], \"phase\") and list_1[0].phase is not None\n ):\n has_phases = False\n for i in range(len(list_2)):\n if not (\n hasattr(list_2[0], \"phase\") and list_2[0].phase is not None\n ):\n has_phases = False\n if has_phases and len(list_1) > 0 and len(list_2) > 0:\n # Firstly sort the lists so they're in correct order by phase.\n list_1.sort(key=lambda x: phase_order[x.phase])\n list_2.sort(key=lambda x: phase_order[x.phase])\n list_1_phase = phase_order[list_1[0].phase]\n list_2_phase = phase_order[list_2[0].phase]\n list_1_idx = 0\n list_2_idx = 0\n while list_1_idx < len(list_1) and list_2_idx < len(list_2):\n if list_1_idx < len(list_1):\n list_1_phase = phase_order[list_1[list_1_idx].phase]\n else:\n list_1_phase = 1000000\n if list_2_idx < len(list_2):\n list_2_phase = phase_order[list_2[list_2_idx].phase]\n else:\n list_2_phase = 1000001\n\n # i.e. recurse\n if list_1_phase == list_2_phase:\n self.set_attributes(\n model_1,\n list_1[list_1_idx],\n list_2[list_2_idx],\n overwrite,\n )\n list_1_idx = list_1_idx + 1\n list_2_idx = list_2_idx + 1\n elif list_1_phase < list_2_phase:\n list_1_idx = (\n list_1_idx + 1\n ) # e.g. obj_1 = (A, B, C) and obj_2 = (B). We don't update this phase\n\n else:\n getattr(obj_1, attr).append(list_2[list_2_idx])\n list_2_idx = list_2_idx + 1\n\n elif len(list_1) == len(list_2):\n for i in range(len(list_1)):\n self.set_attributes(model_1, list_1[i], list_2[i], overwrite)\n\n elif len(list_1) > len(list_2):\n for i in range(len(list_2)):\n self.set_attributes(model_1, list_1[i], list_2[i], overwrite)\n\n else: # i.e. 
len(list_1) < len(list_2):\n for i in range(len(list_2)):\n if i < len(list_1):\n self.set_attributes(\n model_1, list_1[i], list_2[i], overwrite\n )\n else:\n getattr(obj_1, attr).append(list_2[i])\n\n else:\n value = getattr(obj_2, attr)\n if value is not None:\n if getattr(obj_1, attr) is not None and overwrite == False:\n continue\n setattr(obj_1, attr, value)", "def merge_attribute(self, attribute, overwrite=False):\n\n assert(attribute.user == self.user)\n for k, v in attribute.other_data.iteritems():\n if not overwrite and k in self.data:\n print \"WARNING! OVEWRITITNG \", k\n self.data[k] = v", "def normalizeAttributeValue (\n\n self,\n attribute = None,\n value = None\n ) :\n \n if ( ( utilities.isEmpty( attribute ) ) or ( utilities.isEmpty( value ) ) ) : return None, None\n\n attribute = utilities.string( attribute, format = \"identifier\" )\n\n if attribute == \"reference\" : pass\n\n elif attribute == \"bibtex\" : pass\n\n elif attribute in self.aliasDictionary : attribute = self.aliasDictionary[ attribute ]\n\n elif attribute in self.fieldList : pass\n\n else : return None, None\n\n # first normalization of value: removes external {}, quotes, and strips spaces\n\n value = value.strip( \";,: /\\\\\" )\n\n size = len( value )\n\n while True : \n\n if value.startswith( \"{\" ) and value.endswith( \"}\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( \"(\" ) and value.endswith( \")\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( \"[\" ) and value.endswith( \"]\" ) : value = value[ 1 : -1 ]\n \n if value.startswith( '\"' ) and value.endswith( '\"' ) : value = value[ 1 : -1 ]\n\n if value.startswith( \"'\" ) and value.endswith( \"'\" ) : value = value[ 1 : -1 ]\n\n value = value.strip( \";,: /\\\\\" )\n\n if len( value ) == size : break\n\n size = len( value )\n\n # normalizes fields\n \n if attribute == \"author\" :\n\n value = self.normalizeAuthor( value )\n\n self.author = value\n\n elif ( ( attribute == \"reference\" ) or ( attribute == \"bibtex\" ) ) :\n\n attribute = \"bibtex\"\n\n value = utilities.string( value, format = \"identifier\" )\n \n self.bibtex = value\n\n elif attribute == \"booktitle\" : value = self.normalizeBookTitle( value )\n\n elif attribute == \"description\" :\n\n value = self.normalizeDescription( value )\n\n self.description = value\n\n elif attribute == \"editor\" : value = self.normalizeEditor( value )\n\n elif attribute == \"journal\" : value = self.normalizeJournal( value )\n\n elif attribute == \"month\" : value = self.normalizeMonth( value )\n\n elif attribute == \"pages\" : value = self.normalizePages( value )\n\n elif attribute == \"title\" :\n\n value = self.normalizeTitle( value )\n\n self.title = value\n\n elif attribute == \"year\" :\n\n value = self.normalizeYear( value )\n\n self.year = value\n\n## elif attribute == \"bib\" :\n##\n## value = self.normalizePath( value )\n##\n## self.bibPath = value\n\n elif attribute == \"file\" :\n\n value = self.normalizePath( value )\n\n self.filePath = value\n \n elif attribute == \"owner\" :\n\n value = utilities.string( value, format = \"title\" )\n\n self.owner = value\n\n # other values: strips delimiters\n \n else : value = str( value ).strip( \" ()[].;:,/\\\\{}-_\" )\n\n\n\n # cleans value\n\n## print \"normalize\", str( attribute), str( value )\n\n value = value.strip().replace( \"{\", \"\" ).replace( \"}\", \"\" )\n\n## # recodes attribute: reference becomes bibtex and the remainder has a prefix reference **RF\n##\n## if ( ( not attribute == \"bibtex\" ) and ( not 
attribute.startswith( \"reference\" ) ) ) :\n##\n## attribute = \"reference\" + utilities.string( attribute, format = \"class\" )\n\n return attribute, value", "def onSetAttr(self, attr, vals, opts):\n pass", "def mutated_sequence(self):\n for i in range(len(self.seq)):\n for alt in self.vocab:\n if i in self.fixed_positions or alt == self.seq[i]:\n continue\n yield SeqNode(self.seq[:i] + alt + self.seq[i + 1:],\n fixed_positions=self.fixed_positions + [i])", "def __setattr__(self, attr, value):\n raise AttributeError(\"%s object is immutable\" % (type(self).__name__,))", "def replace_attributes(soup: BeautifulSoup, attribute: str, value: str, new_value: str) -> None:\n for target in soup.find_all(attrs={attribute: value}):\n target: Tag\n target.attrs[attribute] = new_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates all attributes from node or dictionary `dict_`. Appends the basic attributes ('ids', 'names', 'classes', 'dupnames', but not 'source') and then, for all other attributes in dict_, updates the same attribute in self. When attributes with the same identifier appear in both self and dict_, the two values are merged based on the value of update_fun. Generally, when replace is True, the values in self are replaced or merged with the values in dict_; otherwise, the values in self may be preserved or merged. When and_source is True, the 'source' attribute is included in the copy.
def update_all_atts(self, dict_, update_fun = copy_attr_consistent,
                    replace = True, and_source = False):
    if isinstance(dict_, Node):
        dict_ = dict_.attributes

    # Include the source attribute when copying?
    if and_source:
        filter_fun = self.is_not_list_attribute
    else:
        filter_fun = self.is_not_known_attribute

    # Copy the basic attributes
    self.update_basic_atts(dict_)

    # Grab other attributes in dict_ not in self except the
    # (All basic attributes should be copied already)
    for att in filter(filter_fun, dict_):
        update_fun(self, att, dict_[att], replace)
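A hedged end-to-end sketch of update_all_atts with its defaults; copy_attr_consistent is not shown above, so the expected result for the non-basic attribute is an assumption based on replace_attr's semantics. Docutils assumed, values invented:

from docutils import nodes

target = nodes.paragraph()
target['ids'] = ['p1']

source = nodes.paragraph()
source['ids'] = ['p2']
source['align'] = 'left'

target.update_all_atts(source)
print(target['ids'])    # ['p1', 'p2'] -- basic attributes are appended
print(target['align'])  # 'left' -- non-basic attribute copied (assumed default behaviour)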
[ "def update_basic_atts(self, dict_):\r\n if isinstance(dict_, Node):\r\n dict_ = dict_.attributes\r\n for att in self.basic_attributes:\r\n self.append_attr_list(att, dict_.get(att, []))", "def update(self, dict):\n self.attr.update(dict)\n return self", "def copy_attributes(self, parent_dict, child_dict, attrs):\n for attr in attrs:\n has_attr = parent_dict.get(attr)\n if has_attr is not None:\n child_dict[attr] = has_attr", "def update(self, given_dict):\n self.__dict__.update(given_dict)", "def update_attributes(self, override: Dict):\n self.additional_attributes.update(override)", "def update(self):\n for dynamic_attr in self.dynamic_attrs.itervalues():\n dynamic_attr.clear_overloads()\n \n self.update_children()\n \n for modifier in self.modifiers:\n self.apply_modifier(modifier)", "def update_with_attributes(obj, attributes):\n for key, val in attributes.items():\n setattr(obj, key, val)", "def update(self, data):\n for field in self.ATTR_FIELDS:\n if field in data:\n setattr(self, field, data[field])", "def _update_dict(self, doc_dict, update_doc_dict):\n\n for key, value in update_doc_dict.items():\n if isinstance(value, dict) and key in doc_dict:\n self._update_dict(doc_dict[key], update_doc_dict[key])\n elif isinstance(value, list) and key in doc_dict and\\\n self._is_list_descriptor(value):\n self._update_list(doc_dict[key], update_doc_dict[key])\n else:\n doc_dict[key]=value", "def update(self, dictionary):\n for key, value in dictionary.items():\n if is_stringlike(key):\n setattr(self, key, value)\n else:\n self[Tag(key)] = value", "def load_updated(self, grfn_dict):\n for container in self.function_argument_map:\n if container in self.update_functions:\n for container_grfn in grfn_dict[0][\"containers\"]:\n for body_function in container_grfn[\"body\"]:\n function_name = body_function[\"function\"][\"name\"]\n if (\n function_name.startswith(\"@container\")\n and function_name.split(\"::\")[-1] == container\n ):\n updated_variable = [\n body_function[\"input\"][i]\n for i in self.function_argument_map[container][\n \"updated_indices\"\n ]\n ]\n for i in range(len(updated_variable)):\n old_index = int(\n updated_variable[i].split(\"::\")[-1]\n )\n new_index = old_index + 1\n updated_var_list = updated_variable[i].split(\n \"::\"\n )[:-1]\n updated_var_list.append(str(new_index))\n updated_variable[i] = \"::\".join(\n updated_var_list\n )\n self.current_scope = self.update_functions[\n container\n ][\"scope\"]\n variable_name = updated_var_list[1]\n variable_spec = self.generate_variable_definition(\n variable_name,\n None,\n False,\n self.update_functions[container][\"state\"],\n )\n variable_name_list = variable_spec[\n \"name\"\n ].split(\"::\")[:-1]\n variable_name_list.append(str(new_index))\n variable_spec[\"name\"] = \"::\".join(\n variable_name_list\n )\n grfn_dict[0][\"variables\"].append(variable_spec)\n body_function[\"updated\"] = updated_variable\n return grfn_dict", "def copy_attrs(self, src, overwrite=False):\n # check src.tile.lgtattrs\n if len(src.tile.lgtattrs) == 0:\n src.tile._get_lgt_attrs()\n for attr_name in src.tile.lgtattrs:\n self.set(attr_name, src.tile.get(attr_name), overwrite=overwrite)", "def update(self, d, o=None):\n if isinstance(d, abc.Mapping):\n for k, v in shallow_items(d):\n if k in vars(self):\n v_ = vars(self)[k]\n if isinstance(v_, Tdict):\n if isinstance(v, abc.Mapping) or o is not None:\n vars(self)[k].update(v, o)\n else:\n vars(self)[k] = v\n elif o is None:\n vars(self)[k] = ensure_tdict(v)\n else:\n vars(self)[k] = o(v_, v)\n 
else:\n vars(self)[k] = ensure_tdict(v)\n else:\n for k, v in vars(self).items():\n if isinstance(v, Tdict):\n v.update(d, o)\n elif o is None:\n vars(self)[k] = d\n else:\n vars(self)[k] = o(v, d)\n return self", "def clone_attributes(self, source_cell, target_cell, no_clone_key_dict_list=None):\n if no_clone_key_dict_list is None:\n no_clone_key_dict_list = []\n\n # clone \"C++\" attributes\n for attrName in self.clonable_attribute_names:\n setattr(target_cell, attrName, getattr(source_cell, attrName))\n\n # clone dictionary\n for key, val in source_cell.dict.items():\n\n if key in no_clone_key_dict_list:\n continue\n elif key == '__sbml_fetcher':\n # we are skipping copying of SWIG-added attribute\n # SBMLFetcher - this is added by default during cell creation\n # co no need to copy\n continue\n elif key == 'SBMLSolver':\n self.copy_sbml_simulators(from_cell=source_cell, to_cell=target_cell)\n elif key == CompuCell.CellG.__maboss__:\n # skipping MaBoSS models; need a reliable copy constructor\n continue\n else:\n # copying the rest of dictionary entries\n target_cell.dict[key] = deepcopy(source_cell.dict[key])\n\n # now copy data associated with plugins\n # AdhesionFlex\n if self.adhesionFlexPlugin:\n source_adhesion_vector = self.adhesionFlexPlugin.getAdhesionMoleculeDensityVector(source_cell)\n self.adhesionFlexPlugin.assignNewAdhesionMoleculeDensityVector(target_cell, source_adhesion_vector)\n\n # PolarizationVector\n if self.polarizationVectorPlugin:\n source_polarization_vector = self.polarizationVectorPlugin.getPolarizationVector(source_cell)\n self.polarizationVectorPlugin.setPolarizationVector(target_cell, source_polarization_vector[0],\n source_polarization_vector[1],\n source_polarization_vector[2])\n\n # polarization23Plugin\n if self.polarization23Plugin:\n pol_vec = self.polarization23Plugin.getPolarizationVector(source_cell)\n self.polarization23Plugin.setPolarizationVector(target_cell, pol_vec)\n pol_mark = self.polarization23Plugin.getPolarizationMarkers(source_cell)\n self.polarization23Plugin.setPolarizationMarkers(target_cell, pol_mark[0], pol_mark[1])\n lam = self.polarization23Plugin.getLambdaPolarization(source_cell)\n self.polarization23Plugin.setLambdaPolarization(target_cell, lam)\n\n # CellOrientationPlugin\n if self.cellOrientationPlugin:\n lam = self.cellOrientationPlugin.getLambdaCellOrientation(source_cell)\n self.cellOrientationPlugin.setLambdaCellOrientation(target_cell, lam)\n\n # ContactOrientationPlugin\n if self.contactOrientationPlugin:\n o_vec = self.contactOrientationPlugin.getOriantationVector(source_cell)\n self.contactOrientationPlugin.setOriantationVector(target_cell, o_vec.x, o_vec.y, o_vec.z)\n self.contactOrientationPlugin.setAlpha(target_cell, self.contactOrientationPlugin.getAlpha(source_cell))\n\n # ContactLocalProductPlugin\n if self.contactLocalProductPlugin:\n c_vec = self.contactLocalProductPlugin.getCadherinConcentrationVec(source_cell)\n self.contactLocalProductPlugin.setCadherinConcentrationVec(target_cell, c_vec)\n\n # LengthConstraintPlugin\n if self.lengthConstraintPlugin:\n lam = self.lengthConstraintPlugin.getLambdaLength(source_cell)\n tl = self.lengthConstraintPlugin.getTargetLength(source_cell)\n mtl = self.lengthConstraintPlugin.getMinorTargetLength(source_cell)\n self.lengthConstraintPlugin.setLengthConstraintData(target_cell, lam, tl, mtl)\n\n # ConnectivityGlobalPlugin\n if self.connectivityGlobalPlugin:\n cs = self.connectivityGlobalPlugin.getConnectivityStrength(source_cell)\n 
self.connectivityGlobalPlugin.setConnectivityStrength(target_cell, cs)\n\n # ConnectivityLocalFlexPlugin\n if self.connectivityLocalFlexPlugin:\n cs = self.connectivityLocalFlexPlugin.getConnectivityStrength(source_cell)\n self.connectivityLocalFlexPlugin.setConnectivityStrength(target_cell, cs)\n\n # Chemotaxis\n if self.chemotaxisPlugin:\n field_names = self.chemotaxisPlugin.getFieldNamesWithChemotaxisData(source_cell)\n\n for fieldName in field_names:\n source_chd = self.chemotaxisPlugin.getChemotaxisData(source_cell, fieldName)\n target_chd = self.chemotaxisPlugin.addChemotaxisData(target_cell, fieldName)\n\n target_chd.setLambda(source_chd.getLambda())\n target_chd.saturationCoef = source_chd.saturationCoef\n target_chd.setChemotaxisFormulaByName(source_chd.formulaName)\n target_chd.assignChemotactTowardsVectorTypes(source_chd.getChemotactTowardsVectorTypes())\n\n # FocalPointPLasticityPlugin - this plugin has to be handled manually -\n # there is no good way to figure out which links shuold be copied from parent to child cell", "def _copy_attr(o, attr, adict, key=None):\n if hasattr(o, attr):\n adict[key or attr] = getattr(o, attr)", "def _update_attrs(cls, args, das, attrs, var_id=None, names=None):\n out = cls._format(attrs, args)\n for locale in OPTIONS[METADATA_LOCALES]:\n out.update(\n cls._format(\n cls._get_translated_metadata(\n locale, var_id=var_id, names=names or list(attrs.keys())\n ),\n args=args,\n formatter=get_local_formatter(locale),\n )\n )\n\n # Generate a signature string for the history attribute\n # We remove annotations, replace default float/int/str by values\n # and replace others by type\n callstr = []\n for (k, v) in das.items():\n callstr.append(f\"{k}=<array>\")\n for (k, v) in args.items():\n if isinstance(v, (float, int, str)):\n callstr.append(f\"{k}={v!r}\") # repr so strings have ' '\n else:\n callstr.append(\n f\"{k}={type(v)}\"\n ) # don't take chance of having unprintable values\n\n # Get history and cell method attributes from source data\n attrs = defaultdict(str)\n if names is None or \"cell_methods\" in names:\n attrs[\"cell_methods\"] = merge_attributes(\n \"cell_methods\", new_line=\" \", missing_str=None, **das\n )\n if \"cell_methods\" in out:\n attrs[\"cell_methods\"] += \" \" + out.pop(\"cell_methods\")\n\n attrs[\"xclim_history\"] = update_history(\n f\"{var_id or cls._registry_id}({', '.join(callstr)})\",\n new_name=out.get(\"var_name\"),\n **das,\n )\n\n attrs.update(out)\n return attrs", "def update_attributes_instability(attrs_inst: Dict[Attribute, float]):\n for attribute, attribute_instability in attrs_inst.items():\n attributes_instability[attribute] = attribute_instability", "def replicate_attributes(self):\n changed = False\n if getattr(self, 'phonology', None):\n changed = self.set_attr('word_boundary_symbol', self.phonology.word_boundary_symbol, changed)\n changed = self.set_attr('morpheme_delimiters', self.morphology.morpheme_delimiters, changed)\n changed = self.set_attr('morphology_rare_delimiter', self.morphology.rare_delimiter, changed)\n changed = self.set_attr('morphology_rich_upper', self.morphology.rich_upper, changed)\n changed = self.set_attr('morphology_rich_lower', self.morphology.rich_lower, changed)\n changed = self.set_attr('morphology_rules_generated', self.morphology.rules_generated, changed)\n changed = self.set_attr('language_model_start_symbol', self.language_model.start_symbol, changed)\n changed = self.set_attr('language_model_end_symbol', self.language_model.end_symbol, changed)\n changed = 
self.set_attr('language_model_categorial', self.language_model.categorial, changed)\n self.changed = changed", "def update_from_dict(instance, attrs, commit):\n\n field_names = list(map(lambda f: f.name, instance._meta.get_fields()))\n for attr, val in attrs.items():\n if attr in field_names:\n setattr(instance, attr, val)\n\n if commit:\n instance.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace one child `Node` with another child or children.
def replace(self, old, new):
    index = self.index(old)
    if isinstance(new, Node):
        self.setup_child(new)
        self[index] = new
    elif new is not None:
        self[index:index+1] = new
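Usage sketch for replace, covering both the single-node and the sequence case (docutils assumed; content is made up):

from docutils import nodes

parent = nodes.section()
old = nodes.paragraph(text='draft')
parent += old

new = nodes.paragraph(text='final')
parent.replace(old, new)            # single Node: the slot is overwritten in place
print(parent[0].astext())           # 'final'

parent.replace(new, [nodes.paragraph(text='a'), nodes.paragraph(text='b')])
print(len(parent))                  # 2 -- one child replaced by two children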
[ "def _replace_child(self, node, old, new):\n if node is None:\n self.tree = new\n elif node._left == old:\n node._left = new\n node._left._parent = node\n elif node._right == old:\n node._right = new\n node._left._parent = node\n else:\n assert (False) # May need to change", "def replace_with(self, other):\n self.parent.children[self.parent.children.index(self)] = other\n other.parent = self.parent", "def replaceChild(self, *args) -> \"void\":\n return _coin.SoGroup_replaceChild(self, *args)", "def replace_with_node(self,node):\n\n self.set_for_parents(node) # connect new to parent on proper locations\n node.parent= self.parent # set node paent correctly\n self.parent = None # disconnect self from the parent\n return node.find_root() # find root again", "def replaceChildren(self, newChildren):\n self.content.replaceChild(newChildren)", "def replaceChild(self, *args) -> \"void\":\n return _coin.SoNodeKitListPart_replaceChild(self, *args)", "def replace_child(parent, node, replace_with):\n # TODO(soupytwist): Don't refer to the formatting dict directly\n if hasattr(node, PASTA_DICT):\n setprop(replace_with, 'prefix', prop(node, 'prefix'))\n setprop(replace_with, 'suffix', prop(node, 'suffix'))\n for field in parent._fields:\n field_val = getattr(parent, field, None)\n if field_val == node:\n setattr(parent, field, replace_with)\n return\n elif isinstance(field_val, list):\n try:\n field_val[field_val.index(node)] = replace_with\n return\n except ValueError:\n pass\n raise errors.InvalidAstError('Node %r is not a child of %r' % (node, parent))", "def replaceChild(self, *args) -> \"void\":\n return _coin.SoVRMLLOD_replaceChild(self, *args)", "def _relink(self, parent, child, is_child_left):\n if is_child_left:\n parent._left = child\n else:\n parent._right = child\n if child is not None:\n child._parent = parent", "def replace_child(parent, node, replace_with):\n # TODO(soupytwist): Don't refer to the formatting dict directly\n if hasattr(node, fmt.PASTA_DICT):\n fmt.set(replace_with, 'prefix', fmt.get(node, 'prefix'))\n fmt.set(replace_with, 'suffix', fmt.get(node, 'suffix'))\n for field in parent._fields:\n field_val = getattr(parent, field, None)\n if field_val == node:\n setattr(parent, field, replace_with)\n return\n elif isinstance(field_val, list):\n try:\n field_val[field_val.index(node)] = replace_with\n return\n except ValueError:\n pass\n raise errors.InvalidAstError('Node %r is not a child of %r' % (node, parent))", "def put(self, node, child):\n\n node.add_child(child)", "def move_children(self, element1, element2):\n for child in element1.getchildren():\n element2.append(child)\n # reversed is needed to safely remove items while iterating\n for child in reversed(element1.getchildren()):\n element1.remove(child)", "def replaceNode(self, *args) -> \"void\":\n return _coin.SoMFNode_replaceNode(self, *args)", "def replaceChild(self, *args) -> \"void\":\n return _coin.SoVRMLParent_replaceChild(self, *args)", "def process_children(self, node):\n if node == None: return\n \n self.parent_stack.append(node)\n for childacc in node.get_child_accessors():\n child = childacc.get()\n if isinstance(child, list):\n newchild = self.process_list(child, childacc.name())\n if not isinstance(newchild, list): raise Exception(\"Cannot replace list with non-list!\")\n else:\n newchild = self.process_node(child, childacc.name())\n if newchild is not None and not isinstance(newchild, Nodes.Node):\n raise Exception(\"Cannot replace Node with non-Node!\")\n childacc.set(newchild)\n 
self.parent_stack.pop()", "def _replace_element_by_own_content(self, element):\n # pylint: disable=no-self-use\n\n if element.has_children_elements():\n children = element.get_children_elements()\n for child in children:\n element.insert_before(child)\n element.remove_node()\n elif element.has_children():\n element.replace_node(element.get_first_node_child())", "def replaceChild(self, *args) -> \"void\":\n return _coin.SoVRMLSwitch_replaceChild(self, *args)", "def replaceWith(self, newChild):\n self.parentNode.replaceChild(newChild, self)\n return self", "def replace_node(graph, node, new_node):\n graph.add_node(new_node)\n graph.add_edges_from([(new_node, n) for n in graph.neighbors(node)])\n graph.remove_node(node)", "def _replace(self, node1:int, node2: int):\n for key in self.graph:\n for index, value in enumerate(self.graph[key]):\n if value == node2:\n self.graph[key][index] = node1\n \n '''\n while node2 in self.graph[key]:\n # instead of reassigning the value of the list where node2 resides, I am deleting the node2 from the list and adding the node1 value\n\n # self.graph[key] = [value for value in self.graph if != node2] \n self.graph[key].remove(node2) # deletes the node2 value from the row\n self.graph[key].append(node1) # adds the node1 value \n '''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the index of the first child whose class exactly matches.
def first_child_matching_class(self, childclass, start=0, end=sys.maxsize):
    if not isinstance(childclass, tuple):
        childclass = (childclass,)
    for index in range(start, min(len(self), end)):
        for c in childclass:
            if isinstance(self[index], c):
                return index
    return None
[ "def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0", "def _get_class_index(prediction: np.ndarray, order_number_minus_one: int) -> int:\n return np.where(\n prediction\n == np.partition(prediction.flatten(), -2)[-order_number_minus_one - 1]\n )[1][0]", "def getMinChildIndex(self, index):\n\t\tleftChild = self.data[self.leftChildIndexOf(index)]\n\t\t\n\t\tif self.rightChildIndexOf(index) < self.numElements:\n\t\t\trightChild = self.data[self.rightChildIndexOf(index)]\n\t\t\tif rightChild < leftChild:\n\t\t\t\treturn self.rightChildIndexOf(index)\n\n\t\treturn self.leftChildIndexOf(index)", "def position(self):\n if self.parent:\n return self.parent.children.index(self)", "def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None", "def get_index_in_parent_list(self):\n\t\tif self.parent:\n\t\t\treturn super(Heading, self).get_index_in_parent_list()\n\t\telif self.document:\n\t\t\tl = self.get_parent_list()\n\t\t\tif l:\n\t\t\t\treturn l.index(self)", "def first_child_oftype(self, typename):\n for child in self._kids:\n if child.typename == typename:\n return child", "def _smaller_child(self, idx):\n left = 2 * idx + 1\n # case 1: no child\n if left >= len(self):\n return None\n\n right = left + 1\n # case 2: only left child\n if right == len(self):\n return left\n\n # case 3: two children\n if self._entries[left][1] < self._entries[right][1]:\n return left\n else:\n return right", "def get_class_index(classes, class_list):\n\tfilter_index = [np.where(class_list == i)[0][0] for i in classes]\n\treturn filter_index", "def parent_index(self, vaddr):\n return self.parent.child_index(vaddr)", "def min_child(self, i):\n # Check if the given node has two children\n if i*2*2 > self.size:\n return i*2+1\n else:\n if self.heap_list[i*2+1] < self.heap_list[i*2+2]:\n return i*2+1\n else:\n return i*2+2", "def find_node(self, node):\n for i in range(self.count):\n if node.name == self.nodes[i].name:\n return i\n\n return -1", "def child(self, i):\r\n return self.children[i]", "def smallest_child(self, k):\n\n # 2*k == self.counter: check that k only has one child, if yes,\n # then just return this child's index\n # self.array[2*k][0] < self.array[2*k + 1]: the left child is smaller\n # than the right child\n if 2*k == self.counter \\\n or self.array[2*k].distance < self.array[2*k + 1].distance:\n return 2*k\n else:\n # return the index of the right child if right child is less than\n # the left child\n return 2*k + 1", "def findChild(self, child: 'SoNode') -> \"int\":\n return _coin.SoVRMLParent_findChild(self, child)", "def findChild(self, node: 'SoNode') -> \"int\":\n return _coin.SoGroup_findChild(self, node)", "def indexOfCurrentElement(self):\r\n return self.tableOfContact.indexOfTopLevelItem(self.tableOfContact.currentItem())", "def _getLeftChild(self, index):\r\n return index * 2 + 1", "def child_index_a(self):\r\n return self._index_a", "def get_parent_index(self, index):\n return int((index - 1 ) / 2) if index != 0 else None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the index of the first child whose class does not match.
def first_child_not_matching_class(self, childclass, start=0, end=sys.maxsize):
    if not isinstance(childclass, tuple):
        childclass = (childclass,)
    for index in range(start, min(len(self), end)):
        for c in childclass:
            if isinstance(self.children[index], c):
                break
        else:
            return index
    return None
[ "def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0", "def getMinChildIndex(self, index):\n\t\tleftChild = self.data[self.leftChildIndexOf(index)]\n\t\t\n\t\tif self.rightChildIndexOf(index) < self.numElements:\n\t\t\trightChild = self.data[self.rightChildIndexOf(index)]\n\t\t\tif rightChild < leftChild:\n\t\t\t\treturn self.rightChildIndexOf(index)\n\n\t\treturn self.leftChildIndexOf(index)", "def _get_class_index(prediction: np.ndarray, order_number_minus_one: int) -> int:\n return np.where(\n prediction\n == np.partition(prediction.flatten(), -2)[-order_number_minus_one - 1]\n )[1][0]", "def position(self):\n if self.parent:\n return self.parent.children.index(self)", "def get_index_in_parent_list(self):\n\t\tif self.parent:\n\t\t\treturn super(Heading, self).get_index_in_parent_list()\n\t\telif self.document:\n\t\t\tl = self.get_parent_list()\n\t\t\tif l:\n\t\t\t\treturn l.index(self)", "def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None", "def _smaller_child(self, idx):\n left = 2 * idx + 1\n # case 1: no child\n if left >= len(self):\n return None\n\n right = left + 1\n # case 2: only left child\n if right == len(self):\n return left\n\n # case 3: two children\n if self._entries[left][1] < self._entries[right][1]:\n return left\n else:\n return right", "def get_parent_index(self, index):\n return int((index - 1 ) / 2) if index != 0 else None", "def parent_index(self, vaddr):\n return self.parent.child_index(vaddr)", "def indexOfCurrentElement(self):\r\n return self.tableOfContact.indexOfTopLevelItem(self.tableOfContact.currentItem())", "def find_node(self, node):\n for i in range(self.count):\n if node.name == self.nodes[i].name:\n return i\n\n return -1", "def get_class_index(classes, class_list):\n\tfilter_index = [np.where(class_list == i)[0][0] for i in classes]\n\treturn filter_index", "def first_child_oftype(self, typename):\n for child in self._kids:\n if child.typename == typename:\n return child", "def _getLeftChild(self, index):\r\n return index * 2 + 1", "def min_child(self, i):\n # Check if the given node has two children\n if i*2*2 > self.size:\n return i*2+1\n else:\n if self.heap_list[i*2+1] < self.heap_list[i*2+2]:\n return i*2+1\n else:\n return i*2+2", "def smallest_child(self, k):\n\n # 2*k == self.counter: check that k only has one child, if yes,\n # then just return this child's index\n # self.array[2*k][0] < self.array[2*k + 1]: the left child is smaller\n # than the right child\n if 2*k == self.counter \\\n or self.array[2*k].distance < self.array[2*k + 1].distance:\n return 2*k\n else:\n # return the index of the right child if right child is less than\n # the left child\n return 2*k + 1", "def child(self, i):\r\n return self.children[i]", "def _get_index(self) -> \"int\" :\n return _core.ListItem__get_index(self)", "def child_insert_index(self, parent: Node, new_child: Node) -> int:\n try:\n new_child_index = self._rule_children_names.index(new_child.name)\n except ValueError as e:\n msg = f\"Child '{new_child.name}' not allowed in parent '{parent.name}'\"\n raise ChildNotAllowedError(msg)\n for index, child in enumerate(parent.children):\n parent_child_index = self._rule_children_names.index(child.name)\n if parent_child_index > new_child_index:\n return index\n index = len(parent.children)\n return index", "def child_index_a(self):\r\n return 
self._index_a" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Note that this Element has been referenced by its name `name` or id `id`.
def note_referenced_by(self, name=None, id=None):
    self.referenced = 1
    # Element.expect_referenced_by_* dictionaries map names or ids
    # to nodes whose ``referenced`` attribute is set to true as
    # soon as this node is referenced by the given name or id.
    # Needed for target propagation.
    by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
    by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
    if by_name:
        assert name is not None
        by_name.referenced = 1
    if by_id:
        assert id is not None
        by_id.referenced = 1
[ "def element_name(self, element_name: str):\n\n self._element_name = element_name", "def element_name(self) -> str:\n return self._element_name", "def add_name(self):\n self.curr_iden = self.curr_word\n self.curr_obj.insert_attr_name(self.curr_word)", "def _reference(self):\n\t\tpass", "def nodesByIdref(self, QXmlName): # real signature unknown; restored from __doc__\n return []", "def setName(self, name):\n self.setAttribute('NAME', name)", "def setName(self, name):\n self.content = name", "def startElement(self, name: unicode) -> None:\n ...", "def update_name(self, name):\n if name != self.name:\n self.parent.types[name] = self\n del self.parent.types[self.name]\n self.name = name", "def set_name(self, name):\n if self.__id == -1:\n self.__shadow_expr = self.__expr\n self.__shadow_dependencies = self.__dependencies\n self.__id = evar.__dic_id\n self.__dependencies = {self.__id}\n evar.__dic_id += 1\n evar.__var_dic[self.__id] = weakref.ref(self)\n self.symbol = sympy.symbols(\"v\" + str(self.__id) + \"v\")\n self.g_symbol = sympy.symbols(\"g\" + str(self.__id) + \"g\")\n self.m_symbol = sympy.symbols(\"m\" + str(self.__id) + \"m\")\n self.__expr = self.symbol\n self.name = name\n self.__finish_operation()\n else:\n self.name = name", "def element(self, name):\n try:\n return self.elements[self.elements.index(name.lower())]\n except ValueError:\n return None", "def setName( self, name ):\n if type( name ) == str:\n self.Name = name\n self.graph.graph[ 'name' ] = name", "def SetID(self, id):\n if not self.IsValidName(id):\n raise XMLIDValueError(id)\n doc = self.GetDocument()\n if doc:\n doc.UnregisterElement(self)\n self.id = id\n doc.RegisterElement(self)\n else:\n self.id = id", "def set_name(self, new_name):\n\n self.img.attrib['Name'] = new_name", "def __repr__(self):\n\n return self.tagname", "def setNameAttribute(self, name: 'char const *') -> \"void\":\n return _coin.ScXMLScxmlElt_setNameAttribute(self, name)", "def name(self, decl_name):\n self._name = decl_name", "def name(self):\n # set the default name value\n name = Node.NAME_DEFAULT\n\n # try to get the name of this object from the attributes\n attributes = self.attribute\n if Node.NAME_KEY in attributes:\n name = attributes[Node.NAME_KEY]\n return name", "def setNameAttribute(self, name: 'char const *') -> \"void\":\n return _coin.ScXMLEventElt_setNameAttribute(self, name)", "def test_anchorName(self):\r\n listing = Element('a')\r\n listing.setAttribute('name', 'foo')\r\n self.spitter.visitNode(listing)\r\n self.assertEqual(\r\n ''.join(self.output),\r\n \"\\\\label{%sHASHfoo}\" % (\r\n os.path.abspath(self.filename).replace('\\\\', '/'),))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if and only if the given attribute is NOT one of the basic list attributes defined for all Elements.
def is_not_list_attribute(cls, attr):
    return attr not in cls.list_attributes
[ "def is_not_known_attribute(cls, attr):\r\n return attr not in cls.known_attributes", "def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False", "def _attr_ne(self, name, value):\n self._attr_present(name)\n self.filters.append(lambda elem: elem.attrib[name] != value)", "def _check_rule_has_not_attribute(self, data_sources, conditions):\n return not self._check_rule_has_attribute(data_sources, conditions)", "def has_attribute(self, atributo):\r\n return atributo in self.__atributos", "def is_valid_svg_attribute(self, elementname, attributename):\n element = self._get_element(elementname)\n return attributename in element.valid_attributes", "def _has_private_attribute(self):\n return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])", "def has_comm_attr(self, attr):\n\n for comm_attr in self._attr_list:\n if comm_attr == attr:\n return True\n\n return False", "def empty(self, exc=[]):\n attrs = self.get_own_attrs()\n return not set(attrs.keys()).difference(set(exc))", "def check_attribute(array):\n if array[0] == array[1] and array[1] == array[2]:\n return True\n elif array[0] != array[1] and array[1] != array[2] and array[0] != array[2]:\n return True\n else:\n return False", "def verify_export_attrs_removed(attributes):\n self.assertNotIn('index_in_children_list', attributes)\n self.assertNotIn('parent_sequential_url', attributes)\n self.assertNotIn('parent_url', attributes)", "def check_attr_unit(self, element, attr, unit_list):\n if attr in element.attrib:\n unit = self.parse_length(element.get(attr), percent=True)[1]\n return unit in unit_list", "def is_lock_attribute(element, attribute):\n\n return cmds.getAttr(\"{}.{}\".format(element, attribute), lock=True)", "def ignore(self):\n return \"ignore\" in self.attributes and self.attributes[\"ignore\"] == \"true\"", "def _attr_exists(self, attr):\n\n if self.metadata and attr not in self.metadata:\n self._warn(\"Attribute [{attr}] does not exist. \" +\n \"Check for a typo or disable validation \" +\n \"by .set_validation(False) \".format(attr=attr))\n\n # Return True if attribute validation is disabled\n return False == self.attribute_validation\n\n return True", "def confirm_attribute(item, attribute):\n if type(item) == dict:\n if item.__contains__(attribute) is True:\n pass\n else:\n raise KeyError('Attribute {} does not exist.'.format(attribute))\n else:\n if hasattr(item, attribute) is True:\n pass\n else:\n raise AttributeError('Attribute {} does not exist.'.format(attribute))\n\n return", "def has_attribute(self, attributeType):\n return attributeType in self._node._attribute_map", "def is_attr_protected(attrname: str) -> bool:\n return (\n attrname[0] == \"_\"\n and attrname != \"_\"\n and not (attrname.startswith(\"__\") and attrname.endswith(\"__\"))\n )", "def remove_attr(self,attr_list=[]):\n for x in attr_list: \n if hasattr(self,x): delattr(self,x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if and only if the given attribute is NOT recognized by this class.
def is_not_known_attribute(cls, attr):
    return attr not in cls.known_attributes
[ "def _attr_exists(self, attr):\n\n if self.metadata and attr not in self.metadata:\n self._warn(\"Attribute [{attr}] does not exist. \" +\n \"Check for a typo or disable validation \" +\n \"by .set_validation(False) \".format(attr=attr))\n\n # Return True if attribute validation is disabled\n return False == self.attribute_validation\n\n return True", "def is_not_list_attribute(cls, attr):\r\n return attr not in cls.list_attributes", "def _check_rule_has_not_attribute(self, data_sources, conditions):\n return not self._check_rule_has_attribute(data_sources, conditions)", "def _has_private_attribute(self):\n return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])", "def __bool__(self):\n return not hasattr(self, 'missing')", "def has_attribute(self, atributo):\r\n return atributo in self.__atributos", "def has_attr(product):\n if len(product.attribute_value_ids) > 0:\n return True\n return False", "def hasAttribute(*args, **kwargs):\n \n pass", "def is_unidentified(self):\n try:\n if 'compound' not in self.fields.keys():\n return False\n if not self.compound.contextual_fulfilled:\n return self.compound.is_unidentified\n except AttributeError:\n return True", "def ignore(self):\n return \"ignore\" in self.attributes and self.attributes[\"ignore\"] == \"true\"", "def has_attribute(self, key):\n assert isinstance(key, str)\n return key in self._attributes.keys()", "def has_attribute(self, attributeType):\n return attributeType in self._node._attribute_map", "def is_attr_protected(attrname: str) -> bool:\n return (\n attrname[0] == \"_\"\n and attrname != \"_\"\n and not (attrname.startswith(\"__\") and attrname.endswith(\"__\"))\n )", "def is_missing(obj):\n return getattr(obj, \"moya_missing\", False)", "def confirm_attribute(item, attribute):\n if type(item) == dict:\n if item.__contains__(attribute) is True:\n pass\n else:\n raise KeyError('Attribute {} does not exist.'.format(attribute))\n else:\n if hasattr(item, attribute) is True:\n pass\n else:\n raise AttributeError('Attribute {} does not exist.'.format(attribute))\n\n return", "def _isprop(self, attr: str) -> bool:\n\n return isinstance(attr, property)", "def attribute_has(nodeName, attributeName):\r\n\r\n # valider si le noeud possède l'attribut\r\n if maya.cmds.objExists(\"%s.%s\" % (nodeName, attributeName)):\r\n return True\r\n else:\r\n return False", "def hasattribute(self, k):\n return isinstance(self.attributes, dict) and k in self.attributes", "def _check_rule_has_attribute(self, data_sources, conditions):\n return hasattr(data_sources['asset'], conditions['attribute']) and \\\n getattr(data_sources['asset'], conditions['attribute']) is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a DOM representation of this document.
def asdom(self, dom=None):
    if dom is None:
        import xml.dom.minidom as dom
    domroot = dom.Document()
    domroot.appendChild(self._dom_node(domroot))
    return domroot
[ "def generate_document(self):\n\n resp = requests.get(self.link)\n return BeautifulSoup(resp.text, 'xml')", "def getImplementation(self):\n return DOMImplementation()", "def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElement = dom.documentElement\n if self.description is not None:\n domElement.setAttribute('description', self.description)\n e = dom.createTextNode(self.filename)\n domElement.appendChild(e)\n\n return domElement", "def dom_element(self):\n data = self.data\n dom_element = data.dom_element\n return dom_element", "def html(self):\n doc = dominate.document(title=self.title)\n\n # Add favicon\n if self.favicon is not None:\n with doc.head:\n link(rel='icon', href=self.favicon)\n\n # Add external files (Skin)\n if self.skin is not None:\n with doc.head:\n for ref in self.skin.libs: # Libs\n link(rel='stylesheet', crossorigin='anonymous', href=ref)\n\n for ref in self.skin.fonts: # Fonts\n link(rel='stylesheet', type='text/css', href=ref)\n\n if self.skin.rules != \"\":\n style(raw(self.skin.rules))\n\n # Add Widgets HTML to the page\n main_div = div(cls=\"container\")\n for w in self.widgets:\n main_div.add(w.html())\n main_div.add(br())\n doc.add(main_div)\n\n # Add Javascript code to the page\n js_str = \"\\n\\n\".join([a.js() for a in self.ajax()])\n if js_str != '':\n doc.add(script(src=JQUERY_CDN))\n doc.add(script(raw(js_str + \"\\n\\n\" + COMMON_JS)))\n\n return doc", "def toDomElement(self):\n dom = parseString('<%s></%s>' % (self.elementType, self.elementType))\n domElement = dom.documentElement\n\n if self.privilege is not None:\n domElement.setAttribute('privilege', self.privilege)\n\n addChildNode(dom=dom, parentNode=domElement, name='UserID',\n value=self.userId, nodeType=Node.TEXT_NODE,\n attrDict={})\n addChildNode(dom=dom, parentNode=domElement, name='FirstName',\n value=self.firstName, nodeType=Node.TEXT_NODE,\n attrDict={})\n addChildNode(dom=dom, parentNode=domElement, name='LastName',\n value=self.lastName, nodeType=Node.TEXT_NODE,\n attrDict={})\n addChildNode(dom=dom, parentNode=domElement, name='Email',\n value=self.email, nodeType=Node.TEXT_NODE,\n attrDict={})\n\n return domElement", "def get_root_node(self):\n\n return self.dom.documentElement", "def get_soup(self):\n if self._soup is None:\n self._soup = BeautifulSoup(self.get_data(), \"lxml\", from_encoding='utf8')\n return self._soup", "def serialize(self):\n # Serialize fields to a dict\n elements = []\n for element in self.elements:\n elements.append(element.serialize())\n data = {'type': 'document', 'elements': elements}\n return data", "def dump(self):\n return etree.tostring(self.root)", "def get_DOM(self, xmlfile):\n\t\tdom = None\n\t\ttry: dom = xml.dom.minidom.parseString(xmlfile)\n\t\texcept Exception, e:\n\t \t\tprint \"Error getting dom:\", str(e)\n\t \t\treturn None\n\t\treturn dom", "def internet_document(self):\n return _InternetDocument(self, 'InternetDocument')", "def xml(self):\n return oxml_tostring(self, encoding='UTF-8', standalone=True)", "def makeNewDocument(self):\n\n document = textlayout.Document(\n width=self._propertyToPoints(\"width\"),\n marginTop=self._propertyToPoints(\"margin_top\"),\n marginBottom=self._propertyToPoints(\"margin_bottom\"),\n )\n\n return document", "def GetDocument(self):\n if self.parent:\n if isinstance(self.parent, Document):\n return self.parent\n else:\n return self.parent.GetDocument()\n else:\n return None", "def get_xml(self):\n return etree.tostring(self.xml_tree, pretty_print=True, 
encoding=\"utf-8\").decode(\"utf-8\")", "def read(self):\n # Get result data from debugger engine and verify length of response\n data = self.read_data()\n # Show debug output\n debug('[Response data] %s' % data)\n # Create XML document object\n document = parseString(data)\n return document", "def loading_xml(self):\n\n dom = minidom.parse(self.filepath)\n return dom", "def html(self):\n if not self._html:\n self._html = parse(self.input_doc, self.options.get('url'))\n\n return self._html", "def main_document(self) -> SpdxDocument:\n self._generate_documents()\n return cast(SpdxDocument, self._main_document)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`self.nameids` maps names to IDs, while `self.nametypes` maps names to booleans representing hyperlink type (True==explicit, False==implicit). This method updates the mappings. The following state transition table shows how `self.nameids` ("ids") and `self.nametypes` ("types") change with new input (a call to this method), and what actions are performed ("implicit"-type system
def set_name_id_map(self, node, id, msgnode=None, explicit=None):
    for name in node['names']:
        if name in self.nameids:
            self.set_duplicate_name_id(node, id, name, msgnode, explicit)
        else:
            self.nameids[name] = id
            self.nametypes[name] = explicit
[ "def updateNameTypes(self):\n typeid = self.getName().getType()\n if not typeid:\n raise RuntimeError(\"declared name in GlslNameStrip has no type id\")\n for ii in self.__names[1:]:\n found_type = ii.getType()\n if found_type:\n if typeid != found_type:\n raise RuntimeError(\"conflicting type found for %s: %s vs. %s\" % (str(ii), str(typeid), str(found_type)))\n else:\n ii.setType(typeid)", "def edit_names(a):\n\n dictionary={}\n i=0\n for state in a.states:\n dictionary[str(i)]=state\n i+=1\n\n # rename states\n a.states=list(a.states)\n for i in range(len(a.states)):\n a.states[i]=list(dictionary.keys())[list(dictionary.values()).index(a.states[i])]\n a.states=set(a.states)\n\n # rename start states\n a.start=list(a.start)\n for i in range(len(a.start)):\n a.start[i]=list(dictionary.keys())[list(dictionary.values()).index(a.start[i])]\n a.start=set(a.start)\n\n # rename accept states\n a.accept=list(a.accept)\n for i in range(len(a.accept)):\n a.accept[i]=list(dictionary.keys())[list(dictionary.values()).index(a.accept[i])]\n a.accept=set(a.accept)\n\n # rename transitions\n for i in range(len(a.transitions)):\n a.transitions[i][0]=list(dictionary.keys())[list(dictionary.values()).index(a.transitions[i][0])]\n a.transitions[i][2]=list(dictionary.keys())[list(dictionary.values()).index(a.transitions[i][2])]", "def update_type_and_attribute_ids(self):\n\n self.get_type_name_map()\n\n if len(self.input_network.types)>0:\n # If the network has type\n self.input_network.types = [self.network_template_type]\n\n #map the name of the nodes, links and groups to its negative ID\n for n_j in self.input_network.nodes:\n self.name_maps['NODE'][n_j.name] = n_j.id\n self.update_type_and_attribute(n_j)\n\n for l_j in self.input_network.links:\n self.name_maps['LINK'][l_j.name] = l_j.id\n self.update_type_and_attribute(l_j)\n\n for g_j in self.input_network.resourcegroups:\n self.name_maps['GROUP'][g_j.name] = g_j.id\n self.update_type_and_attribute(g_j)", "def update_name(self, name):\n if name != self.name:\n self.parent.types[name] = self\n del self.parent.types[self.name]\n self.name = name", "def _setup_name_tables():\n with tables_lock:\n if to_name: return\n\n # Go through every possible scan code, and map them to virtual key codes.\n # Then vice-versa.\n all_scan_codes = [(sc, user32.MapVirtualKeyExW(sc, MAPVK_VSC_TO_VK_EX, 0)) for sc in range(0x100)]\n all_vks = [(user32.MapVirtualKeyExW(vk, MAPVK_VK_TO_VSC_EX, 0), vk) for vk in range(0x100)]\n for scan_code, vk in all_scan_codes + all_vks:\n # `to_name` and `from_name` entries will be a tuple (scan_code, vk, extended, shift_state).\n if (scan_code, vk, 0, 0, 0) in to_name:\n continue\n\n if scan_code not in scan_code_to_vk:\n scan_code_to_vk[scan_code] = vk\n\n # Brute force all combinations to find all possible names.\n for extended in [0, 1]:\n for modifiers in distinct_modifiers:\n entry = (scan_code, vk, extended, modifiers)\n # Get key names from ToUnicode, GetKeyNameText, MapVirtualKeyW and official virtual keys.\n names = list(get_event_names(*entry))\n if names:\n # Also map lowercased key names, but only after the properly cased ones.\n lowercase_names = [name.lower() for name in names]\n to_name[entry] = names + lowercase_names\n # Remember the \"id\" of the name, as the first techniques\n # have better results and therefore priority.\n for i, name in enumerate(map(normalize_name, names + lowercase_names)):\n from_name[name].append((i, entry))\n\n # TODO: single quotes on US INTL is returning the dead key (?), and therefore\n # not 
typing properly.\n\n # Alt gr is way outside the usual range of keys (0..127) and on my\n # computer is named as 'ctrl'. Therefore we add it manually and hope\n # Windows is consistent in its inconsistency.\n for extended in [0, 1]:\n for modifiers in distinct_modifiers:\n to_name[(541, 162, extended, modifiers)] = ['alt gr']\n from_name['alt gr'].append((1, (541, 162, extended, modifiers)))\n\n modifiers_preference = defaultdict(lambda: 10)\n modifiers_preference.update({(): 0, ('shift',): 1, ('alt gr',): 2, ('ctrl',): 3, ('alt',): 4})\n def order_key(line):\n i, entry = line\n scan_code, vk, extended, modifiers = entry\n return modifiers_preference[modifiers], i, extended, vk, scan_code\n for name, entries in list(from_name.items()):\n from_name[name] = sorted(set(entries), key=order_key)", "def replace_names(names):\n new_names = []\n for name in names:\n if name == 'Transformer':\n new_names.append('transformer')\n elif name == 'encoder_norm':\n new_names.append('norm')\n elif 'encoderblock' in name:\n num = name.split('_')[-1]\n new_names.append('encoder_layers')\n new_names.append(num)\n elif 'LayerNorm' in name:\n num = name.split('_')[-1]\n if num == '0':\n new_names.append('norm{}'.format(1))\n elif num == '2':\n new_names.append('norm{}'.format(2))\n elif 'MlpBlock' in name:\n new_names.append('mlp')\n elif 'Dense' in name:\n num = name.split('_')[-1]\n new_names.append('fc{}'.format(int(num) + 1))\n elif 'MultiHeadDotProductAttention' in name:\n new_names.append('attn')\n elif name == 'kernel' or name == 'scale':\n new_names.append('weight')\n elif name == 'bias':\n new_names.append(name)\n elif name == 'posembed_input':\n new_names.append('pos_embedding')\n elif name == 'pos_embedding':\n new_names.append('pos_embedding')\n elif name == 'embedding':\n new_names.append('embedding')\n elif name == 'head':\n new_names.append('classifier')\n elif name == 'cls':\n new_names.append('cls_token')\n else:\n new_names.append(name)\n return new_names", "def set_topic_name_revcode_mapping(self) -> None:\n for periodic_topic in self.periodic_data:\n self.log.debug(f\"creating mapping for {periodic_topic}\")\n self.create_revcode_mapping(periodic_topic)\n\n for asynchronous_topic in self.asynchronous_data:\n self.log.debug(f\"creating mapping for {asynchronous_topic}\")\n self.create_revcode_mapping(asynchronous_topic)", "def rename(self):\n\n ids = {s: str(i) for (i, s) in enumerate(sorted(list(self.Q())))}\n\n self.transitions = [(ids[t[0]], t[1], ids[t[2]]) for t in self.transitions]\n self.F = [ids[f] for f in self.F]\n self.q0 = ids[self.q0]", "def idschange(listname, dictname):\n for index, item in enumerate(listname):\n if isinstance(item, list):\n idschange(item, dictname)\n elif item in dictname.keys():\n listname[index] = dictname[item]\n return", "def init_name_maps(self):\n map_1 = {}\n with open(self.organisms_code_names_path) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n for code in content:\n s = code.split('\t')\n map_1[s[0]] = s[1]\n self.short_name_to_full_name_map = map_1\n\n map_2 = {}\n # tree_str = self.newick\n # tree_names = re.split('[\\s+\\n+\\\"\\'\\:\\)\\(\\,\\:\\'\\']', tree_str)\n # tree_names = list(filter(lambda x: x != \"\" and x != ';', tree_names))\n for short_name in self.short_name_to_full_name_map.keys():\n full_name = self.short_name_to_full_name_map[short_name]\n map_2[full_name] = short_name\n\n self.full_name_to_short_name_map = map_2", "def upgrade_state_dict_named(self, state_dict, name):\n if 
isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict", "def test_reassignNames(self):\n t = self.TreeRoot\n mapping = dict([(x, str(i)) for i,x in enumerate('abfg')])\n exp_names = ['0','1','2','3','c','d','e','h']\n t.reassignNames(mapping)\n obs_names = sorted(t.getNodeNames())\n self.assertEqual(obs_names, exp_names)", "def type_mapping(name):\n di = dict(FLOAT=1, FLOATS=6, GRAPH=5, GRAPHS=10, INT=2,\n INTS=7, STRING=3, STRINGS=8, TENSOR=4,\n TENSORS=9, UNDEFINED=0, SPARSE_TENSOR=11)\n if name is None:\n return di\n if isinstance(name, str):\n return di[name]\n rev = {v: k for k, v in di.items()}\n return rev[name]", "def add_names(ibs, name_list, note_list=None):\n # nid_list_ = [namenid_dict[name] for name in name_list_]\n # ibsfuncs.assert_valid_names(name_list)\n # All names are individuals and so may safely receive the INDIVIDUAL_KEY lblannot\n lbltype_rowid = ibs.lbltype_ids[constants.INDIVIDUAL_KEY]\n lbltype_rowid_list = [lbltype_rowid] * len(name_list)\n nid_list = ibs.add_lblannots(lbltype_rowid_list, name_list, note_list)\n return nid_list", "def add_state_names_column(self):\r\n \r\n\r\n names_map = {\"CA\": \"Cali\", \"CO\": \"Colo\", \"CT\": \"Conn\"}\r\n self[\"name\"] = self['abbrev'].map(names_map)", "def put_names(self, object_type, names):\n\n self.__ex_put_names(object_type, names)", "def test_reassignNames_specific_nodes(self):\n t = self.TreeRoot\n nodes = [self.TreeNode['a'], self.TreeNode['b']]\n mapping = dict([(x, str(i)) for i,x in enumerate('abfg')])\n exp_names = ['0','1','c','d','e','f','g','h']\n t.reassignNames(mapping, nodes)\n obs_names = sorted(t.getNodeNames())\n self.assertEqual(obs_names, exp_names)", "def test_name_rename():\n class Renamer(NodeTransformer):\n def visit_Name(self, node, meta):\n node.id = node.id + '_visited'\n return node\n\n renamer = Renamer()\n mod = ast.parse(\"bob = frank\")\n transform(mod, renamer)\n bob_node = mod.body[0].targets[0]\n frank_node = mod.body[0].value\n\n assert bob_node.id == \"bob_visited\"\n assert frank_node.id == \"frank_visited\"", "def rename(broadlink, new_name):\n #Get object state\n broadlink_state = str(state.get(broadlink)) #type casting into a 
string to prevent errors down the line\n new_name_state = str(state.get(new_name)).strip() \n\n broadlink_data = read_json_data(os.path.join(BROADLINK_CONFIG_FOLDER, STORAGE_FILE)) \n for broadlink_configured in broadlink_data.items():\n if broadlink_configured[1]['friendly_name'] == new_name_state: \n notify.persistent_notification(message = \"A broadlink with this name already exists\", title = \"Broadlink\")\n return False\n if broadlink_configured[1]['friendly_name'] == broadlink_state: \n mac_address = broadlink_configured[0]\n\n broadlink_data[mac_address]['friendly_name'] = new_name_state\n update_input_select(broadlink_data, INPUT_SELECT_YAML_FILE, INPUT_SELECT_REMOTE)\n write_json_data(os.path.join(BROADLINK_CONFIG_FOLDER, STORAGE_FILE), broadlink_data)\n\n input_select.reload() #Reload the input_select to update the friendly name", "def _replace_names(self, names):\n el_namen = self.get_root().xpath('./person/persName')\n for el_naam in el_namen:\n el_naam.getparent().remove(el_naam)\n for name in names:\n self._add_a_name(name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call self."``visit_`` + node class name" with `node` as parameter. If the ``visit_...`` method does not exist, call self.unknown_visit.
def dispatch_visit(self, node):
    node_name = node.__class__.__name__
    method = getattr(self, 'visit_' + node_name, self.unknown_visit)
    self.document.reporter.debug(
        'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
        % (method.__name__, node_name))
    return method(node)
[ "def Visit(self, node):\n mapping = self._mapping\n\n # Build a visitor that performs the old_class -> new_class mapping:\n class Visitor(visitors.Visitor):\n visits_all_node_types = True\n name_to_class = mapping\n for name, new_cls in mapping.iteritems():\n\n def Visit(self, node):\n # Python doesn't allow us to build this as a closure, so we have to\n # use the clunky way of retrieving the replacement class.\n cls = self.name_to_class.get(node.__class__.__name__)\n if cls is not None:\n return cls(*node)\n else:\n return node\n locals()[\"Visit\" + name] = Visit\n return node.Visit(Visitor())", "def generic_visit(self, node):\n raise NotImplementedError('Unsupported AST node %s' % node)", "def visit(self, node, args=()):\n if not isinstance(node, broom.Node):\n node = node.node(*args)\n self.visitGraph(node)", "def generic_visit(self, node, offset=0):\n lead = ' ' * offset\n\n output = f\"{lead}{node.lineno} {node.__class__.__name__}: \"\n\n if node.attr_names:\n vlist = [(n, getattr(node, n)) for n in node.attr_names]\n output += ', '.join('%s = %s' % v for v in vlist)\n\n print(output)\n\n for (_, child) in node.children():\n self.visit(child, offset=offset + 2)", "def traverse(self, node, branch=None, **kw):\n\n parent = node\n if branch:\n node = node[branch]\n\n if node is None:\n return\n\n if branch and 'type' in parent:\n self.debug('TRAVERSE {parent[type]} -> {branch}:{node[type]}',\n node=node, parent=parent, branch=branch)\n else:\n self.debug('TRAVERSE {node[type]}', node=node)\n\n assert '__traversed' not in node\n node['__traversed'] = True\n\n self.set_location(node)\n\n try:\n handler = self.node_handlers[node['type']]\n except KeyError:\n if self.err.debug_level:\n self.error(err_id=('traverser', 'traverse', 'unknown_node'),\n error='Unknown node type: {[type]}'.format(node))\n\n log.exception('Unknown node type: {[type]}'.format(node))\n key = 'unknown_node_types'\n self.err.metadata.setdefault(key, defaultdict(int))\n self.err.metadata[key][node['type']] += 1\n else:\n with self._debug_level:\n result = handler(node, **kw)\n if isinstance(result, (JSWrapper, JSValue)):\n result.parse_node = node\n return result", "def _hx_visit_generic(self, visitor_class, *args):\n v = visitor_class(*args)\n v.apply_to(self._cfunc.body, None)", "def apply_visitor(visitor, decl_inst):\n\n fname = 'visit_' + \\\n decl_inst.__class__.__name__[:-2] # removing '_t' from class name\n if not hasattr(visitor, fname):\n raise visit_function_has_not_been_found_t(visitor, decl_inst)\n return getattr(visitor, fname)()", "def parse(self, node):\n parseMethod = getattr(self, \"parse%s\" % node.__class__.__name__)\n parseMethod(node)", "def generic_visit(self, node):\n\n # let the super class visit this node first\n super().generic_visit(node)\n\n # only trace statements\n if not isinstance(node, ast.stmt):\n return node\n\n # a unique identifier and initial data for this node\n node_id = len(self.nodes)\n self.nodes.append({\n 'node': node,\n 'counter': 0,\n 'time': 0,\n })\n\n # tracing is done by calling \"execute_node\" of this class\n func1 = ast.Attribute(\n value=ast.Name(id=CodeTracer.__INJECT_NAME, ctx=ast.Load()),\n attr='execute_node1',\n ctx=ast.Load()\n )\n func2 = ast.Attribute(\n value=ast.Name(id=CodeTracer.__INJECT_NAME, ctx=ast.Load()),\n attr='execute_node2',\n ctx=ast.Load()\n )\n\n # the argument to the tracing function is the unique node identifier\n args = [ast.Num(n=node_id)]\n\n # the tracer will be executed whenever the statement is executed\n tracer1 = 
ast.Expr(value=ast.Call(func=func1, args=args, keywords=[]))\n tracer2 = ast.Expr(value=ast.Call(func=func2, args=args, keywords=[]))\n\n # spoof location information for the generated node\n ast.copy_location(tracer1, node)\n ast.copy_location(tracer2, node)\n\n # inject tracers in a try-finally construct around this node\n wrapper = ast.Try(body=[node], handlers=[], orelse=[], finalbody=[tracer2])\n return [tracer1, wrapper]", "def define_visitor(self, file_ref, baseclass):\n self.write_ln(file_ref, \"class Visitor:\\n\")\n self.write_newline(file_ref)\n for entry in self.types:\n details = entry.split(\":\")\n classname = details[0].strip()\n self.write_ln(file_ref,\n \"def visit_\" + classname.lower() + \"_\" + baseclass.lower() + \"(self, \" + classname.lower() +\n \"_\" + baseclass.lower() + \": '\" + classname + \"'): pass\\n\",\n indent=4)", "def visitClass(self, testClass):", "def parse(self, node):\n pm = getattr(self, \"parse_%s\"%node.__class__.__name__)\n pm(node)", "def get_visitor(self, node):\r\n visitor = self._visitor_cache.get(node.__class__)\r\n if visitor is None:\r\n method = 'visit_' + node.__class__.__name__\r\n visitor = getattr(self, method, None)\r\n self._visitor_cache[node.__class__] = visitor\r\n return visitor", "def generic_visit(self, node):\n if (\n not self.replaced\n and hasattr(node, \"_location\")\n and node._location == self.search\n ):\n self.replaced = True\n return self.replacement_node\n else:\n return NodeTransformer.generic_visit(self, node)", "def build_cases(node: ASTNodeType) -> None:\n # Don't bother processing classes unless they actually have\n # concrete subclasses, otherwise we would be producing dead code.\n if not node.concrete_subclasses:\n return\n\n to_pop = False\n\n if node == root_node:\n # As a special case, emit actions for the root node outside of\n # the top-level CASE block as we don't need to dispatch on\n # anything for them: they always must be applied.\n actions = actions_for_node(node, node_var)\n if actions:\n result.append(actions)\n\n else:\n # If there are actions for this node, add a matcher for them\n # and process the subclasses in a nested CASE block.\n actions = actions_for_node(node, Matcher.new_node_var(node))\n if actions:\n m = Matcher(node, actions)\n case_stack[-1].matchers.append(m)\n case_stack.append(m.inner_case)\n to_pop = True\n\n for subcls in node.subclasses:\n build_cases(subcls)\n\n if to_pop:\n case_stack.pop()", "def transform(self, node):\n try:\n handler = getattr(self, 'transform_%s' % node.kind.name.lower())\n return handler(node)\n except AttributeError:\n print(\n \"Ignoring node of type %s (%s)\" % (\n node.kind,\n ' '.join(\n t.spelling for t in node.get_tokens())\n ),\n file=sys.stderr\n )", "def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname", "def EnterClassType(self, node):\n nodes = [node]\n seen = set()\n while nodes:\n cur_node = nodes.pop(0)\n if cur_node in seen:\n continue\n seen.add(cur_node)\n for prefix, cls in self._Lookup(cur_node):\n if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.ClassType):\n if cls.type.cls:\n cls = cls.type.cls\n else:\n nodes.append(cls.type)\n if isinstance(cls, pytd.Class):\n node.cls = cls\n return\n else:\n logging.warning(\"Couldn't resolve %s: Not a class: %s\",\n prefix + node.name, type(cls))", "def call_visitor(fort_node):\n v = ASR2PyVisitor()\n v.visit(fort_node)\n res_ast = v.ret_ast()\n return res_ast", "def _(self, 
node: AnnCastRecordDef):\n # TODO: Where should bases field be used?\n funcs = []\n fields = []\n if len(node.funcs) > 0:\n funcs = self.visit_list(node.funcs)\n if len(node.fields) > 0:\n fields = self.visit_list(node.fields)\n node_uid = uuid.uuid4()\n self.G.add_node(node_uid, label=\"Class: \" + node.name)\n\n # Add attributes to the graph\n attr_uid = uuid.uuid4()\n self.G.add_node(attr_uid, label=\"Attributes\")\n self.G.add_edge(node_uid, attr_uid)\n for n in fields:\n self.G.add_edge(attr_uid, n)\n\n # Add functions to the graph\n funcs_uid = uuid.uuid4()\n self.G.add_node(funcs_uid, label=\"Functions\")\n self.G.add_edge(node_uid, funcs_uid)\n for n in funcs:\n self.G.add_edge(funcs_uid, n)\n\n return node_uid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call self."``depart_`` + node class name" with `node` as parameter. If the ``depart_...`` method does not exist, call self.unknown_departure.
def dispatch_departure(self, node):
    node_name = node.__class__.__name__
    method = getattr(self, 'depart_' + node_name, self.unknown_departure)
    self.document.reporter.debug(
        'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
        % (method.__name__, node_name))
    return method(node)
[ "def connect_directive_node(self, name, f_visit, f_depart):\r\n self.builder._function_node.append((name, f_visit, f_depart))", "def _depart(self, data, sock, forward=True):\n if forward:\n self.send_replicas_forward()\n time.sleep(1)\n self.send_data_forward()\n # Let the previous node know who its new next node is, after I depart\n self.neighbors.send_back('next:{}:{}:{}'.format(self.neighbors.front_ip,self.neighbors.front_port, self.neighbors.front_hash))\n # Let the next node know who its new previous node is, after I depart\n self.neighbors.send_front('prev:{}:{}:{}'.format(self.neighbors.back_ip,self.neighbors.back_port, self.neighbors.back_hash))\n # forward = False means all have to exit and not pass their values\n \n # Else let the master know that you departed\n if forward == True :\n send_request(self.m_host, self.m_port, 'depart:'+self.id)\n self.close = True\n self.message_queues[sock].put('Done...Bye Bye')", "def Node2Method(self, node): \n ##TODO(GuoChenkai) Nodef to Encodedmethod\n ## convert through the method_name\n #res = [] \n #methods = self.d.get_method(gvm_node.method_name)\n #for i in methods:\n #if i.get_name() == gvm_node.method_name:\n #res.append(i)\n #return res\n \n #print start_method.XREFfrom.items\n \n ## convert through the id (id does not match) \n #method = self.d.get_method_by_idx(gvm_node.id)\n #return method \n \n ## convert through the map_nodemethod {} within this class\n return self.d.get_method_descriptor(node.class_name,node.method_name,node.descriptor)\n #if not gvm_node.id in self.map_nodemethod:\n #return None \n #elif self.map_nodemethod[gvm_node.id] != None:\n #method = self.map_nodemethod[gvm_node.id]\n #return method\n #else: return None", "def departure(self, departureTime):\n self._departure = departureTime", "def departed(self, airport, howMany=MAX_RECORD_LENGTH, filter=TrafficFilter.ALL, offset=0):\n data = {\"airport\": airport, \"howMany\": howMany, \"filter\": filter, \"offset\": offset}\n return self._request(\"Departed\", data)", "def add_departure(self, departure_date, departure_time):\r\n self.departure_date = departure_date\r\n self.departure_time = departure_time", "def depart_ucomment_node(self, node):\n pass", "def parse(self, node):\n parseMethod = getattr(self, \"parse%s\" % node.__class__.__name__)\n parseMethod(node)", "def addInstanceRemovedDagPathCallback(*args, **kwargs):\n \n pass", "def next_nearby_departures_of_station(\n self, station_id: int, time: str, lang: str = \"en\"\n ) -> Optional[PublicTransitResponse]:\n\n data = {\n \"lang\": lang,\n \"stnId\": station_id,\n \"time\": time,\n \"apikey\": self._api_key,\n }\n return self.__get(data, \"board.json\", \"NextDepartures\")", "def departure_time(self, departure_time: int):\n\n self._departure_time = departure_time", "def get_next_departure(self, t):\n if t > self.next_departure:\n raise Exception(\"current time is after departure!\")\n return self.next_departure - t", "def generate_cold_departure(self, t):\n self.next_departure = t + self.cold_service_process.generate_trace()", "def process_deceased_field(deceased_field):\n # Try to parse the deceased fields when the fields are comma separated.\n try:\n return parse_comma_delimited_deceased_field(deceased_field)\n except Exception:\n pass\n\n # Try to parse the deceased fields when the fields are pipe separated.\n try:\n return parse_pipe_delimited_deceased_field(deceased_field)\n except Exception:\n pass\n\n # Try to parse the deceased fields when the fields are space separated.\n try:\n return 
parse_space_delimited_deceased_field(deceased_field)\n except Exception:\n pass\n\n # Try to parse the deceased fields assuming it contains an age.\n try:\n return parse_age_deceased_field(deceased_field)\n except Exception:\n pass\n\n raise ValueError(f'Cannot parse {Fields.DECEASED}: {deceased_field}')", "def dd_run_static_method(dd_node):\n return execute_static_method_dynamically(dd_node.Attributes[DD_NODE_ATTR_NAME['module_name']].GetValueAsString(),\n dd_node.Attributes[DD_NODE_ATTR_NAME['class_name']].GetValueAsString(),\n dd_node.Attributes[DD_NODE_ATTR_NAME['method_name']].GetValueAsString(),\n dd_node)", "def addDagDagPathCallback(*args, **kwargs):\n \n pass", "def erode(self, grid, dt=None, node_elevs='topographic__elevation',\n node_drainage_areas='drainage_area',\n node_receiving_flow='flow_receiver',\n node_order_upstream='upstream_ID_order',\n node_slope='topographic__steepest_slope',\n steepest_link='links_to_flow_receiver',\n runoff_rate_if_used=None,\n #W_if_used=None, Q_if_used=None,\n stability_condition='loose',\n Dchar_if_used=None, io=None):\n\n if runoff_rate_if_used != None:\n runoff_rate = runoff_rate_if_used\n assert type(runoff_rate) in (int, float, np.ndarray)\n else:\n runoff_rate = 1.\n\n if dt==None:\n dt = self.tstep\n try:\n self.Dchar=self.Dchar_in\n except AttributeError:\n try:\n self.Dchar=grid.at_node[Dchar_if_used]\n except FieldError:\n assert type(Dchar_if_used)==np.ndarray\n self.Dchar=Dchar_if_used\n\n if type(node_elevs)==str:\n node_z = grid.at_node[node_elevs]\n else:\n node_z = node_elevs\n\n if type(node_drainage_areas)==str:\n node_A = grid.at_node[node_drainage_areas]\n else:\n node_A = node_drainage_areas\n\n if type(node_receiving_flow)==str:\n flow_receiver = grid.at_node[node_receiving_flow]\n else:\n flow_receiver = node_receiving_flow\n\n #new V3:\n if type(node_order_upstream)==str:\n s_in = grid.at_node[node_order_upstream]\n else:\n s_in = node_order_upstream\n\n if type(node_slope)==str:\n node_S = grid.at_node[node_slope]\n else:\n node_S = node_slope\n\n if self.lamb_flag:\n variable_shields_crit = 0.15*node_S**0.25\n try:\n variable_thresh = variable_shields_crit*self.shields_prefactor_to_shear\n except AttributeError:\n variable_thresh = variable_shields_crit*self.shields_prefactor_to_shear_noDchar*self.Dchar\n\n\n if type(steepest_link)==str:\n link_length = np.empty(grid.number_of_nodes,dtype=float)\n link_length.fill(np.nan)\n draining_nodes = np.not_equal(grid.at_node[steepest_link], BAD_INDEX_VALUE)\n core_draining_nodes = np.intersect1d(np.where(draining_nodes)[0], grid.core_nodes, assume_unique=True)\n link_length[core_draining_nodes] = grid.link_length[grid.at_node[steepest_link][core_draining_nodes]]\n #link_length=grid.node_spacing_horizontal\n else:\n link_length = grid.link_length[steepest_link]\n square_link_length = np.square(link_length) #nans propagate forward\n\n try:\n transport_capacities_thresh = self.thresh*self.Qs_thresh_prefactor*runoff_rate**(0.66667*self._b)*node_A**self.Qs_power_onAthresh\n except AttributeError:\n transport_capacities_thresh = variable_thresh*self.Qs_thresh_prefactor*runoff_rate**(0.66667*self._b)*node_A**self.Qs_power_onAthresh\n\n transport_capacity_prefactor_withA = self.Qs_prefactor*runoff_rate**(0.6+self._b/15.)*node_A**self.Qs_power_onA\n\n internal_t = 0.\n break_flag = False\n dt_secs = dt*31557600.\n counter = 0\n\n while 1: #use the break flag, to improve computational efficiency for runs which are very stable\n #we assume the drainage structure is forbidden to change 
during the whole dt\n #print \"loop...\"\n #note slopes will be *negative* at pits\n #track how many loops we perform:\n counter += 1\n downward_slopes = node_S.clip(0.)\n #positive_slopes = np.greater(downward_slopes, 0.)\n transport_capacities_S = transport_capacity_prefactor_withA*(downward_slopes)**0.7\n trp_diff = (transport_capacities_S - transport_capacities_thresh).clip(0.)\n transport_capacities = np.sqrt(trp_diff*trp_diff*trp_diff)\n\n if stability_condition == 'tight':\n mock_diffusivities = np.zeros_like(transport_capacities, dtype=float)\n mock_diffusivities = transport_capacities/downward_slopes\n tstep_each_node = 10.*square_link_length/mock_diffusivities #we're relaxing the condition fivefold here, as the true VonNeumann condition is VERY restrictive\n #if no node exceeds crit, tstep_each_node will just be nans and infs\n delta_t_internal = np.nanmin(tstep_each_node) #in seconds, nanmin avoids the pit nodes\n if delta_t_internal == np.inf: #no node exceeds crit\n delta_t_internal = dt_secs #nothing happened, so let the loop complete, awaiting more uplift\n if internal_t + delta_t_internal >= dt_secs:\n dt_this_step = dt_secs-internal_t #now in seconds\n break_flag = True\n else:\n dt_this_step = delta_t_internal #a min tstep was found (seconds). We terminate the loop\n else: #loose, gradient based method\n dt_this_step = dt_secs-internal_t #and the adjustment is made AFTER the dz calc\n\n sed_into_node = np.zeros(grid.number_of_nodes, dtype=float)\n dz = np.zeros(grid.number_of_nodes, dtype=float)\n len_s_in = s_in.size\n cell_areas = self.cell_areas\n\n for i in s_in[::-1]: #work downstream\n sed_flux_into_this_node = sed_into_node[i]\n sed_flux_out_of_this_node = transport_capacities[i] #we work in volume flux, not volume per se here\n flux_excess = sed_flux_into_this_node - sed_flux_out_of_this_node #gets deposited\n dz[i] = flux_excess/cell_areas*dt_this_step\n sed_into_node[flow_receiver[i]] += sed_flux_out_of_this_node\n\n if stability_condition == 'loose':\n elev_diff = node_z - node_z[flow_receiver]\n delta_dz = dz[flow_receiver] - dz\n node_flattening = self.fraction_gradient_change*elev_diff - delta_dz #note the condition is that gradient may not change by >X%, not must be >0\n #note all these things are zero for a pit node\n most_flattened_nodes = np.argmin(node_flattening[grid.core_nodes])\n most_flattened_nodes = np.take(grid.core_nodes, most_flattened_nodes) #get it back to node number, not core_node number\n most_flattened_val = np.take(node_flattening, most_flattened_nodes)\n if most_flattened_val>=0.:\n break_flag = True #all nodes are stable\n else: # a fraction < 1\n dt_fraction = self.fraction_gradient_change*np.take(elev_diff, most_flattened_nodes)/np.take(delta_dz, most_flattened_nodes)\n #print dt_fraction\n #correct those elevs\n dz *= dt_fraction\n dt_this_step *= dt_fraction\n\n #print np.amax(dz), np.amin(dz)\n\n node_z[grid.core_nodes] += dz[grid.core_nodes]\n\n if break_flag:\n break\n #do we need to reroute the flow/recalc the slopes here? -> NO, slope is such a minor component of Diff we'll be OK\n #BUT could be important not for the stability, but for the actual calc. 
So YES.\n node_S = np.zeros_like(node_S)\n #print link_length[core_draining_nodes]\n node_S[core_draining_nodes] = (node_z-node_z[flow_receiver])[core_draining_nodes]/link_length[core_draining_nodes]\n internal_t += dt_this_step #still in seconds, remember\n\n self.grid=grid\n\n active_nodes = grid.get_active_cell_node_ids()\n if io:\n try:\n io[active_nodes] += node_z[active_nodes]\n except TypeError:\n if type(io)==str:\n elev_name = io\n else:\n return grid, io\n\n else:\n elev_name = node_elevs\n\n if self.return_ch_props:\n #add the channel property field entries,\n #'channel_width', 'channel_depth', and 'channel_discharge'\n Q = self.k_Q*runoff_rate*node_A**self._c\n W = self.k_w*Q**self._b\n H = Q**(0.6*(1.-self._b))*(self.mannings_n/self.k_w)**0.6*node_S**-0.3\n tau = self.fluid_density*self.g*H*node_S\n grid.at_node['channel_width'] = W\n grid.at_node['channel_depth'] = H\n grid.at_node['channel_discharge'] = Q\n grid.at_node['channel_bed_shear_stress'] = tau\n\n\n grid.at_node['fluvial_sediment_transport_capacity'] = transport_capacities\n grid.at_node['fluvial_sediment_flux_into_node'] = sed_into_node\n #elevs set automatically to the name used in the function call.\n if stability_condition == 'tight':\n grid.at_node['effective_fluvial_diffusivity'] = mock_diffusivities\n self.iterations_in_dt = counter\n\n return grid, grid.at_node[elev_name]", "def wait_for_services(self, node_name='default_driver'):\n rospy.init_node(node_name)\n rospy.loginfo('rospy init node '+str(node_name))\n \n rospy.loginfo('waiting for services')\n\n rospy.wait_for_service('/lead/goal')\n rospy.wait_for_service('/lead/next')\n rospy.wait_for_service('/lead/start')\n rospy.wait_for_service('/lead/back')\n\n # Assign callables for the Path services\n self.lead_goal = rospy.ServiceProxy('/lead/goal', Goal)\n self.lead_next = rospy.ServiceProxy('/lead/next', Goal)\n self.lead_start = rospy.ServiceProxy('/lead/start', Goal)\n self.lead_back = rospy.ServiceProxy('/lead/back', Goal)", "def traverse(self, node, branch=None, **kw):\n\n parent = node\n if branch:\n node = node[branch]\n\n if node is None:\n return\n\n if branch and 'type' in parent:\n self.debug('TRAVERSE {parent[type]} -> {branch}:{node[type]}',\n node=node, parent=parent, branch=branch)\n else:\n self.debug('TRAVERSE {node[type]}', node=node)\n\n assert '__traversed' not in node\n node['__traversed'] = True\n\n self.set_location(node)\n\n try:\n handler = self.node_handlers[node['type']]\n except KeyError:\n if self.err.debug_level:\n self.error(err_id=('traverser', 'traverse', 'unknown_node'),\n error='Unknown node type: {[type]}'.format(node))\n\n log.exception('Unknown node type: {[type]}'.format(node))\n key = 'unknown_node_types'\n self.err.metadata.setdefault(key, defaultdict(int))\n self.err.metadata[key][node['type']] += 1\n else:\n with self._debug_level:\n result = handler(node, **kw)\n if isinstance(result, (JSWrapper, JSValue)):\n result.parse_node = node\n return result", "def visit(self, node, args=()):\n if not isinstance(node, broom.Node):\n node = node.node(*args)\n self.visitGraph(node)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called when entering unknown `Node` types. Raise an exception unless overridden.
def unknown_visit(self, node):
    if (self.document.settings.strict_visitor
            or node.__class__.__name__ not in self.optional):
        raise NotImplementedError(
            '%s visiting unknown node type: %s'
            % (self.__class__, node.__class__.__name__))
[ "def currentNodeHasUnknownType(*args, **kwargs):\n \n pass", "def unknownNode(plugin=bool, realClassName=bool, realClassTag=bool):\n pass", "def generic_visit(self, node):\n raise NotImplementedError('Unsupported AST node %s' % node)", "def register_for_new_hierarchy_nodes(self):\n pass", "def allNodeTypes(includeAbstract=bool):\n pass", "def process(self):\n for node_class in self.setup[\"node_classes\"]:\n for node in nuke.allNodes(recurseGroups=True):\n class_name = node.Class()\n if class_name != node_class:\n continue\n\n self.logger.info(\"%s '%s' because its node class (%s) is \"\n \"included in %s\", self.setup[\"mode\"],\n node.name(), class_name,\n self.setup[\"node_classes\"])\n\n self.handle_node(node)", "def add_node(self,node_type):\n #Our start node is more specific than this... Need to have another validation method\n if node_type not in node_types:\n raise Exception('node type must be one of greent.node_types')\n self.definition.node_types.append(node_type)", "def warn_on_undocumented(self, node):\n # Ignore nodes that are created during the expansion of enum nodes:\n # users cannot add documentation for these.\n if node.base and node.base.is_enum_node:\n return\n\n # Likewise for the very abstract generic list type\n elif node.is_generic_list_type:\n return\n\n WarningSet.undocumented_nodes.warn_if(\n not node._doc, 'This node lacks documentation')", "def handle_unknown(self, name, spec, attrs):\n inherited = self.find_inherited(name, spec, attrs)\n\n attributes = spec\n if attributes.get(\"__extend__\", True):\n attributes = self.combine_dicts(inherited, spec)\n\n kls = attributes.get(\"__main__\")\n kwargs = self.attributes_from(attributes)\n\n self.handle_attributes(name, {name.lower():(name, kls, kwargs)}, None, attrs, bookkeeper_method=\"add_custom\")", "def _get_node_types(self):\n for type in self.cfg.node_types:\n self._node_types[type.name] = type.label", "def register_for_deleted_hierarchy_nodes(self):\n pass", "def resolve_type_nodes(self, root: ASTNode) -> None:\n\n errors = []\n for child in itertools.chain(self.properties,\n self.functions.values(),\n self.classes.values()):\n try:\n try:\n # Give priority to narrowest scope (class-level scope in this case)\n child.resolve_type_nodes(self) # type: ignore\n except TypeResolutionError:\n child.resolve_type_nodes(root) # type: ignore\n except TypeResolutionError as e:\n errors.append(str(e))\n if len(errors) > 0:\n raise TypeResolutionError(\n 'Failed to resolve \"{}\" class against \"{}\". Errors: {}'.format(\n self.full_export_name, root.full_export_name, errors\n )\n )", "def register_for_new_hierarchies(self):\n pass", "def unknown_meta_event(self, meta_type, data):\n pass", "def nodeTypeNameBase(node):\n return ('',)", "def pre_ImportFrom(self):\n if self.cur_node.module == \"typing\":\n self.replace(None)", "def test_node_instance(self):\n self.assertTrue(isinstance(self.node, SuperTestNode))", "def _mark_children(self, t):\n for elt_t in t.children():\n self._mark_type(elt_t)", "def handle(self, tree, msg, lastRetVal=None):\n raise NotImplementedError, 'needs to be overridden in a subclass'", "def onUnknown(self, data):\n return CONTINUE" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy the current node, and make it the new acting parent.
def default_visit(self, node):
    newnode = node.copy()
    self.parent.append(newnode)
    self.parent_stack.append(self.parent)
    self.parent = newnode
[ "def set_parent(self, parent: 'Node'):\n if parent == self.parent:\n return\n self.parent = parent\n if parent is not None:\n self.parent.add_child(self)", "def set_parent(self, parent_node):\n self.parent = parent_node", "def setParent(self, parent):\n \n # Don't allow a node to set its parent as one of its children!\n if (parent in self.unorderedChildren):\n logging.error(\"Node.setParent: cannot set a node's child to be its own parent! node = {}; parent = {}\"\n .format(self.name, parent.name))\n return\n \n # 1st, remove this child from its current parent\n if (self.parent is not None):\n self.parent.__removeChild(self)\n \n # 2nd, set the new parent (setting to None is OK)\n self.parent = parent\n if (self.parent is not None):\n self.parent.__addChild(self)", "def replace_with_node(self,node):\n\n self.set_for_parents(node) # connect new to parent on proper locations\n node.parent= self.parent # set node paent correctly\n self.parent = None # disconnect self from the parent\n return node.find_root() # find root again", "def copy(self, g, node=None, node_parent=None):\n\n if node is None:\n g.tree_height = self.tree_height\n node = self.root_node\n\n if node is None:\n return None\n\n newnode = node.clone()\n\n if node_parent is None:\n g.setRoot(newnode)\n else:\n newnode.setParent(node_parent)\n node_parent.replaceChild(node, newnode)\n\n for ci in xrange(len(newnode)):\n GTreeBase.copy(self, g, newnode.getChild(ci), newnode)\n\n return newnode", "def update_parent(self, new_parent) -> None:\n prev_parent = self.parent\n if prev_parent is not None and prev_parent.children is not None:\n prev_parent.set_children(\n [child for child in prev_parent.children if child is not self]\n )\n self.parent = new_parent\n ls = self.left_sibling\n rs = self.right_sibling\n if ls:\n ls.right_sibling = rs\n if rs:\n rs.left_sibling = ls\n self.left_sibling = None\n self.right_sibling = None\n self.update_depth(new_parent.depth + 1)", "def addParent(self, node):\n self.parent = node", "def setParent(self, parent):\n assert isinstance(parent, RedBlackTree) or parent == None\n self.parentTree = parent", "def update_parent(self, old_parent, new_parent):\n\n bpy.ops.object.mode_set(mode='EDIT')\n edit_bones = self.obj.data.edit_bones\n\n for child in edit_bones[old_parent].children:\n child.parent = edit_bones[new_parent]", "def clone(self, parent):\n # noinspection PyArgumentList\n return self.__class__(parent)", "def add_node_with_parent(self,node,parent) :\n node.parent = parent\n if not parent is None:\n parent.add_child(node)", "def set_parent(self, node_id: int):\r\n self.parent = node_id", "def copy(self, g):\n g.parent = self.parent\n g.childs = self.childs[:]", "def copy(self,node):\n node.data = self.data\n node.top = self.top\n if not self.is_leaf():\n node.left = self.left\n node.right = self.right", "def set_parent ( self, parent ):\n self.parent_ref = get_object_ref ( parent )", "def _setParent(self, parent):\n if parent is None:\n self._parent = None\n else:\n self._parent = weakref.ref(parent)", "def set_parent(self, parent):\n self._parent = parent", "def setParent(self,p,uparent=None,eparent=None):\n if self.parent != None:\n self.parent.children.remove(self)\n self.parent = p\n self.uparent = uparent\n self.eparent = eparent\n p.children.append(self)", "def make_node(self):\n self.parent().make_node(self.node, custom=self.custom)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Quote attributes for pseudo-xml
def pseudo_quoteattr(value):
    return '"%s"' % value
[ "def quoteAttr(self, value):\n ret = quoteattr(\"'\"+value+\"'\")\n return ret[2:len(ret)-2]", "def quoteattr(data, entities={}):\r\n data = escape(data, entities)\r\n if '\"' in data:\r\n if \"'\" in data:\r\n data = '\"%s\"' % data.replace('\"', \"&quot;\")\r\n else:\r\n data = \"'%s'\" % data\r\n else:\r\n data = '\"%s\"' % data\r\n return data", "def test_attr_escape_quotes(self):\r\n tmpl = MarkupTemplate(\"\"\"<div xmlns:py=\"http://genshi.edgewall.org/\">\r\n <elem class=\"$myvar\"/>\r\n </div>\"\"\")\r\n self.assertEqual(\"\"\"<div>\r\n <elem class=\"&#34;foo&#34;\"/>\r\n </div>\"\"\", str(tmpl.generate(myvar='\"foo\"')))", "def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, str) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)", "def quoted_attribute_value(self, value):\n quote_with = '\"'\n if '\"' in value:\n if \"'\" in value:\n replace_with = '&quot;'\n value = value.replace('\"', replace_with)\n else:\n quote_with = \"'\"\n return quote_with + value + quote_with", "def writeWithAttributeEscaping(write):\n def _write(data):\n write(escapeForContent(data).replace(b'\"', b'&quot;'))\n return _write", "def _mcpyrate_quotes_attr(attr, *, force_import=False):\n return _mcpyrate_attr(f\"quotes.{attr}\", force_import=force_import)", "def test_quotes(self):\n node1 = Attribute(wraptext(\"id\"), wraptext(\"foo\"), None)\n node2 = Attribute(wraptext(\"id\"), wraptext(\"bar\"))\n node3 = Attribute(wraptext(\"id\"), wraptext(\"foo bar baz\"))\n self.assertIs(None, node1.quotes)\n self.assertEqual('\"', node2.quotes)\n node1.quotes = \"'\"\n node2.quotes = None\n self.assertEqual(\"'\", node1.quotes)\n self.assertIs(None, node2.quotes)\n self.assertRaises(ValueError, setattr, node1, \"quotes\", \"foobar\")\n self.assertRaises(ValueError, setattr, node3, \"quotes\", None)\n self.assertRaises(ValueError, Attribute, wraptext(\"id\"),\n wraptext(\"foo bar baz\"), None)", "def _escape_attr_value(val):\n if isinstance(val, int):\n return val\n return val.replace(u'&', u'&amp;') \\\n .replace(u'\\t', u'&#x9;') \\\n .replace(u'\\n', u'&#xA;') \\\n .replace(u'\\r', u'&#xD;') \\\n .replace(u'\"', u'&quot;') \\\n .replace(u'<', u'&lt;')", "def __repr__(self):\n return str.format(\n 'TextAttributes(letter_spacing={}, paragraph_align={})',\n repr(self.letter_spacing), repr(self.paragraph_align))", "def attributeEscapingDoneOutside(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n return data", "def buildattributestring(self, attr):\n if not isinstance(attr, dict):\n attr = dict()\n\n parmlist = []\n for k, v in attr.items():\n if k not in self.EXCLUDEATTR:\n # any properly formed xml/json should have keywords already\n # escaped however this is just a sanity check. 
also, it\n # misses 'to' which is not a keyword in python, but is\n # treated as such in pymeta oh well\n if keyword.iskeyword(k):\n k += '_'\n\n v = repr(v)\n parmlist.append('%s=%s' % (k, v))\n\n attribstr = ', '.join(parmlist)\n\n return attribstr", "def _attrprint(d, delimiter=', '):\n return delimiter.join(('\"%s\"=\"%s\"' % item) for item in sorted(d.items()))", "def attrs2html(self, attrs):\n ks = []\n if 'class' in attrs.keys():\n ks.append('class')\n if 'id' in attrs.keys():\n ks.append('id')\n for k in attrs.keys():\n if k not in ['id', 'class']:\n ks.append(k)\n ls = []\n for k in ks:\n v = attrs[k]\n if k == 'id':\n v = f'{self.before_id}{v}'\n s = f'{k}=\"{v}\"'\n ls.append(s)\n return \" \".join(ls)", "def text(self) -> str:\n attr_text = self.separator.join(self.values)\n return f'{self.name}=\"{attr_text}\"'", "def format_custom_attr(ddic):\n s = \"\"\n for k1, d2 in ddic.items():\n if s:\n s += \" \"\n s += \"%s\" % k1\n s2 = \"\"\n for k2, v2 in d2.items():\n if s2:\n s2 += \" \"\n s2 += \"%s:%s;\" % (k2, v2)\n s += \" {%s}\" % s2\n return s", "def _xmlattrs_str(self):\n return ''.join(self._xmlattrs)", "def quote(key, as_key=True):\n return el.quote(key, as_key=as_key)", "def quote(value):\n return '\"%(value)s\"' % {\n 'value': str(value),\n }", "def catch_phrase_attribute(cls):\r\n return cls.random_element(cls.attributes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get CSV data from the directive content, from an external file, or from a URL reference.
def get_csv_data(self):
    encoding = self.options.get(
        'encoding', self.state.document.settings.input_encoding)
    error_handler = self.state.document.settings.input_encoding_error_handler
    if self.content:
        # CSV data is from directive content.
        if 'file' in self.options or 'url' in self.options:
            error = self.state_machine.reporter.error(
                '"%s" directive may not both specify an external file and'
                ' have content.' % self.name, nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        source = self.content.source(0)
        csv_data = self.content
    elif 'file' in self.options:
        # CSV data is from an external file.
        if 'url' in self.options:
            error = self.state_machine.reporter.error(
                'The "file" and "url" options may not be simultaneously'
                ' specified for the "%s" directive.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(error)
        source_dir = os.path.dirname(
            os.path.abspath(self.state.document.current_source))
        source = os.path.normpath(os.path.join(source_dir,
                                               self.options['file']))
        source = utils.relative_path(None, source)
        try:
            self.state.document.settings.record_dependencies.add(source)
            csv_file = io.FileInput(source_path=source,
                                    encoding=encoding,
                                    error_handler=error_handler)
            csv_data = csv_file.read().splitlines()
        except IOError as error:
            severe = self.state_machine.reporter.severe(
                'Problems with "%s" directive path:\n%s.'
                % (self.name, SafeString(error)),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(severe)
    elif 'url' in self.options:
        # CSV data is from a URL.
        # Do not import urllib2 at the top of the module because
        # it may fail due to broken SSL dependencies, and it takes
        # about 0.15 seconds to load.
        import urllib.request, urllib.error, urllib.parse
        source = self.options['url']
        try:
            csv_text = urllib.request.urlopen(source).read()
        except (urllib.error.URLError, IOError, OSError, ValueError) as error:
            severe = self.state_machine.reporter.severe(
                'Problems with "%s" directive URL "%s":\n%s.'
                % (self.name, self.options['url'], SafeString(error)),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(severe)
        csv_file = io.StringInput(
            source=csv_text, source_path=source, encoding=encoding,
            error_handler=(self.state.document.settings.\
                           input_encoding_error_handler))
        csv_data = csv_file.read().splitlines()
    else:
        error = self.state_machine.reporter.warning(
            'The "%s" directive requires content; none supplied.'
            % self.name, nodes.literal_block(
            self.block_text, self.block_text), line=self.lineno)
        raise SystemMessagePropagation(error)
    return csv_data, source
[ "def get_csv_data(self):\r\n encoding = self.options.get(\r\n 'encoding', self.state.document.settings.input_encoding)\r\n error_handler = self.state.document.settings.input_encoding_error_handler\r\n if self.content:\r\n # CSV data is from directive content.\r\n if 'file' in self.options or 'url' in self.options:\r\n error = self.state_machine.reporter.error(\r\n '\"%s\" directive may not both specify an external file and'\r\n ' have content.' % self.name, nodes.literal_block(\r\n self.block_text, self.block_text), line=self.lineno)\r\n raise SystemMessagePropagation(error)\r\n source = self.content.source(0)\r\n csv_data = self.content\r\n elif 'file' in self.options:\r\n # CSV data is from an external file.\r\n if 'url' in self.options:\r\n error = self.state_machine.reporter.error(\r\n 'The \"file\" and \"url\" options may not be simultaneously'\r\n ' specified for the \"%s\" directive.' % self.name,\r\n nodes.literal_block(self.block_text, self.block_text),\r\n line=self.lineno)\r\n raise SystemMessagePropagation(error)\r\n source_dir = os.path.dirname(\r\n os.path.abspath(self.state.document.current_source))\r\n source = os.path.normpath(os.path.join(source_dir,\r\n self.options['file']))\r\n source = utils.relative_path(None, source)\r\n try:\r\n self.state.document.settings.record_dependencies.add(source)\r\n csv_file = io.FileInput(source_path=source,\r\n encoding=encoding,\r\n error_handler=error_handler)\r\n csv_data = csv_file.read().splitlines()\r\n except IOError, error:\r\n severe = self.state_machine.reporter.severe(\r\n u'Problems with \"%s\" directive path:\\n%s.'\r\n % (self.name, SafeString(error)),\r\n nodes.literal_block(self.block_text, self.block_text),\r\n line=self.lineno)\r\n raise SystemMessagePropagation(severe)\r\n elif 'url' in self.options:\r\n # CSV data is from a URL.\r\n # Do not import urllib2 at the top of the module because\r\n # it may fail due to broken SSL dependencies, and it takes\r\n # about 0.15 seconds to load.\r\n import urllib2\r\n source = self.options['url']\r\n try:\r\n csv_text = urllib2.urlopen(source).read()\r\n except (urllib2.URLError, IOError, OSError, ValueError), error:\r\n severe = self.state_machine.reporter.severe(\r\n 'Problems with \"%s\" directive URL \"%s\":\\n%s.'\r\n % (self.name, self.options['url'], SafeString(error)),\r\n nodes.literal_block(self.block_text, self.block_text),\r\n line=self.lineno)\r\n raise SystemMessagePropagation(severe)\r\n csv_file = io.StringInput(\r\n source=csv_text, source_path=source, encoding=encoding,\r\n error_handler=(self.state.document.settings.\\\r\n input_encoding_error_handler))\r\n csv_data = csv_file.read().splitlines()\r\n else:\r\n error = self.state_machine.reporter.warning(\r\n 'The \"%s\" directive requires content; none supplied.'\r\n % self.name, nodes.literal_block(\r\n self.block_text, self.block_text), line=self.lineno)\r\n raise SystemMessagePropagation(error)\r\n return csv_data, source", "def load_csv(cls, path):\n with pyglet.resource.file(path, mode='r') as csv_file:\n csv_data = list(csv.reader(csv_file))\n\n return csv_data", "def get_csv():\n def generate(header, lines):\n yield '\"'+header+'\"' + '\\n'\n for line in lines: # lines have already quoted fields\n yield line + '\\n'\n if request.form.get('variants_button'):\n header = request.form['vheader']\n lines = request.form.getlist('variant')\n filename = str(request.form.get('subm_id')) + '.Variant.csv'\n else:\n header = request.form['cdheader']\n lines = request.form.getlist('case')\n filename = 
str(request.form.get('subm_id')) + '.CaseData.csv'\n\n headers = Headers()\n headers.add('Content-Disposition','attachment', filename=filename)\n return Response(generate(header, lines), mimetype='text/csv', headers=headers)", "def csv_reader(topology, schema, file, header=False, encoding=None, separator=None, ignoreExtraFields=False, hot=False, name=None):\n fe = streamsx.spl.op.Expression.expression(Format.csv.name)\n _op = _FileSource(topology, schema, file=file, format=fe, hotFile=hot,encoding=encoding,separator=separator,ignoreExtraCSVValues=ignoreExtraFields)\n return _op.outputs[0]", "def download_csv(type, date_range):\n print('Beginning %s file download with requests' % (type))\n try:\n payload.update({'type': type, 'date_range': date_range})\n response = requests.get(url, params=payload)\n filename = type + '.csv'\n with open(filename, 'wb') as f:\n f.write(response.content)\n except:\n pass\n # todo\n return", "def extract_data(path_to_csv):\n # Typically, one would try to catch exceptions, incase there is an IO error\n # or some other kind of problem. But because this entire script hinges on\n # the idea that we can open this file, I'd rather let the exception surface\n # to the command prompt.\n csvfile = open(path_to_csv)\n return csv.DictReader(csvfile)", "def csv_file_download_with_stream():\n idPARSING_DSF = int(request.args.get('pdsf_id', 0))\n if idPARSING_DSF != 0:\n pdsf = services.estimator.pdsf_file_info(idPARSING_DSF)\n else:\n return redirect(\"/my_task\")\n\n filename = pdsf[\"ParsingFile\"]\n fname = filename.split(\"/\")[-1]\n temp_df = pd.read_csv(filename, encoding='utf-8')\n\n # 그 결과를 앞서 만든 IO stream에 저장\n output_stream = StringIO()\n\n temp_df.to_csv(output_stream, index=False, encoding='utf-8')\n response = Response(\n output_stream.getvalue(),\n mimetype='text/csv; charset=utf-8',\n content_type='application/octet-stream',\n )\n\n response.headers[\"Content-Disposition\"] = f\"attachment; filename={fname}\".encode('utf-8')\n\n return response", "def csv(self):\r\n reader = csv.reader(self.text.splitlines())\r\n return [l for l in reader]", "def extract_source(date):\n with open(\"config.json\") as config:\n config_dict = json.load(config)\n\n csv_source_link = config_dict[\"csv_source_link\"] + f\"{date}.csv\"\n source_link = config_dict[\"source_link\"]\n csv_xpath = config_dict[\"csv_xpath\"]\n\n try:\n source_csv = pd.read_csv(csv_source_link, low_memory=False)\n print(\"Found source csv using csv source link in config...\")\n return source_csv, csv_source_link\n except HTTPError:\n print(f\"Cannot find csv with specified date: {date}\")\n\n driver = webdriver.Safari()\n driver.get(source_link)\n try:\n csv_element = WebDriverWait(driver, 30).until(\n EC.presence_of_element_located((By.XPATH, csv_xpath))\n )\n csv_url = csv_element.get_attribute(\"href\")\n print(f'Source csv found using xpath: {csv_url}')\n finally:\n driver.quit()\n\n if csv_url is not None:\n print(f\"Found source csv using xpath...\")\n return pd.read_csv(csv_url, low_memory=False), csv_url\n\n print(\"Cannot find csv from source using xpath\")\n return None", "def fetch_dataset(url, pandas_impl=pandas):\n\n print(f'fetching dataset at {url}')\n return pandas_impl.read_csv(url, dtype=str)", "def read_csv(self, filename):\n\n self.response.read_csv(filename)", "def get_report():\n response = requests.get(REPORT_URL)\n return csv.DictReader(response.content.decode().split('\\r\\n'))", "def import_csv(in_csv, delimit=','):\n with open(in_csv, encoding='utf-8') as source:\n sourcereader 
= csv.reader(source, delimiter=delimit)\n data_list = []\n for row in sourcereader:\n data_list.append(row)\n return data_list", "def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp", "def csv_import(name, sep, header):\n csv_file = pd.read_csv(name, sep = sep, header = header) ##loading data using read_csv from pandas\n return csv_file #returning the data structure", "def test_list_addresses_csv(self):\n test_service.list_addresses_csv(self)\n\n query_string = [('ids', \"1,2\")]\n headers = { \n 'Accept': 'application/csv',\n }\n response = self.client.open(\n '/{currency}/addresses.csv'.format(currency='btc'),\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def __append_to_csv(self, content):\n csv_content = []\n\n # csv headers\n headers = self.queries[self.key]['headers']\n\n issue_content = [content[ih] for ih in headers['issue']] if 'issue' in headers else []\n if 'comment' in headers:\n for c in content['comments']:\n csv_content.append([c[ch] for ch in headers['comment']] + issue_content) \n else:\n csv_content.append(issue_content)\n\n print(csv_content)\n with open(self.queries[self.key]['output_filename'], 'a+') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerows(csv_content)", "def process_csv(self, file_name: str):", "def parse(cls, path: str) -> List[QuoteModel]:\r\n if not cls.can_ingest(path):\r\n raise Exception('cannot ingest filetype')\r\n\r\n quotes = []\r\n data_frame = pandas.read_csv(path, header=0)\r\n\r\n for i, row in data_frame.iterrows():\r\n quote = QuoteModel(f'\"{row[\"body\"]}\"', row['author'])\r\n quotes.append(quote)\r\n\r\n return quotes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate and return a role function from its language-dependent name, along with a list of system messages. If the role is not found in the current language, check English.
def role(role_name, language_module, lineno, reporter):
    normname = role_name.lower()
    messages = []
    msg_text = []

    if normname in _roles:
        return _roles[normname], messages

    if role_name:
        canonicalname = None
        try:
            canonicalname = language_module.roles[normname]
        except AttributeError as error:
            msg_text.append('Problem retrieving role entry from language '
                            'module %r: %s.' % (language_module, error))
        except KeyError:
            msg_text.append('No role entry for "%s" in module "%s".'
                            % (role_name, language_module.__name__))
    else:
        canonicalname = DEFAULT_INTERPRETED_ROLE

    # If we didn't find it, try English as a fallback.
    if not canonicalname:
        try:
            canonicalname = _fallback_language_module.roles[normname]
            msg_text.append('Using English fallback for role "%s".'
                            % role_name)
        except KeyError:
            msg_text.append('Trying "%s" as canonical role name.'
                            % role_name)
            # The canonical name should be an English name, but just in case:
            canonicalname = normname

    # Collect any messages that we generated.
    if msg_text:
        message = reporter.info('\n'.join(msg_text), line=lineno)
        messages.append(message)

    # Look the role up in the registry, and return it.
    if canonicalname in _role_registry:
        role_fn = _role_registry[canonicalname]
        register_local_role(normname, role_fn)
        return role_fn, messages
    else:
        return None, messages # Error message will be generated by caller.
[ "def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter", "def find_role(cls, keyword):\n return _CompilerRole.find(keyword)", "def get_role(obj, role_name):\n for role in obj.roles:\n if role.name == role_name:\n return role\n return None", "def role_from_first_message(message: Message) -> Dialogue.Role:\n return BaseOefSearchDialogue.Role.AGENT", "def get_by_name(self, name: str) -> tp.Optional[RoleType]:\n pass", "def role_from_first_message(message: Message) -> Dialogue.Role:\n return DefaultDialogue.Role.AGENT", "def validate_role(context, param, value):\n role = context.obj.api.role_by_name(value)\n if role:\n return role\n else:\n raise click.BadParameter(\"\\\"%s\\\" was not found\" % value)", "def system_wide_role(self):\n # FIXME: This method should be in `ggrc_basic_permissions`, since it\n # depends on `Role` and `UserRole` objects\n\n if self.email in getattr(settings, \"BOOTSTRAP_ADMIN_USERS\", []):\n return u\"Superuser\"\n\n role_hierarchy = {\n u'Administrator': 0,\n u'Editor': 1,\n u'Reader': 2,\n u'Creator': 3,\n }\n unique_roles = set([\n user_role.role.name\n for user_role in self.user_roles\n if user_role.role.name in role_hierarchy\n ])\n if len(unique_roles) == 0:\n return u\"No Access\"\n else:\n # -1 as default to make items not in this list appear on top\n # and thus shown to the user\n sorted_roles = sorted(unique_roles,\n key=lambda x: role_hierarchy.get(x, -1))\n return sorted_roles[0]", "def function_lookup(pymod_path):\n module_name, func_name = pymod_path.rsplit('.', 1)\n module = importlib.import_module(module_name)\n shell_function = getattr(module, func_name)\n assert callable(shell_function), shell_function\n return shell_function", "def role(self, name):\n for r, n in itertools.chain(self._role_to_prop.items(), self._ref_role_to_prop.items()):\n if n == name:\n return r\n else:\n return -1", "def get_role_class(expected_rolename):\n \n try:\n role_class = ROLE_CLASSES_BY_TYPE[expected_rolename]\n except KeyError:\n raise tuf.FormatError(repr(expected_rolename)+' not supported')\n else:\n return role_class", "def get_role_name(self):\n try:\n return self.tags['Role']\n except KeyError:\n return None", "async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")", "def parse_roles(s: str) -> Roles:\n #return Roles.PEDESTRIAN #stub\n #return ... 
(s) #template\n \n if s == \"Pedestrian\":\n return Roles.PEDESTRIAN\n elif s == \"Car Driver\":\n return Roles.CAR_DRIVER\n elif s == \"Car Passenger\":\n return Roles.CAR_PASSENGER\n elif s == \"Cyclist\":\n return Roles.CYCLIST \n elif s == \"Other\":\n return Roles.OTHER", "def get_role(cls, name):\n return cls.query.filter_by(name=name).first()", "def _get_role(role_name):\n known_roles = kv().get('charm.azure.roles', {})\n if role_name in known_roles:\n return known_roles[role_name]\n sub_id = kv().get('charm.azure.sub-id')\n role_file = Path('files/roles/{}.json'.format(role_name))\n role_data = json.loads(role_file.read_text())\n role_fullname = role_data['Name'].format(sub_id)\n scope = role_data['AssignableScopes'][0].format(sub_id)\n role_data['Name'] = role_fullname\n role_data['AssignableScopes'][0] = scope\n try:\n log('Ensuring role {}', role_fullname)\n _azure('role', 'definition', 'create',\n '--role-definition', json.dumps(role_data))\n except AlreadyExistsAzureError:\n pass\n known_roles[role_name] = role_fullname\n kv().set('charm.azure.roles', known_roles)\n return role_fullname", "def _existing_only(func):\n\n @wraps(func)\n def _check_existence(db, entity, role=None, *, rolename=None):\n if isinstance(role, str):\n rolename = role\n if rolename is not None:\n # if given as a str, lookup role by name\n role = orm.Role.find(db, rolename)\n if role is None:\n raise ValueError(f\"Role {rolename} does not exist\")\n\n return func(db, entity, role)\n\n return _check_existence", "def locate_qualified_function(qualified_name: str) -> Callable[[], Iterable[ET]]:\n if \".\" not in qualified_name:\n raise QueryException(\"Could not find a '.' in the function name, e.g. my.reddit.rexport.comments\")\n rdot_index = qualified_name.rindex(\".\")\n return locate_function(qualified_name[:rdot_index], qualified_name[rdot_index + 1:])", "def _findFunction(self, functionPath):\n\n # Strip module.funcName type paths to deal with earlier versions\n # of the daemon. module is simply thrown away\n parts = functionPath.split(\".\")\n if len(parts)>1:\n calledName = parts[1]\n else:\n calledName = functionPath\n\n if calledName not in self._functions.keys():\n raise xmlrpc.NoSuchFunction(xmlrpc.XMLRPC.NOT_FOUND, \\\n \"Requested function (%s) does not exist!\" % calledName)\n func = self._functions[calledName]\n\n return func", "def retrieve_role(role_name):\n return RolesManager.retrieve_role(role_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register an interpreted text role by its canonical name.
def register_canonical_role(name, role_fn):
    set_implicit_options(role_fn)
    _role_registry[name] = role_fn
[ "def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '", "def register_token(cls, a_name, a_re, a_type):\n \n if a_type not in cls._token_type:\n raise Exception(\"No token type with name %s has been registered\"%(a_name))\n else:\n cls._token_type[a_type].append(a_name)\n cls._tokens_re[a_name] = a_re", "def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)", "async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")", "def register_role(name):\n role, created = Role.objects.get_or_create(name=name)\n if created:\n return role\n else:\n return False", "def test_used_as_role_type (self):\n self._test_typed(self.create_role())", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, 
extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "def register_access_role(cls):\n try:\n role_name = cls.ROLE\n REGISTERED_ACCESS_ROLES[role_name] = cls\n except AttributeError:\n log.exception(\"Unable to register Access Role with attribute 'ROLE'.\")\n return cls", "def register_as(self, name):\n raise NotImplementedError()\n self.aliases.append(name)\n def gen_aliaser(k, v):\n cherrypy.config['tools.lg_authority.' + k] = v\n def req_handler():\n post_conf = getattr(cherrypy.serving, 'lg_authority_aliased', None)\n if post_conf is not None:\n log('Applying: {0}'.format(post_conf))\n cherrypy.request.namespaces(post_conf)\n log('New config: {0}'.format(cherrypy.request.config))\n def req_aliaser(k, v):\n import traceback\n traceback.print_stack()\n log('Aliasing {0}, {1}'.format(k,v))\n temp = getattr(cherrypy.serving, 'lg_authority_aliased', None)\n if temp is None:\n temp = {}\n cherrypy.serving.lg_authority_aliased = temp\n cherrypy.request.hooks.attach('on_start_resource', req_handler)\n log('Hook attached for aliases')\n temp['tools.lg_authority.' + k] = v\n cherrypy.config.namespaces[name] = gen_aliaser\n cherrypy.Application.namespaces[name] = req_aliaser\n cherrypy.Application.request_class.namespaces[name] = req_aliaser", "def parse_role(self, s, nac):\n org_name = self.find_first_item(s, ('role',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find role in Role section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Role section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Role section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def set_role(self, role):\n self.role.set(role)", "def RegisterName(self, name):\n self._node.RegisterNameForMBox(self, name)", "def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter", "def add(self, name, *args, **kwargs):\n members = {}\n for key, role in self._roles.items():\n try:\n member_arg = kwargs.pop(key)\n except KeyError:\n continue\n members[role] = (member_arg,) if isinstance(member_arg, str) else member_arg\n kwargs['members'] = members\n family = _CompilerFamily(self, name, *args, **kwargs)\n self._families[name] = family\n return family", "def set_role(self, role):\n self.role = role\n for i, 
_var_ in enumerate(self.variants):\n self.variants[i].role = role", "def init_role(role): # -> None:\n ...", "def add_Snode(self,snode):\r\n \r\n if isinstance(snode, Message):\r\n\r\n self.add_role(snode.get_role1())\r\n self.add_role(snode.get_role2())\r\n \r\n if isinstance(snode, Choice):\r\n self.add_role(snode.get_role())\r\n \r\n self.add_node(snode)", "def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register an interpreted text role by its local or language-dependent name.
def register_local_role(name, role_fn):
    set_implicit_options(role_fn)
    _roles[name] = role_fn
[ "def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '", "def addtemplate(self, name, text):\n\t\tself.context[name] = self.parser.parsetext(name, text)", "async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")", "def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def register_token(cls, a_name, a_re, a_type):\n \n if a_type not in cls._token_type:\n raise Exception(\"No token type with name %s has been registered\"%(a_name))\n else:\n cls._token_type[a_type].append(a_name)\n cls._tokens_re[a_name] = a_re", "def register(mgr):\n mgr.set_lang_info(lang,\n silvercity_lexer=XMLLexer(),\n buf_class=XMLBuffer,\n langintel_class=XMLLangIntel,\n import_handler_class=None,\n cile_driver_class=None,\n is_cpln_lang=True)", "def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def test_used_as_role_type (self):\n self._test_typed(self.create_role())", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:ietf:params:xml:ns:yang:vnf-bd', defining_module='vnf-bd', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == 
None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)", "def register_role(name):\n role, created = Role.objects.get_or_create(name=name)\n if created:\n return role\n else:\n return False", "def register_processor( self, language, proc):\n self.registry[language] = proc", "def register_shader(name, **kwargs):\n\n ShaderPart(name, **kwargs)", "def register(mgr):\n mgr.set_lang_info(\"Less\",\n silvercity_lexer=LessLexer(),\n buf_class=LessBuffer,\n langintel_class=LessLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"SCSS\",\n silvercity_lexer=SCSSLexer(),\n buf_class=SCSSBuffer,\n langintel_class=SCSSLangIntel,\n is_cpln_lang=True)\n mgr.set_lang_info(\"Sass\",\n silvercity_lexer=SassLexer(),\n buf_class=SassBuffer,\n langintel_class=SassLangIntel,\n is_cpln_lang=True)", "def add(self, name, *args, **kwargs):\n members = {}\n for key, role in self._roles.items():\n try:\n member_arg = kwargs.pop(key)\n except KeyError:\n continue\n members[role] = (member_arg,) if isinstance(member_arg, str) else member_arg\n kwargs['members'] = members\n family = _CompilerFamily(self, name, *args, **kwargs)\n self._families[name] = family\n return family", "def init_role(role): # -> None:\n ...", "def set_role(self, role):\n self.role = role\n for i, _var_ in enumerate(self.variants):\n self.variants[i].role = role", "def _add_translation_string(self, *args, **kwargs):\r\n self.stringset.add(GenericTranslation(*args, **kwargs))", "def test_local_roles():\n vocab = roles.LocalRolesChoices\n\n assert len(vocab) == 9\n assert vocab['system'].value == 'system'\n assert vocab['system'].name == 'system'\n assert vocab['system'].label == 'r:system'", "def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int,\n inliner: Inliner, options: dict | None = None,\n content: Sequence[str] = (),\n ) -> tuple[list[Node], list[system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno,\n inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add customization options to role functions, unless explicitly set or disabled.
def set_implicit_options(role_fn):
    if not hasattr(role_fn, 'options') or role_fn.options is None:
        role_fn.options = {'class': directives.class_option}
    elif 'class' not in role_fn.options:
        role_fn.options['class'] = directives.class_option
[ "def experimental_options(self):\n ...", "def add_experimental_option(self, name, value):\n ...", "async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")", "def _extrasetup(self, name, func):", "def supports_function_admin(self):\n return False # Change to True when implemented.", "def conf(func):\n\n func._is_conf = True\n return func", "def manipOptions(hideManipOnShift=bool, pivotRotateHandleOffset=int, refreshMode=int, lineSize=float, showPivotRotateHandle=int, hideManipOnCtrl=bool, hideManipOnShiftCtrl=bool, showPlaneHandles=int, handleSize=float, planeHandleOffset=int, pointSize=float, relative=bool, rememberActiveHandleAfterToolSwitch=bool, scale=float, forceRefresh=bool, preselectHighlight=bool, linePick=float, rememberActiveHandle=bool):\n pass", "def supports_function_admin(self):\n return # boolean", "def add_function(self, func):\n self._conf['functions'].append(func)", "def apply_customization(self, serializer, customization):\n # apply fields or exclude\n if customization.fields is not None:\n if len(customization.fields) == 0:\n # customization fields are empty, set Meta.fields to '__all__'\n serializer.Meta.fields = ALL_FIELDS\n else:\n serializer.Meta.fields = customization.fields\n if customization.exclude is not None:\n serializer.Meta.exclude = customization.exclude\n\n # apply extra_kwargs\n if customization.extra_kwargs is not None:\n serializer.Meta.extra_kwargs = customization.extra_kwargs\n\n # apply validate_methods\n for method_name, method in customization.validate_methods.items():\n setattr(serializer, method_name, method)", "def _configure_iam_role(config, crm, iam):\n config = copy.deepcopy(config)\n\n email = SKYPILOT_SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(\n account_id=SKYPILOT_SERVICE_ACCOUNT_ID,\n project_id=config[\"provider\"][\"project_id\"],\n )\n service_account = _get_service_account(email, config, iam)\n\n permissions = VM_MINIMAL_PERMISSIONS\n roles = DEFAULT_SERVICE_ACCOUNT_ROLES\n if config[\"provider\"].get(HAS_TPU_PROVIDER_FIELD, False):\n roles = DEFAULT_SERVICE_ACCOUNT_ROLES + TPU_SERVICE_ACCOUNT_ROLES\n permissions = VM_MINIMAL_PERMISSIONS + TPU_MINIMAL_PERMISSIONS\n\n satisfied, policy = _is_permission_satisfied(\n service_account, crm, iam, permissions, roles\n )\n\n if not satisfied:\n # SkyPilot: Fallback to the old ray service account name for\n # backwards compatibility. Users using GCP before #2112 have\n # the old service account setup setup in their GCP project,\n # and the user may not have the permissions to create the\n # new service account. This is to ensure that the old service\n # account is still usable.\n email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(\n account_id=DEFAULT_SERVICE_ACCOUNT_ID,\n project_id=config[\"provider\"][\"project_id\"],\n )\n logger.info(f\"_configure_iam_role: Fallback to service account {email}\")\n\n ray_service_account = _get_service_account(email, config, iam)\n ray_satisfied, _ = _is_permission_satisfied(\n ray_service_account, crm, iam, permissions, roles\n )\n logger.info(\n \"_configure_iam_role: \"\n f\"Fallback to service account {email} succeeded? 
{ray_satisfied}\"\n )\n\n if ray_satisfied:\n service_account = ray_service_account\n satisfied = ray_satisfied\n elif service_account is None:\n logger.info(\n \"_configure_iam_role: \"\n \"Creating new service account {}\".format(SKYPILOT_SERVICE_ACCOUNT_ID)\n )\n # SkyPilot: a GCP user without the permission to create a service\n # account will fail here.\n service_account = _create_service_account(\n SKYPILOT_SERVICE_ACCOUNT_ID,\n SKYPILOT_SERVICE_ACCOUNT_CONFIG,\n config,\n iam,\n )\n satisfied, policy = _is_permission_satisfied(\n service_account, crm, iam, permissions, roles\n )\n\n assert service_account is not None, \"Failed to create service account\"\n\n if not satisfied:\n logger.info(\n \"_configure_iam_role: \" f\"Adding roles to service account {email}...\"\n )\n _add_iam_policy_binding(service_account, policy, crm, iam)\n\n account_dict = {\n \"email\": service_account[\"email\"],\n # NOTE: The amount of access is determined by the scope + IAM\n # role of the service account. Even if the cloud-platform scope\n # gives (scope) access to the whole cloud-platform, the service\n # account is limited by the IAM rights specified below.\n \"scopes\": [\"https://www.googleapis.com/auth/cloud-platform\"],\n }\n if _is_head_node_a_tpu(config):\n # SKY: The API for TPU VM is slightly different from normal compute instances.\n # See https://cloud.google.com/tpu/docs/reference/rest/v2alpha1/projects.locations.nodes#Node\n account_dict[\"scope\"] = account_dict[\"scopes\"]\n account_dict.pop(\"scopes\")\n config[\"head_node\"][\"serviceAccount\"] = account_dict\n else:\n config[\"head_node\"][\"serviceAccounts\"] = [account_dict]\n\n return config", "def decorate(f, **kwargs):\n f = debug_option(f)\n f = verbose_option(f)\n f = click.help_option(\"-h\", \"--help\")(f)\n\n # if the format option is being allowed, it needs to be applied to `f`\n if \"format\" not in disable_opts:\n f = format_option(f)\n\n # if the --map-http-status option is being allowed, ...\n if \"map_http_status\" not in disable_opts:\n f = map_http_status_option(f)\n\n return f", "def add_options(config):\n return config[\"module\"][\"application\"].add_options(config)", "async def addRole(self, ctx, role: discord.Role):\n guild = self.bot.cache.get_setting(ctx.guild.id)\n allowed_roles = guild.allowed_roles\n if not allowed_roles:\n roles = []\n roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to allowed roles list, now any person with `{role.name}` can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)\n return\n if role.id in allowed_roles:\n return await ctx.reply(\n f\":negative_squared_cross_mark: | `{role.name}` role already has permissions to make announcements!\"\n )\n allowed_roles.append(role.id)\n await self.bot.pool.execute(\n \"UPDATE settings SET allowed_roles = $1 WHERE guild_id = $2\",\n allowed_roles,\n ctx.guild.id,\n )\n # updating the cache\n self.bot.cache.settings[ctx.guild.id] = {\n \"prefix\": guild[\"prefix\"],\n \"allowed_roles\": allowed_roles,\n }\n embed = generate_embed(\n f\":thumbsup: | Successfully added `{role.name}` to 
allowed roles list, now any person with `{role.name}` role can make announcements!\"\n )\n embed.set_footer(\n text=f\"Tip: To remove a role from making announcements, use: `{ctx.prefix}config remRole <role>`\",\n icon_url=ctx.guild.icon_url,\n )\n await ctx.reply(embed=embed)", "def setup_function(func):\n func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)", "async def _setfullaccessrole(self, ctx: commands.Context, role: discord.Role):\n await self.config.guild(ctx.guild).fullaccessrole.set(role.id)\n await ctx.send(f\"Full rcon access role has been set to {role}\")", "async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)", "async def readd_roles(self, ctx):\n config = hf.database_toggle(ctx, self.bot.db['readd_roles'])\n if config['enable']:\n if not ctx.me.guild_permissions.manage_roles:\n await ctx.send(\"I lack permission to manage roles. Please fix that before enabling this\")\n hf.database_toggle(ctx, self.bot.db['readd_roles'])\n return\n await ctx.send(f\"I will readd roles to people who have previously left the server\")\n else:\n await ctx.send(\"I will NOT readd roles to people who have previously left the server\")\n if 'users' not in config:\n config['users'] = {}\n await hf.dump_json()", "def common_options(*args, **kwargs):\n\n disable_opts = kwargs.get(\"disable_options\", [])\n\n def decorate(f, **kwargs):\n \"\"\"\n Work of actually decorating a function -- wrapped in here because we\n want to dispatch depending on how `common_options` is invoked\n \"\"\"\n f = debug_option(f)\n f = verbose_option(f)\n f = click.help_option(\"-h\", \"--help\")(f)\n\n # if the format option is being allowed, it needs to be applied to `f`\n if \"format\" not in disable_opts:\n f = format_option(f)\n\n # if the --map-http-status option is being allowed, ...\n if \"map_http_status\" not in disable_opts:\n f = map_http_status_option(f)\n\n return f\n\n return detect_and_decorate(decorate, args, kwargs)", "def test_roles_decorator_overrides_env_roles():\n @roles('r1')\n def command():\n pass\n eq_effective_roles(command, ['r1'], env={'roledefs': fake_roles,\n 'roles': ['r2']})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For roles which simply wrap a given `node_class` around the text.
def register_generic_role(canonical_name, node_class):
    role = GenericRole(canonical_name, node_class)
    register_canonical_role(canonical_name, role)
[ "def add_class_to_node(node, classname):\n\n if 'class' in node.attrib:\n node.attrib['class'] += ' ' + classname\n else:\n node.attrib['class'] = classname", "def node_roles(node):\n return \"_\".join(sorted(node[\"roles\"]))", "def add_child_classes(node):\n for para in node.traverse(nodes.paragraph):\n para[\"classes\"] = ([] if \"classes\" in para else para[\"classes\"]) + [\"card-text\"]\n for title in node.traverse(nodes.title):\n title[\"classes\"] = ([] if \"classes\" in title else title[\"classes\"]) + [\n \"card-title\"\n ]", "def expand_roles(self):\n for i in range(len(self.roles)):\n role = self.roles[i]\n if role in NodeLayout.DEPRECATED_ROLES:\n AppScaleLogger.warn(\"'{}' role has been deprecated, please use '{}'\"\n .format(role, NodeLayout.DEPRECATED_ROLES[role]))\n self.roles.remove(role)\n self.roles.append(NodeLayout.DEPRECATED_ROLES[role])\n\n if 'master' in self.roles:\n self.roles.remove('master')\n self.roles.append('shadow')\n self.roles.append('load_balancer')\n\n # TODO: remove these, db_slave and taskqueue_slave are currently deprecated.\n if 'db_slave' in self.roles or 'db_master' in self.roles \\\n and 'database' not in self.roles:\n self.roles.append('database')\n\n if 'taskqueue_slave' in self.roles or 'taskqueue_master' in self.roles \\\n and 'taskqueue' not in self.roles:\n self.roles.append('taskqueue')\n\n # Remove any duplicate roles\n self.roles = list(set(self.roles))", "def insert_role_node(self, node):\n self._insert_child(node)\n return self", "async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet configured or does not exist.\")", "def add_class(self, value: str) -> HTMLNode:\n return self.add_attr(\"class\", value)", "def add_Snode(self,snode):\r\n \r\n if isinstance(snode, Message):\r\n\r\n self.add_role(snode.get_role1())\r\n self.add_role(snode.get_role2())\r\n \r\n if isinstance(snode, Choice):\r\n self.add_role(snode.get_role())\r\n \r\n self.add_node(snode)", "def _cls(self, tag_name, class_name):\n return 'descendant-or-self::node()/%s[contains(concat(\" \", normalize-space(@class), \" \"), \" %s \")]' % (tag_name, class_name)", "def massage_roles(self):\n if not self.opts.role:\n self.guess_role()\n if self.opts.role:\n self.opts.role = [xx.lower() for xx in self.opts.role]\n for role in [nrole for nrole in VALID_ROLES\n if nrole[:4] == 'node']:\n if role in self.opts.role and not 'node' in self.opts.role:\n self.opts.role.append('node')\n if 'broker' in self.opts.role and not 'client' in self.opts.role:\n self.opts.role.append('client')\n self.logger.info('Please note: --role=broker implicitly '\n 'enables --role=client to ensure /usr/bin/rhc '\n 'is available for testing and '\n 'troubleshooting.')", "def add_tempest_roles():\n _add_additional_roles(TEMPEST_ROLES)", "def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = 
nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def get_role(row):\n role = row[6]\n\n # Normalize roles Lead Link and Rep Link, as they contain the circle name as well\n if \"Lead Link\" in role:\n role = \"Lead Link\"\n\n if \"Rep Link\" in role:\n role = \"Rep Link\"\n\n return role", "def replace_cluster_role(self, name, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.replace_cluster_role_with_http_info(name, body, **kwargs)\n else:\n (data) = self.replace_cluster_role_with_http_info(name, body, **kwargs)\n return data", "def nodeOutliner(string, replace=\"string\", docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", lastMenuChoice=\"string\", numberOfPopupMenus=bool, connectivity=\"string\", width=int, dragCallback=\"string\", showConnectedOnly=bool, highlightColor=float, annotation=\"string\", enable=bool, longNames=bool, preventOverride=bool, nodesDisplayed=bool, showNonKeyable=bool, showInputs=bool, showOutputs=bool, attrAlphaOrder=\"string\", pressHighlightsUnconnected=bool, menuCommand=\"string\", exists=bool, showPublished=bool, showNonConnectable=bool, showHidden=bool, multiSelect=bool, addObject=\"string\", niceNames=bool, enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, useTemplate=\"string\", noBackground=bool, fullPathName=bool, dropCallback=\"string\", selectCommand=\"string\", popupMenuArray=bool, addCommand=\"string\", removeAll=bool, backgroundColor=float, noConnectivity=bool, manage=bool, showReadOnly=bool, menuMultiOption=bool, isObscured=bool, currentSelection=bool, remove=\"string\"):\n pass", "def spelling_ignore(role, rawtext, text, lineno, inliner,\n options={}, content=[]):\n node = nodes.Text(text)\n setattr(node, \"spellingIgnore\", True)\n return [node], []", "def _process_wrap_node(\n self,\n wrap_node: nodes.Element,\n token: SyntaxTreeNode,\n explicit: bool,\n classes: list[str],\n path_dest: str,\n ):\n self.add_line_and_source_path(wrap_node, token)\n self.copy_attributes(token, wrap_node, (\"class\", \"id\", \"title\"))\n self.current_node.append(wrap_node)\n\n if explicit:\n inner_node = nodes.inline(\"\", \"\", classes=classes)\n with self.current_node_context(inner_node):\n self.render_children(token)\n elif isinstance(wrap_node, addnodes.download_reference):\n inner_node = nodes.literal(path_dest, path_dest, classes=classes)\n else:\n inner_node = nodes.inline(\"\", \"\", classes=classes)\n\n wrap_node.append(inner_node)", "def wrap_namespace(self, node):\n self._push_splicer(\"class\")\n for cls in node.classes:\n if not cls.wrap.lua:\n continue\n name = cls.name\n self.reset_file()\n self._push_splicer(name)\n self.wrap_class(cls)\n # self.write_extension_type(cls)\n self._pop_splicer(name)\n self._pop_splicer(\"class\")\n\n self.reset_file()\n if node.functions:\n 
self._push_splicer(\"function\")\n self.wrap_functions(None, node.functions)\n self._pop_splicer(\"function\")\n\n for ns in node.namespaces:\n if ns.wrap.lua:\n self.wrap_namespace(ns)", "def addNodeType(self, nodeClass, paths, override=True):\n return NodeLibrary.addNodeType(self, nodeClass=nodeClass, paths=paths, override=override)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Auxiliary function to set options['classes'] and delete options['class'].
def set_classes(options):
    if 'class' in options:
        assert 'classes' not in options
        options['classes'] = options['class']
        del options['class']
[ "def reset_class(self, classes):\n self._clear_cached_op()\n self.classes = classes\n self.num_class = len(classes)", "def remove_class(class_id):\r\n return 200", "def __init__ (self, options=[]):\r\n for opt in options:\r\n setattr(self, opt, None)", "def remove_classes(\n seg: np.ndarray,\n rm_classes: Sequence[int],\n classes: Dict[int, int] = None,\n background: int = 0,\n ) -> Union[np.ndarray, Tuple[np.ndarray, Dict[int, int]]]:\n for rc in rm_classes:\n seg[seg == rc] = background\n if classes is not None:\n classes.pop(rc)\n if classes is None:\n return seg\n else:\n return seg, classes", "def remove_class(self, value: str) -> HTMLNode:\n return self.remove_attr(\"class\", value)", "def _reset_options(self, cc: int):\n if cc != self.current_cc:\n if cc not in _generator_ccs:\n raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.')\n self.current_cc = cc\n self.options = option_registry.options_for_cc(self.current_cc, self.operation_kind)", "def removeClass(node, name):\n s = node.getAttribute(\"class\")\n if s:\n classes = s.split(None)\n classes.remove(name)\n if classes:\n node.setAttribute(\"class\", ' '.join(classes))\n else:\n node.removeAttribute(\"class\")", "def _reset_class_weights(self):\n classifier = self._get_classifier()\n \n for attr in \"class_prior\", \"class_weight\":\n if hasattr(classifier, attr):\n setattr(classifier, attr, None)\n break", "def remove_css_classes(self, *css_classes):\n for cls in css_classes:\n try:\n self._css_classes.remove(cls)\n except KeyError:\n pass", "def remove_class(self, klass):\n self.attrs.remove_value(self.AttributeNames.CLASS, klass)\n return self", "def cmd_remove_mo_class(self):\n self.save()\n self.add_remove(self.OPERATION_REMOVE_MO_CLASS)\n self.quit()", "def set_class_attr(\n self, class_name: str, attr: str, value: Any\n ) -> \"DatacodeOptions\":\n import datacode as dc\n\n logger.debug(\n f\"Setting datacode options for class attr {class_name}.{attr} to {value}\"\n )\n\n klass = getattr(dc, class_name)\n self._set_class_attr(klass, attr, value)\n return self", "def set_options(self, options):\n self.n_iter = options['n_iterations']\n self.k_folds = options['k_folds']\n self.splitting_method = 'kfold'\n self._split_type = 'user'\n self.evaluator.set_kfolds(self.k_folds)\n\n if self.k_folds == 1:\n self.splitting_method = 'naive'\n self.options = options.copy()\n\n for option, value in options.items():\n if hasattr(self, '_' + option):\n setattr(self, '_' + option, value)", "def test_swarm_updates_parsed_options_when_single_userclass_specified(self):\n\n class User1(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n class User2(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n self.environment.web_ui.userclass_picker_is_active = True\n self.environment.available_user_classes = {\"User1\": User1, \"User2\": User2}\n\n response = requests.post(\n \"http://127.0.0.1:%i/swarm\" % self.web_port,\n data={\n \"user_count\": 5,\n \"spawn_rate\": 5,\n \"host\": \"https://localhost\",\n \"user_classes\": [\"User1\"],\n },\n )\n self.assertListEqual([\"User1\"], response.json()[\"user_classes\"])\n\n # stop\n gevent.sleep(1)\n response = requests.get(\"http://127.0.0.1:%i/stop\" % self.web_port)\n self.assertEqual(response.json()[\"message\"], \"Test stopped\")\n\n # Checking environment.parsed_options.user_classes was updated\n self.assertListEqual(self.environment.parsed_options.user_classes, [\"User1\"])", "def remove_error_class(klass):\n if isinstance(klass, python.str_types):\n if klass 
not in ERROR_CLASS_MAP:\n raise ValueError('Code %s is not registered' % (klass,))\n elif isinstance(klass, python.class_types):\n classes = ERROR_CLASS_MAP.values()\n if klass not in classes:\n raise ValueError('Class %s is not registered' % (klass,))\n\n klass = ERROR_CLASS_MAP.keys()[classes.index(klass)]\n else:\n raise TypeError(\"Invalid type, expected class or string\")\n\n del ERROR_CLASS_MAP[klass]", "def unregister():\n for c in classes:\n bpy.utils.unregister_class(c)\n ui.reset_avatar_properties()", "def reset_class(cls):\n cls.infected.clear()\n cls.target = None", "def test_swarm_updates_parsed_options_when_multiple_userclasses_specified(self):\n\n class User1(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n class User2(User):\n wait_time = constant(1)\n\n @task\n def t(self):\n pass\n\n self.environment.web_ui.userclass_picker_is_active = True\n self.environment.available_user_classes = {\"User1\": User1, \"User2\": User2}\n\n response = requests.post(\n \"http://127.0.0.1:%i/swarm\" % self.web_port,\n data={\n \"user_count\": 5,\n \"spawn_rate\": 5,\n \"host\": \"https://localhost\",\n \"user_classes\": [\"User1\", \"User2\"],\n },\n )\n self.assertListEqual([\"User1\", \"User2\"], response.json()[\"user_classes\"])\n\n # stop\n gevent.sleep(1)\n response = requests.get(\"http://127.0.0.1:%i/stop\" % self.web_port)\n self.assertEqual(response.json()[\"message\"], \"Test stopped\")\n\n # Checking environment.parsed_options.user_classes was updated\n self.assertListEqual(self.environment.parsed_options.user_classes, [\"User1\", \"User2\"])", "def remove_feature_classes():\r\n arcpy.env.workspace = OUTPUT_WORKSPACE\r\n feature_classes = arcpy.ListFeatureClasses(\"*\")\r\n\r\n for fc in feature_classes:\r\n count1 = str(arcpy.GetCount_management(fc))\r\n if count1 == \"0\":\r\n fclass = r\"{}\\{}\".format(OUTPUT_WORKSPACE, fc)\r\n arcpy.Delete_management(fclass)", "def set_hypercube_class(self):\n self.class_dict = dict.fromkeys(list(set([x.class_id for x in self.examples])), 0)\n old_class = self.hypercube_class\n if not self.examples:\n self.hypercube_class = EMPTY_HYPERCUBE_INDICATOR\n else:\n max_class = -1\n for class_id in self.class_dict.keys():\n class_size = len(list(filter(lambda x: x.class_id == class_id, self.examples)))\n # adding the number of examples to the class\n self.class_dict[class_id] += class_size\n if class_size > max_class:\n max_class = class_size\n self.hypercube_class = class_id\n if not old_class == self.hypercube_class:\n print(\"Changed hypercube's class!\\tCoords: \" + str(\n self.coords) + \"\\tOld class: \" + old_class + \"\\tNew class: \" + self.hypercube_class)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse `input_lines` and modify the `document` node in place.
def run(self, input_lines, document, input_offset=0, match_titles=True,
        inliner=None):
    self.language = languages.get_language(
        document.settings.language_code)
    self.match_titles = match_titles
    if inliner is None:
        inliner = Inliner()
    inliner.init_customizations(document.settings)
    self.memo = Struct(document=document,
                       reporter=document.reporter,
                       language=self.language,
                       title_styles=[],
                       section_level=0,
                       section_bubble_up_kludge=False,
                       inliner=inliner)
    self.document = document
    self.attach_observer(document.note_source)
    self.reporter = self.memo.reporter
    self.node = document
    results = StateMachineWS.run(self, input_lines, input_offset,
                                 input_source=document['source'])
    assert results == [], 'RSTStateMachine.run() results should be empty!'
    self.node = self.memo = None    # remove unneeded references
[ "def process(self, lines):\n for line in lines:\n self._process_line(line)", "def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)", "def updateLineParsing(self):\n self.titleLine = self.parseLine(self.getTitleLine())\n self.outputLines = [self.parseLine(line) for line in\n self.getOutputLines(False)]\n if self.origOutputLines:\n self.origOutputLines = [self.parseLine(line) for line in\n self.getOutputLines(True)]", "def _mux(docs: list, process_stdin: IO, q: queue.Queue):\n for i, doc in enumerate(docs):\n count = 0\n sents = doc.strip().split('\\n')\n for line in sents:\n line = line + '\\n'\n process_stdin.write(line.encode('utf-8'))\n count += 1\n q.put((i, count))\n q.put(None) #poison\n process_stdin.close()", "def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n root = markdown.etree.Element(\"div\")\r\n self.parseChunk(root, '\\n'.join(lines))\r\n return markdown.etree.ElementTree(root)", "def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)", "def changeOutputLines(self, lines, keepBlanks=False):\n self.outputLines = []\n for line in lines:\n newLine = self.parseLine(line)\n if keepBlanks or newLine:\n self.outputLines.append(newLine)\n if self.useBullets:\n self.origOutputLines = self.outputLines[:]\n self.addBullets()\n if self.useTables:\n self.origOutputLines = self.outputLines[:]\n self.addTables()", "def process_lines(self, lines, file):\n return lines", "def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.document().markContentsDirty(block.position(),\r\n block.position() + block.length())\r\n self.rehighlightBlock(block)", "def applyEventsToLines(lines, events):\n for event in events:\n for change in event[\"changes\"]:\n ix = change[\"lineIndex\"]\n lines[ix : ix + len(change[\"oldLines\"])] = change[\"newLines\"]", "def import_doc(client: Client, input: list[str]):\n if not client.is_connected:\n ctx = click.get_current_context()\n ctx.fail(\"Import failed: Not connected to a neo4j instance.\")\n for fp in input:\n graph = read_doc(fp)\n client.import_doc(graph)", "def _process_lines(lines: typing.List[str], offset: int, registration_processor: RegistrationProcessor):\n\n onnx_op = \"ONNX_OPERATOR_KERNEL_CLASS_NAME\"\n onnx_op_len = len(onnx_op)\n onnx_typed_op = \"ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME\"\n onnx_typed_op_len = len(onnx_typed_op)\n onnx_versioned_op = \"ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME\"\n onnx_versioned_op_len = len(onnx_versioned_op)\n onnx_versioned_typed_op = \"ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME\"\n onnx_versioned_typed_op_len = len(onnx_versioned_typed_op)\n onnx_two_typed_op = \"ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME\"\n onnx_two_typed_op_len = len(onnx_two_typed_op)\n onnx_versioned_two_typed_op = \"ONNX_OPERATOR_VERSIONED_TWO_TYPED_KERNEL_CLASS_NAME\"\n onnx_versioned_two_typed_op_len = len(onnx_versioned_two_typed_op)\n end_marks = tuple([\");\", \")>\", \")>,\", \")>,};\", \")>};\"])\n\n end_mark = \"\"\n lines_to_process = []\n\n # merge line if split over multiple.\n # original lines will be in lines_to_process. 
merged and stripped line will be in code_line\n while True:\n lines_to_process.append(lines[offset])\n stripped = lines[offset].strip()\n line_end = False\n\n for mark in end_marks:\n if stripped.endswith(mark):\n end_mark = mark\n line_end = True\n break\n\n if line_end:\n break\n\n offset += 1\n if offset > len(lines):\n log.error(\"Past end of input lines looking for line terminator.\")\n sys.exit(-1)\n\n code_line = \"\".join([line.strip() for line in lines_to_process])\n\n if onnx_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 7, Cos)>,\n trim_at = code_line.index(onnx_op) + onnx_op_len + 1\n *_, domain, start_version, op_type = (arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\"))\n\n registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, None)\n\n elif onnx_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 7, double, Sin)>,\n trim_at = code_line.index(onnx_typed_op) + onnx_typed_op_len + 1\n *_, domain, start_version, type, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, type)\n\n elif onnx_versioned_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 1, 10, Hardmax)>,\n trim_at = code_line.index(onnx_versioned_op) + onnx_versioned_op_len + 1\n *_, domain, start_version, end_version, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), None\n )\n\n elif onnx_versioned_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 1, 10, float, LogSoftmax)>,\n trim_at = code_line.index(onnx_versioned_typed_op) + onnx_versioned_typed_op_len + 1\n *_, domain, start_version, end_version, type, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), type\n )\n\n elif onnx_two_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,\n trim_at = code_line.index(onnx_two_typed_op) + onnx_two_typed_op_len + 1\n *_, domain, start_version, type1, type2, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), None, type1 + \", \" + type2\n )\n\n elif onnx_versioned_two_typed_op in code_line:\n # e.g. 
BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,\n trim_at = code_line.index(onnx_versioned_two_typed_op) + onnx_versioned_two_typed_op_len + 1\n *_, domain, start_version, end_version, type1, type2, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), type1 + \", \" + type2\n )\n\n else:\n log.warning(f\"Ignoring unhandled kernel registration variant: {code_line}\")\n for line in lines_to_process:\n registration_processor.process_other_line(line)\n\n return offset + 1", "def update_lines(self, lines, update_org_width=False):\n self._org_lines = lines\n self.init_layout(self._height, self._org_width, 0, update_org_width=update_org_width)", "def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp", "def process_tokens(self, tokens):\n self._tokens = list(tokens)\n self._pos = 0\n self._ast = self._assert(self._chunk(), 'input to be a program')\n self._ast.store_token_groups(self._tokens)", "def update(self, new_documents: List[str]):\n if len(new_documents) > 0:\n tokenized_docs = [self.tokenizer(doc) for doc in new_documents]\n # for some reason mypy doesn't get that this converts from a list of tuples to a tuple of lists\n words, positions = map(list, zip(*tokenized_docs)) # type: ignore\n else:\n words, positions = [], []\n self.index.add_documents(words, positions)\n self.documents += new_documents\n self.tokenized_documents += words\n for ngram, index in self.ngram_indexes.items():\n ngram_tok = core.ngrams_from_documents(\n words,\n ngram,\n self.ngram_sep,\n self.ngram_prefix,\n self.ngram_suffix,\n )\n index.index.add_documents(ngram_tok)", "def parse(cls, input):", "def preproc_doc(document):\n\n # Each document is a list of lines\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n # set a random seed for reproducability\n hash_object = hashlib.md5(document[0])\n rng = random.Random(int(hash_object.hexdigest(), 16) % (10**8))\n\n # Each document is composed of a list of text lines. Each text line is a\n # paragraph. 
We split the line into sentences but keep the paragraph grouping.\n # The utility functions below expect the document to be split by paragraphs.\n list_of_paragraphs = []\n for line in document:\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n sent_tokens = [tokenizer.tokenize(sent) for sent in sents if sent]\n list_of_paragraphs.append(sent_tokens)\n\n # In case of any empty paragraphs, remove them.\n list_of_paragraphs = [x for x in list_of_paragraphs if x]\n\n # Convert the list of paragraphs into TrainingInstance object\n # See preprocessing_utils.py for definition\n if FLAGS.format == FORMAT_BINARY:\n instances = create_instances_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n elif FLAGS.format == FORMAT_PARAGRAPH:\n instances = create_paragraph_order_from_document(list_of_paragraphs,\n FLAGS.max_seq_length, rng)\n\n # Convert token lists into ids and add any needed tokens and padding for BERT\n tf_examples = [\n convert_instance_to_tf_example(tokenizer, instance,\n FLAGS.max_seq_length)[0]\n for instance in instances\n ]\n\n # Serialize TFExample for writing to file.\n tf_examples = [example.SerializeToString() for example in tf_examples]\n\n return tf_examples", "def _set_input(self, input_: str):\n if len(input_) != 0:\n rows = input_.split('\\n')\n self.command = []\n self.params = []\n for cmd in rows:\n split = cmd.split()\n self.command.append(split[0])\n self.params.append(int(split[1]))", "def __process_input_file(self, output):\n with open(self.input_file, 'r') as f:\n for line in f:\n if line.replace(' ', '') == \"\\\"playlists\\\":[\\n\":\n # playlist_start = True\n output.write(line)\n self.__process_playlist(f, output)\n else:\n output.write(line)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Jump to input line `abs_line_offset`, ignoring jumps past the end.
def goto_line(self, abs_line_offset):
    try:
        self.state_machine.goto_line(abs_line_offset)
    except EOFError:
        pass
[ "def jump_to_line(self, lineno=None):\r\n if lineno is not None:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(lineno)\r\n return\r\n\r\n maximum = self.blockCount()\r\n line = QInputDialog.getInt(self, self.tr(\"Jump to Line\"),\r\n self.tr(\"Line:\"), 1, 1, maximum, 1)\r\n if line[1]:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(line[0] - 1)", "def jump_to_line(self, lineno):\r\n self._main.editor_jump_to_line(lineno=lineno)", "def goToLine(self, lineno):\n # Go to start and move pointer to given line no\n self.goToStart()\n line_count = 1\n eof = False\n pos = 0\n while not eof and line_count != lineno:\n line = self.file_obj.readline()\n if not line:\n eof = True\n continue\n pos = self.file_obj.tell()\n line_count += 1\n\n self.line_no = line_count\n self.offset = pos", "def editor_go_to_line(self, line):\r\n editorWidget = self.get_current_editor()\r\n if editorWidget:\r\n editorWidget.jump_to_line(line)", "def init_goto_line(self):\n self.local_state = State.GOTO_LINE\n self.setFocus()\n self.setValidator(self.int_validator)", "def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)", "def _validate_lineno(self, target_line):\n if target_line < 1:\n raise AtTopOfFile()\n elif target_line > self.number_of_lines():\n raise PastEndOfBuffer(str(target_line))", "def jmp(self, offset):\n self.ip += int(offset)", "def goto(self, line):\n\n self._text.mark_set(tk.INSERT, '%s.1' % line)\n self.highlight_line(line)\n self.update_info_bar()\n self.update_line_numbers()", "def op_jump(self, offset):\n\n old_pc = self._opdecoder.program_counter\n\n # The offset to the jump instruction is known to be a 2-byte\n # signed integer. We need to make it signed before applying\n # the offset.\n if (offset >= (1<<15)):\n offset = - (1<<16) + offset\n log(\"Jump unconditionally to relative offset %d\" % offset)\n\n # Apparently reading the 2 bytes of operand *isn't* supposed\n # to increment the PC, thus we need to apply this offset to PC\n # that's still pointing at the 'jump' opcode. Hence the -2\n # modifier below.\n new_pc = self._opdecoder.program_counter + offset - 2\n self._opdecoder.program_counter = new_pc\n log(\"PC has changed from from %x to %x\" % (old_pc, new_pc))", "def breakpoint(self, line):\n\n source = self.chrome.driver.find_element(By.ID, \"sources-panel-sources-view\")\n assert source is not None, \"Failed to find sources.\"\n lines = source.find_elements(By.CSS_SELECTOR, \"div[class=\\'CodeMirror-linenumber CodeMirror-gutter-elt\\']\")\n length = len(lines)\n assert len(lines) >= line, \"Line {0} not found! 
Total lines of code: {1}\".format(str(line), str(length))\n lines[line - 1].click()\n sleep(1)\n Log.info(\"Toggle breakpoint on line {0}\".format(str(line)))", "def offset_from_line(line, firstlineno, lnotab):\n # TODO: Handle negetive offsets!\n n = len(lnotab)\n assert n & 1 == 0\n\n l = firstlineno\n tab = lnotab\n offset = 0\n index = 0\n while tab:\n index += 1\n b, d, *tab = tab\n l += d\n offset += b\n if l >= line:\n return offset, index\n raise IndexError(\"Line out of bound\")", "def jump_to(self, bytes):\n new_pos = self.find(bytes, max(0, self.position))\n if new_pos > -1:\n new_pos -= self.position\n if self._position == -1:\n self._position = 0\n self._position += (new_pos + len(bytes) - 1)\n return True\n else:\n raise StopIteration", "def gotoLine(self, n):\n self.fileIndex = n", "def prev_line(rule):\n return shift_line(-1, rule)", "def select_line(self, line):\n cursor = self.textCursor()\n cursor.movePosition(QTextCursor.Start, QTextCursor.MoveAnchor, 1)\n cursor.movePosition(QTextCursor.Down, QTextCursor.MoveAnchor, line-1)\n self.setTextCursor(cursor)", "def go(self, offset: int) -> None:\n if len(self) == 0:\n return\n if offset < 0:\n offset = len(self) + offset\n if offset < 0:\n offset = 0\n if offset > len(self) - 1:\n offset = len(self) - 1\n self.focus.flow = self[offset]", "def next_line(rule):\n return shift_line(1, rule)", "def get_line_jump_seq(self):\n line_jump_seq = \"\"\n if not world.config.no_ansi and not world.config.no_line_jump and not world.config.write_steps_once:\n line_jump_seq = \"\\r\\033[A\\033[K\"\n return line_jump_seq", "def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n # This is an error, meaning that there aren't even offset_line+1 lines in self.path.\n if f.readline() == '':\n raise ValueError('offset: {} is higher than the total number of lines in file {}'.format(\n offset_line, self.path))\n\n total_lines += 1\n\n # Retrieve the console_output just between offset_line and offset_line + max_lines\n for i in range(offset_line, offset_line + max_lines):\n line = f.readline()\n\n # We have reached the end of the file, or a line that has not finished being written to.\n if line == '' or not line.endswith(\"\\n\"):\n break\n\n console_output.append(line)\n output_lines += 1\n total_lines += 1\n\n # If there are more lines, then keep on counting in order to populate total_lines properly\n while f.readline():\n total_lines += 1\n\n return ConsoleOutputSegment(offset_line, output_lines, total_lines, ''.join(console_output))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new StateMachine rooted at `node` and run it over the input `block`.
def nested_parse(self, block, input_offset, node, match_titles=False,
                 state_machine_class=None, state_machine_kwargs=None):
    use_default = 0
    if state_machine_class is None:
        state_machine_class = self.nested_sm
        use_default += 1
    if state_machine_kwargs is None:
        state_machine_kwargs = self.nested_sm_kwargs
        use_default += 1
    block_length = len(block)

    state_machine = None
    if use_default == 2:
        try:
            state_machine = self.nested_sm_cache.pop()
        except IndexError:
            pass
    if not state_machine:
        state_machine = state_machine_class(debug=self.debug,
                                            **state_machine_kwargs)
    state_machine.run(block, input_offset, memo=self.memo,
                      node=node, match_titles=match_titles)
    if use_default == 2:
        self.nested_sm_cache.append(state_machine)
    else:
        state_machine.unlink()
    new_offset = state_machine.abs_line_offset()
    # No `block.parent` implies disconnected -- lines aren't in sync:
    if block.parent and (len(block) - block_length) != 0:
        # Adjustment for block if modified in nested parse:
        self.state_machine.next_line(len(block) - block_length)
    return new_offset
[ "async def mine_new_block():\n block = await self.create_block_async_func(Address.create_empty_account())\n if not block:\n self.input_q.put((None, {}))\n return\n mining_params = self.get_mining_param_func()\n mining_params[\"consensus_type\"] = self.consensus_type\n # handle mining simulation's timing\n if \"target_block_time\" in mining_params:\n target_block_time = mining_params[\"target_block_time\"]\n mining_params[\"target_time\"] = (\n block.header.create_time\n + self._get_block_time(block, target_block_time)\n )\n work = MiningWork(\n block.header.get_hash_for_mining(),\n block.header.height,\n block.header.difficulty,\n )\n self.work_map[work.hash] = block\n if self.process:\n self.input_q.put((work, mining_params))\n return\n\n self.process = AioProcess(\n target=self.mine_loop,\n args=(work, mining_params, self.input_q, self.output_q),\n )\n self.process.start()\n await handle_mined_block()", "def build_from_python(self, node, _id=None):\n if _id is None:\n self._check_and_raise(node, 'id')\n _id = node['id']\n self._check_and_raise(node, 'type', ' for node ' + _id)\n _type = copy.copy(node['type'])\n if _type == 'action':\n self._check_and_raise(node, 'script', ' for node ' + _id)\n return leaf.Action(name=_id, memory=self.memory, expression=node['script'])\n elif _type == 'condition':\n params = ['expression', 'true_state', 'false_state']\n self._check_and_raise(node, params, ' for node ' + _id)\n node_copy = copy.copy(node)\n for state in ['true_state', 'false_state']:\n if isinstance(node[state], str):\n node_copy[state] = State.from_str(node[state])\n return leaf.Condition(name=_id, memory=self.memory, **dict((k, node_copy[k]) for k in params))\n elif _type in ['sequence', 'fallback', 'skipper', 'selector']:\n if _type == 'selector':\n _type = 'fallback'\n seq = sequential.Sequential(skip_state=sequential.Sequential.Names[_type], name=_id, memory=self.memory)\n self._check_and_raise(node, 'children', ' for node ' + _id)\n seq.children = node['children']\n return seq", "def execute_sm(self):\n rospy.loginfo(\"start state machine...\")\n return self.sm.execute()", "def make_sm(self):\n return smach.StateMachine(outcomes=['succeeded','aborted','preempted'])", "def start(self):\r\n if self.initial_state == None: # Check that an initial state was declared\r\n raise RuntimeError(\"No initial state set on the state machine.\")\r\n\r\n self.current_state = self.initial_state\r\n\r\n for state in self.states.values():\r\n state.generator = state.handler_func(self)\r\n next(state.generator) # start up the co-routine\r", "def ex_run_node(self, node):\r\n # Refresh node state\r\n e_vm = self.connection.request(node.extra['uri_id']).object\r\n state = e_vm.findtext('state')\r\n\r\n if state != 'NOT_ALLOCATED':\r\n raise LibcloudError('Invalid Node state', self)\r\n\r\n # --------------------------------------------------------\r\n # Deploy the Node\r\n # --------------------------------------------------------\r\n self._deploy_remote(e_vm)\r\n\r\n # --------------------------------------------------------\r\n # Retrieve it again, to get some schedule-defined\r\n # values.\r\n # --------------------------------------------------------\r\n edit_vm = get_href(e_vm, 'edit')\r\n headers = {'Accept': self.NODE_MIME_TYPE}\r\n e_vm = self.connection.request(edit_vm, headers=headers).object\r\n return self._to_node(e_vm, self)", "def start_node(self, node):\n return self._action(node, \"start\")", "def apply_transaction(\n self,\n transaction,\n block):\n # Don't modify the given block\n 
block.make_immutable()\n self.set_state_root(block.header.state_root)\n computation = self.execute_transaction(transaction)\n\n # Set block.\n block, trie_data_dict = self.add_transaction(transaction, computation, block)\n block.header.state_root = self.state_root\n return computation, block, trie_data_dict", "def __create_evm_state(\n self,\n trie_root_hash: Optional[bytes],\n sender_disallow_map: Dict[bytes, int],\n timestamp: Optional[int] = None,\n block_hash: Optional[bytes] = None,\n ):\n state = EvmState(\n env=self.env.evm_env, db=self.raw_db, qkc_config=self.env.quark_chain_config\n )\n state.shard_config = self.shard_config\n if trie_root_hash:\n state.trie.root_hash = trie_root_hash\n state.sender_disallow_map = sender_disallow_map\n if timestamp:\n state.timestamp = timestamp\n # iterate until reaches genesis or header list reaches 256\n # since most headers are in LRU cache, this should not affect performance too much\n while block_hash and len(state.prev_headers) < 256:\n h = self.db.get_minor_block_header_by_hash(block_hash)\n if not h:\n break\n state.prev_headers.append(h)\n block_hash = h.hash_prev_minor_block\n return state", "def __init__(self,\n block: AbstractInstructionBlock,\n context: Dict[AbstractInstructionBlock, 'ImmutableInstructionBlock']=None) -> None:\n super().__init__()\n if context is None:\n context = dict()\n self.__return_ip = None\n return_ip = block.return_ip\n if return_ip is not None:\n self.__return_ip = InstructionPointer(context[return_ip.block], return_ip.offset)\n context[block] = self\n\n def make_immutable(instruction: Instruction) -> Instruction:\n if isinstance(instruction, GOTOInstruction):\n return GOTOInstruction(\n InstructionPointer(\n ImmutableInstructionBlock(instruction.target.block, context),\n instruction.target.offset)\n )\n elif isinstance(instruction, REPJInstruction):\n return REPJInstruction(\n instruction.count,\n InstructionPointer(\n ImmutableInstructionBlock(instruction.target.block, context),\n instruction.target.offset)\n )\n elif isinstance(instruction, CJMPInstruction):\n return CJMPInstruction(\n instruction.trigger,\n InstructionPointer(\n ImmutableInstructionBlock(instruction.target.block, context),\n instruction.target.offset)\n )\n else:\n return instruction\n\n self._instruction_tuple = tuple(make_immutable(instr) for instr in block.instructions)", "def setStateMachine(self, arg2: 'ScXMLStateMachine') -> \"void\":\n return _coin.ScXMLMinimumEvaluator_setStateMachine(self, arg2)", "def initialize(evm):\n contr = Contract()\n dfg = DiGraph() # Data Flow Graph\n cfg = DiGraph() # Control Flow Graph\n\n cur_blk = BasicBlock(0)\n pc = 0\n while pc < len(evm):\n op = evm[pc]\n if op not in opcodes.listing:\n raise KeyError('Invalid op. 
op: {:#x}, offset: {:#x}'.format(op, pc))\n\n name = opcodes.listing[op][0]\n size = opcodes.operand_size(op)\n if size != 0:\n arg = int.from_bytes(evm[pc + 1:pc + 1 + size], byteorder='big')\n else:\n arg = None\n\n instr = Instruction(op, name, arg)\n if name == 'JUMPDEST':\n if len(cur_blk.instructions) > 0:\n contr.blocks.append(cur_blk)\n # Add CFG nodes, representing basic blocks\n cfg.graph.add_node(cur_blk.offset, blk=cur_blk)\n new_blk = BasicBlock(pc)\n cur_blk.next = new_blk\n cur_blk = new_blk\n cur_blk.offset += 1\n contr.jump_destination[pc] = cur_blk\n contr.instructions[pc] = instr\n else:\n cur_blk.instructions.append(instr)\n contr.instructions[pc] = instr\n\n if opcodes.is_swap(op) or opcodes.is_dup(op):\n # Omit SWAP and DUP from IDG\n pass\n elif (name == 'JUMP' or name == 'JUMPI' or name == 'STOP' or name == 'RETURN' or\n name == 'REVERT' or name == 'INVALID' or name == 'SUICIDE'):\n contr.blocks.append(cur_blk)\n # Add CFG nodes, representing basic blocks\n cfg.graph.add_node(cur_blk.offset, blk=cur_blk)\n new_blk = BasicBlock(pc + 1)\n cur_blk.next = new_blk\n cur_blk = new_blk\n # Add DFG nodes, representing instructions\n dfg.graph.add_node(pc, instr=instr)\n else:\n # Add DFG nodes, representing instructions\n dfg.graph.add_node(pc, instr=instr)\n\n pc += size + 1\n\n if len(cur_blk.instructions) > 0 or cur_blk.offset - 1 in contr.jump_destination:\n contr.blocks.append(cur_blk)\n # Add CFG nodes, representing basic blocks\n cfg.graph.add_node(cur_blk.offset, blk=cur_blk)\n else:\n contr.blocks[-1].next = None\n\n return contr, dfg, cfg", "def call_state(self, addr: Address, args=[]) -> ESILState:\n\n if type(addr) == str:\n addr = self.r2api.get_address(addr)\n\n # seek to function and init vm\n self.r2api.seek(addr)\n self.init_vm()\n state = self.init_state()\n self.set_args(state, addr, args)\n # state.registers[\"PC\"] = addr \n\n return state", "def __init__(self, root_block):\n self.root_block = root_block\n self.blocks = {'@': root_block}\n self.block_names = {\"default\":[]}\n #registering blocks by id\n self.register_blocks(root_block.ch_blocks)\n self.register_block_names()", "def test_group(self):\n\n class DoneState(State):\n def __init__(self):\n State.__init__(self,outcomes=['done'])\n def execute(self,ud=None):\n return 'done'\n\n sm = StateMachine(['succeeded','done'])\n with sm:\n StateMachine.add('FAILSAUCE',DoneState())\n transitions = {'aborted':'FAILSAUCE','preempted':'FAILSAUCE'}\n with sm:\n StateMachine.add('FIRST', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n StateMachine.add('SECOND', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g2), transitions)\n StateMachine.add('THIRD', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n spinner = threading.Thread(target=self.spin)\n spinner.start()\n outcome = sm.execute()\n\n assert outcome == 'done'", "def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree", "def generate(self):\n if len(self.network.chain) == 0:\n print(\n \"`generate` called, but chain is nonexistant;\",\n \"delegating to `genesis`...\")\n self.genesis()\n return\n\n block = Block(self.network.chain[-1].index+1,\n self.network.chain[-1].hash_val,\n random.choice(DATA_MESSAGES))\n\n # mine block\n block.hash(self.network.difficulty)\n\n # add block to this Node's chain and send it to all other Nodes in\n # network\n 
self.network.add_block(block)\n self.broadcast(block)", "def function_from_block(block):\n return Function(block.fields.get('Function', None),\n block.fields.get('Purpose', None), block.fields.get('Inputs', None),\n block.fields.get('Outputs', None))", "def start(self, *args, **kwargs):\n if self.started:\n raise RuntimeError(\"state machine has already been started\")\n if self.initial_state is None:\n raise ValueError(\"undefined initial state\")\n self._enter(self.initial_state, args, kwargs)", "def create_initial_node(fsm):\n global_state = create_initial_global_state(len(fsm)) \n \n # check transitions of the state and add them to the stack \n transitions_list = []\n for i in range(len(fsm)):\n state = global_state[i][i]\n \n machine = fsm[i]\n transitions = machine.get_transition(state)\n for t in transitions: \n transitions_list.append(t)\n \n node = Node(global_state, transitions_list)\n return node" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for a valid subsection header. Return 1 (true) or None (false). When a new section is reached that isn't a subsection of the current section, back up the line count (use ``previous_line(x)``), then ``raise EOFError``. The current StateMachine will finish, then the calling StateMachine can reexamine the title. This will work its way back up the calling chain until the correct section level is reached.
def check_subsection(self, source, style, lineno):
    memo = self.memo
    title_styles = memo.title_styles
    mylevel = memo.section_level
    try:                            # check for existing title style
        level = title_styles.index(style) + 1
    except ValueError:              # new title style
        if len(title_styles) == memo.section_level:  # new subsection
            title_styles.append(style)
            return 1
        else:                       # not at lowest level
            self.parent += self.title_inconsistent(source, lineno)
            return None
    if level <= mylevel:            # sibling or supersection
        memo.section_level = level  # bubble up to parent section
        if len(style) == 2:
            memo.section_bubble_up_kludge = True
        # back up 2 lines for underline title, 3 for overline title
        self.state_machine.previous_line(len(style) + 1)
        raise EOFError              # let parent section re-evaluate
    if level == mylevel + 1:        # immediate subsection
        return 1
    else:                           # invalid subsection
        self.parent += self.title_inconsistent(source, lineno)
        return None
[ "def _check_section_header_typo(self, verdict: Verdict, line: str, lineno: int) -> Verdict:\n if verdict == Verdict.MAYBE_HEADING:\n try:\n name, match_title, _ = self.guess_heading(line, strict=True)\n except GuessHeadingFailError as ghfe:\n # Not being able to guess the heading here is OK since we only *think* it's a\n # heading\n self.sections._print(ghfe)\n return Verdict.NOT_HEADING\n if ':' in line:\n mess = f'Line seems to be a section header but doesn\\'t directly end with with \\':\\', did you mean \\'{match_title}\\'?'\n else:\n mess = f'Line seems to be a section header but missing \\':\\', did you mean \\'{match_title}:\\'?'\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, self.diags.section_header_maybe_header, mess,\n self.make_source_range(name, line, lineno)\n )\n return verdict", "def _is_on_section(self, section_title, subsection_title):\n current_section_list = self.q(css='nav>div.chapter.is-open>h3>a').text\n current_subsection_list = self.q(css='nav>div.chapter.is-open li.active>a>p').text\n\n if len(current_section_list) == 0:\n self.warning(\"Could not find the current section\")\n return False\n\n elif len(current_subsection_list) == 0:\n self.warning(\"Could not find current subsection\")\n return False\n\n else:\n return (\n current_section_list[0].strip() == section_title and\n current_subsection_list[0].strip().split('\\n')[0] == subsection_title\n )", "def verify_start_of_header_for_body(self):\r\n if self.compressed:\r\n next_line = str(self.file.readline(), 'utf-8')\r\n else:\r\n next_line = self.file.readline()\r\n\r\n if next_line.startswith(f'#CHROM'):\r\n self.body_header_line = Body_header_line(next_line)\r\n if self.body_header_line.invalid is True:\r\n self.invalid = True\r\n self.error_message = self.body_header_line.error_message\r\n else:\r\n self.invalid = True\r\n self.error_message = f'There is no second header line specifiying data in the body in file: {self.path}'", "def is_section_header(self, text):\n return (self.section_regex1.search(text) or\n self.section_regex2.search(text))", "def validate_unfinished_header_block(\n constants: ConsensusConstants,\n blocks: BlockchainInterface,\n header_block: UnfinishedHeaderBlock,\n check_filter: bool,\n expected_difficulty: uint64,\n expected_sub_slot_iters: uint64,\n skip_overflow_last_ss_validation: bool = False,\n skip_vdf_is_valid: bool = False,\n check_sub_epoch_summary: bool = True,\n) -> Tuple[Optional[uint64], Optional[ValidationError]]:\n # 1. 
Check that the previous block exists in the blockchain, or that it is correct\n\n prev_b = blocks.try_block_record(header_block.prev_header_hash)\n genesis_block = prev_b is None\n if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:\n return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)\n\n overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)\n if skip_overflow_last_ss_validation and overflow:\n if final_eos_is_already_included(header_block, blocks, expected_sub_slot_iters):\n skip_overflow_last_ss_validation = False\n finished_sub_slots_since_prev = len(header_block.finished_sub_slots)\n else:\n finished_sub_slots_since_prev = len(header_block.finished_sub_slots) + 1\n else:\n finished_sub_slots_since_prev = len(header_block.finished_sub_slots)\n\n new_sub_slot: bool = finished_sub_slots_since_prev > 0\n\n can_finish_se: bool = False\n can_finish_epoch: bool = False\n if genesis_block:\n height: uint32 = uint32(0)\n assert expected_difficulty == constants.DIFFICULTY_STARTING\n assert expected_sub_slot_iters == constants.SUB_SLOT_ITERS_STARTING\n else:\n assert prev_b is not None\n height = uint32(prev_b.height + 1)\n if new_sub_slot:\n can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(\n constants,\n blocks,\n prev_b.height,\n prev_b.prev_hash,\n prev_b.deficit,\n prev_b.sub_epoch_summary_included is not None,\n )\n else:\n can_finish_se = False\n can_finish_epoch = False\n\n # 2. Check finished slots that have been crossed since prev_b\n ses_hash: Optional[bytes32] = None\n if new_sub_slot and not skip_overflow_last_ss_validation:\n # Finished a slot(s) since previous block. The first sub-slot must have at least one block, and all\n # subsequent sub-slots must be empty\n for finished_sub_slot_n, sub_slot in enumerate(header_block.finished_sub_slots):\n # Start of slot challenge is fetched from SP\n challenge_hash: bytes32 = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge\n\n if finished_sub_slot_n == 0:\n if genesis_block:\n # 2a. check sub-slot challenge hash for genesis block\n if challenge_hash != constants.GENESIS_CHALLENGE:\n return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)\n else:\n assert prev_b is not None\n curr: BlockRecord = prev_b\n while not curr.first_in_sub_slot:\n curr = blocks.block_record(curr.prev_hash)\n assert curr.finished_challenge_slot_hashes is not None\n\n # 2b. check sub-slot challenge hash for non-genesis block\n if not curr.finished_challenge_slot_hashes[-1] == challenge_hash:\n print(curr.finished_challenge_slot_hashes[-1], challenge_hash)\n return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)\n else:\n # 2c. check sub-slot challenge hash for empty slot\n if (\n not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash()\n == challenge_hash\n ):\n return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)\n\n if genesis_block:\n # 2d. 
Validate that genesis block has no ICC\n if sub_slot.infused_challenge_chain is not None:\n return None, ValidationError(Err.SHOULD_NOT_HAVE_ICC)\n else:\n assert prev_b is not None\n icc_iters_committed: Optional[uint64] = None\n icc_iters_proof: Optional[uint64] = None\n icc_challenge_hash: Optional[bytes32] = None\n icc_vdf_input = None\n if prev_b.deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:\n # There should be no ICC chain if the last block's deficit is 16\n # Prev sb's deficit is 0, 1, 2, 3, or 4\n if finished_sub_slot_n == 0:\n # This is the first sub slot after the last sb, which must have deficit 1-4, and thus an ICC\n curr = prev_b\n while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:\n curr = blocks.block_record(curr.prev_hash)\n if curr.is_challenge_block(constants):\n icc_challenge_hash = curr.challenge_block_info_hash\n icc_iters_committed = uint64(prev_b.sub_slot_iters - curr.ip_iters(constants))\n else:\n assert curr.finished_infused_challenge_slot_hashes is not None\n icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]\n icc_iters_committed = prev_b.sub_slot_iters\n icc_iters_proof = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))\n if prev_b.is_challenge_block(constants):\n icc_vdf_input = ClassgroupElement.get_default_element()\n else:\n icc_vdf_input = prev_b.infused_challenge_vdf_output\n else:\n # This is not the first sub slot after the last block, so we might not have an ICC\n if (\n header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit\n < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK\n ):\n finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1]\n assert finished_ss.infused_challenge_chain is not None\n\n # Only sets the icc iff the previous sub slots deficit is 4 or less\n icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash()\n icc_iters_committed = prev_b.sub_slot_iters\n icc_iters_proof = icc_iters_committed\n icc_vdf_input = ClassgroupElement.get_default_element()\n\n # 2e. Validate that there is not icc iff icc_challenge hash is None\n assert (sub_slot.infused_challenge_chain is None) == (icc_challenge_hash is None)\n if sub_slot.infused_challenge_chain is not None:\n assert icc_vdf_input is not None\n assert icc_iters_proof is not None\n assert icc_challenge_hash is not None\n assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None\n # 2f. 
Check infused challenge chain sub-slot VDF\n # Only validate from prev_b to optimize\n target_vdf_info = VDFInfo(\n icc_challenge_hash,\n icc_iters_proof,\n sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,\n )\n if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(\n target_vdf_info,\n number_of_iterations=icc_iters_committed,\n ):\n return None, ValidationError(Err.INVALID_ICC_EOS_VDF)\n if not skip_vdf_is_valid:\n if (\n not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity\n and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(\n constants, icc_vdf_input, target_vdf_info, None\n )\n ):\n return None, ValidationError(Err.INVALID_ICC_EOS_VDF)\n if (\n sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity\n and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(\n constants,\n ClassgroupElement.get_default_element(),\n sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,\n )\n ):\n return None, ValidationError(Err.INVALID_ICC_EOS_VDF)\n\n if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:\n # 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16\n if (\n sub_slot.infused_challenge_chain.get_hash()\n != sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash\n ):\n return None, ValidationError(Err.INVALID_ICC_HASH_CC)\n else:\n # 2h. Check infused challenge sub-slot hash not included for other deficits\n if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:\n return None, ValidationError(Err.INVALID_ICC_HASH_CC)\n\n # 2i. Check infused challenge sub-slot hash in reward sub-slot\n if (\n sub_slot.infused_challenge_chain.get_hash()\n != sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash\n ):\n return None, ValidationError(Err.INVALID_ICC_HASH_RC)\n else:\n # 2j. If no icc, check that the cc doesn't include it\n if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:\n return None, ValidationError(Err.INVALID_ICC_HASH_CC)\n\n # 2k. If no icc, check that the cc doesn't include it\n if sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash is not None:\n return None, ValidationError(Err.INVALID_ICC_HASH_RC)\n\n if sub_slot.challenge_chain.subepoch_summary_hash is not None:\n assert ses_hash is None # Only one of the slots can have it\n ses_hash = sub_slot.challenge_chain.subepoch_summary_hash\n\n # 2l. check sub-epoch summary hash is None for empty slots\n if finished_sub_slot_n != 0:\n if sub_slot.challenge_chain.subepoch_summary_hash is not None:\n return None, ValidationError(Err.INVALID_SUB_EPOCH_SUMMARY_HASH)\n\n if can_finish_epoch and sub_slot.challenge_chain.subepoch_summary_hash is not None:\n # 2m. Check new difficulty and ssi\n if sub_slot.challenge_chain.new_sub_slot_iters != expected_sub_slot_iters:\n return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)\n if sub_slot.challenge_chain.new_difficulty != expected_difficulty:\n return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)\n else:\n # 2n. Check new difficulty and ssi are None if we don't finish epoch\n if sub_slot.challenge_chain.new_sub_slot_iters is not None:\n return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)\n if sub_slot.challenge_chain.new_difficulty is not None:\n return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)\n\n # 2o. 
Check challenge sub-slot hash in reward sub-slot\n if sub_slot.challenge_chain.get_hash() != sub_slot.reward_chain.challenge_chain_sub_slot_hash:\n return (\n None,\n ValidationError(\n Err.INVALID_CHALLENGE_SLOT_HASH_RC,\n \"sub-slot hash in reward sub-slot mismatch\",\n ),\n )\n\n eos_vdf_iters: uint64 = expected_sub_slot_iters\n cc_start_element: ClassgroupElement = ClassgroupElement.get_default_element()\n cc_eos_vdf_challenge: bytes32 = challenge_hash\n if genesis_block:\n if finished_sub_slot_n == 0:\n # First block, one empty slot. prior_point is the initial challenge\n rc_eos_vdf_challenge: bytes32 = constants.GENESIS_CHALLENGE\n cc_eos_vdf_challenge = constants.GENESIS_CHALLENGE\n else:\n # First block, but have at least two empty slots\n rc_eos_vdf_challenge = header_block.finished_sub_slots[\n finished_sub_slot_n - 1\n ].reward_chain.get_hash()\n else:\n assert prev_b is not None\n if finished_sub_slot_n == 0:\n # No empty slots, so the starting point of VDF is the last reward block. Uses\n # the same IPS as the previous block, since it's the same slot\n rc_eos_vdf_challenge = prev_b.reward_infusion_new_challenge\n eos_vdf_iters = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))\n cc_start_element = prev_b.challenge_vdf_output\n else:\n # At least one empty slot, so use previous slot hash. IPS might change because it's a new slot\n rc_eos_vdf_challenge = header_block.finished_sub_slots[\n finished_sub_slot_n - 1\n ].reward_chain.get_hash()\n\n # 2p. Check end of reward slot VDF\n target_vdf_info = VDFInfo(\n rc_eos_vdf_challenge,\n eos_vdf_iters,\n sub_slot.reward_chain.end_of_slot_vdf.output,\n )\n if not skip_vdf_is_valid and not sub_slot.proofs.reward_chain_slot_proof.is_valid(\n constants,\n ClassgroupElement.get_default_element(),\n sub_slot.reward_chain.end_of_slot_vdf,\n target_vdf_info,\n ):\n return None, ValidationError(Err.INVALID_RC_EOS_VDF)\n\n # 2q. Check challenge chain sub-slot VDF\n partial_cc_vdf_info = VDFInfo(\n cc_eos_vdf_challenge,\n eos_vdf_iters,\n sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,\n )\n if genesis_block:\n cc_eos_vdf_info_iters = constants.SUB_SLOT_ITERS_STARTING\n else:\n assert prev_b is not None\n if finished_sub_slot_n == 0:\n cc_eos_vdf_info_iters = prev_b.sub_slot_iters\n else:\n cc_eos_vdf_info_iters = expected_sub_slot_iters\n # Check that the modified data is correct\n if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(\n partial_cc_vdf_info,\n number_of_iterations=cc_eos_vdf_info_iters,\n ):\n return None, ValidationError(Err.INVALID_CC_EOS_VDF, \"wrong challenge chain end of slot vdf\")\n\n if not skip_vdf_is_valid:\n # Pass in None for target info since we are only checking the proof from the temporary point,\n # but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)\n if (\n not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity\n and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(\n constants, cc_start_element, partial_cc_vdf_info, None\n )\n ):\n return None, ValidationError(Err.INVALID_CC_EOS_VDF)\n if (\n sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity\n and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(\n constants,\n ClassgroupElement.get_default_element(),\n sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,\n )\n ):\n return None, ValidationError(Err.INVALID_CC_EOS_VDF)\n\n if genesis_block:\n # 2r. Check deficit (MIN_SUB.. 
deficit edge case for genesis block)\n if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:\n return (\n None,\n ValidationError(\n Err.INVALID_DEFICIT,\n f\"genesis, expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}\",\n ),\n )\n else:\n assert prev_b is not None\n if prev_b.deficit == 0:\n # 2s. If prev sb had deficit 0, resets deficit to MIN_BLOCK_PER_CHALLENGE_BLOCK\n if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:\n log.error(\n constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,\n )\n return (\n None,\n ValidationError(\n Err.INVALID_DEFICIT,\n f\"expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}, saw \"\n f\"{sub_slot.reward_chain.deficit}\",\n ),\n )\n else:\n # 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0\n if sub_slot.reward_chain.deficit != prev_b.deficit:\n return None, ValidationError(Err.INVALID_DEFICIT, \"deficit is wrong at slot end\")\n\n # 3. Check sub-epoch summary\n # Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished)\n if not skip_overflow_last_ss_validation:\n if ses_hash is not None:\n # 3a. Check that genesis block does not have sub-epoch summary\n if genesis_block:\n return (\n None,\n ValidationError(\n Err.INVALID_SUB_EPOCH_SUMMARY_HASH,\n \"genesis with sub-epoch-summary hash\",\n ),\n )\n assert prev_b is not None\n\n # 3b. Check that we finished a slot and we finished a sub-epoch\n if not new_sub_slot or not can_finish_se:\n return (\n None,\n ValidationError(\n Err.INVALID_SUB_EPOCH_SUMMARY_HASH,\n f\"new sub-slot: {new_sub_slot} finishes sub-epoch {can_finish_se}\",\n ),\n )\n\n # 3c. Check the actual sub-epoch is correct\n if check_sub_epoch_summary:\n expected_sub_epoch_summary = make_sub_epoch_summary(\n constants,\n blocks,\n height,\n blocks.block_record(prev_b.prev_hash),\n expected_difficulty if can_finish_epoch else None,\n expected_sub_slot_iters if can_finish_epoch else None,\n )\n expected_hash = expected_sub_epoch_summary.get_hash()\n if expected_hash != ses_hash:\n log.error(f\"{expected_sub_epoch_summary}\")\n return (\n None,\n ValidationError(\n Err.INVALID_SUB_EPOCH_SUMMARY,\n f\"expected ses hash: {expected_hash} got {ses_hash} \",\n ),\n )\n elif new_sub_slot and not genesis_block:\n # 3d. Check that we don't have to include a sub-epoch summary\n if can_finish_se or can_finish_epoch:\n return (\n None,\n ValidationError(\n Err.INVALID_SUB_EPOCH_SUMMARY,\n \"block finishes sub-epoch but ses-hash is None\",\n ),\n )\n\n # 4. Check if the number of blocks is less than the max\n if not new_sub_slot and not genesis_block:\n assert prev_b is not None\n num_blocks = 2 # This includes the current block and the prev block\n curr = prev_b\n while not curr.first_in_sub_slot:\n num_blocks += 1\n curr = blocks.block_record(curr.prev_hash)\n if num_blocks > constants.MAX_SUB_SLOT_BLOCKS:\n return None, ValidationError(Err.TOO_MANY_BLOCKS)\n\n # If block state is correct, we should always find a challenge here\n # This computes what the challenge should be for this block\n\n challenge = get_block_challenge(\n constants,\n header_block,\n blocks,\n genesis_block,\n overflow,\n skip_overflow_last_ss_validation,\n )\n\n # 5a. 
Check proof of space\n if challenge != header_block.reward_chain_block.pos_ss_cc_challenge_hash:\n log.error(f\"Finished slots: {header_block.finished_sub_slots}\")\n log.error(\n f\"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} \"\n f\"{header_block.reward_chain_block.signage_point_index}\"\n f\"Prev: {prev_b}\"\n )\n log.error(f\"Challenge {challenge} provided {header_block.reward_chain_block.pos_ss_cc_challenge_hash}\")\n return None, ValidationError(Err.INVALID_CC_CHALLENGE)\n\n # 5b. Check proof of space\n if header_block.reward_chain_block.challenge_chain_sp_vdf is None:\n # Edge case of first sp (start of slot), where sp_iters == 0\n cc_sp_hash: bytes32 = challenge\n else:\n cc_sp_hash = header_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()\n\n q_str: Optional[bytes32] = verify_and_get_quality_string(\n header_block.reward_chain_block.proof_of_space, constants, challenge, cc_sp_hash, height=height\n )\n if q_str is None:\n return None, ValidationError(Err.INVALID_POSPACE)\n\n # 5c. Check plot id is not present within last `NUM_DISTINCT_CONSECUTIVE_PLOT_IDS` blocks.\n if height >= constants.SOFT_FORK4_HEIGHT:\n curr_optional_block_record: Optional[BlockRecord] = prev_b\n plot_id = get_plot_id(header_block.reward_chain_block.proof_of_space)\n curr_sp = cc_sp_hash\n sp_count = 1\n\n while curr_optional_block_record is not None and sp_count < constants.UNIQUE_PLOTS_WINDOW:\n prefix_bits = calculate_prefix_bits(constants, curr_optional_block_record.height)\n\n if curr_optional_block_record.cc_sp_hash != curr_sp:\n if passes_plot_filter(\n prefix_bits,\n plot_id,\n curr_optional_block_record.pos_ss_cc_challenge_hash,\n curr_optional_block_record.cc_sp_hash,\n ):\n return None, ValidationError(Err.CHIP_0013_VALIDATION, f\"CHIP-0013 Block Failed: {height}\")\n\n sp_count += 1\n curr_sp = curr_optional_block_record.cc_sp_hash\n if sp_count < constants.UNIQUE_PLOTS_WINDOW:\n curr_optional_block_record = blocks.try_block_record(curr_optional_block_record.prev_hash)\n\n # 6. check signage point index\n # no need to check negative values as this is uint 8\n if header_block.reward_chain_block.signage_point_index >= constants.NUM_SPS_SUB_SLOT:\n return None, ValidationError(Err.INVALID_SP_INDEX)\n\n # Note that required iters might be from the previous slot (if we are in an overflow block)\n required_iters: uint64 = calculate_iterations_quality(\n constants.DIFFICULTY_CONSTANT_FACTOR,\n q_str,\n header_block.reward_chain_block.proof_of_space.size,\n expected_difficulty,\n cc_sp_hash,\n )\n\n # 7. check required iters\n if required_iters >= calculate_sp_interval_iters(constants, expected_sub_slot_iters):\n return None, ValidationError(Err.INVALID_REQUIRED_ITERS)\n\n # 8a. check signage point index 0 has no cc sp\n if (header_block.reward_chain_block.signage_point_index == 0) != (\n header_block.reward_chain_block.challenge_chain_sp_vdf is None\n ):\n return None, ValidationError(Err.INVALID_SP_INDEX)\n\n # 8b. 
check signage point index 0 has no rc sp\n if (header_block.reward_chain_block.signage_point_index == 0) != (\n header_block.reward_chain_block.reward_chain_sp_vdf is None\n ):\n return None, ValidationError(Err.INVALID_SP_INDEX)\n\n sp_iters: uint64 = calculate_sp_iters(\n constants,\n expected_sub_slot_iters,\n header_block.reward_chain_block.signage_point_index,\n )\n\n ip_iters: uint64 = calculate_ip_iters(\n constants,\n expected_sub_slot_iters,\n header_block.reward_chain_block.signage_point_index,\n required_iters,\n )\n if header_block.reward_chain_block.challenge_chain_sp_vdf is None:\n # Blocks with very low required iters are not overflow blocks\n assert not overflow\n\n # 9. Check no overflows in the first sub-slot of a new epoch\n # (although they are OK in the second sub-slot), this is important\n if overflow and can_finish_epoch:\n if finished_sub_slots_since_prev < 2:\n return None, ValidationError(Err.NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH)\n\n # 10. Check total iters\n if genesis_block:\n total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)\n else:\n assert prev_b is not None\n if new_sub_slot:\n total_iters = prev_b.total_iters\n # Add the rest of the slot of prev_b\n total_iters = uint128(total_iters + prev_b.sub_slot_iters - prev_b.ip_iters(constants))\n # Add other empty slots\n total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))\n else:\n # Slot iters is guaranteed to be the same for header_block and prev_b\n # This takes the beginning of the slot, and adds ip_iters\n total_iters = uint128(prev_b.total_iters - prev_b.ip_iters(constants))\n total_iters = uint128(total_iters + ip_iters)\n if total_iters != header_block.reward_chain_block.total_iters:\n return (\n None,\n ValidationError(\n Err.INVALID_TOTAL_ITERS,\n f\"expected {total_iters} got {header_block.reward_chain_block.total_iters}\",\n ),\n )\n\n sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - (expected_sub_slot_iters if overflow else 0))\n if overflow and skip_overflow_last_ss_validation:\n dummy_vdf_info = VDFInfo(\n bytes32([0] * 32),\n uint64(1),\n ClassgroupElement.get_default_element(),\n )\n dummy_sub_slot = EndOfSubSlotBundle(\n ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),\n None,\n RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),\n SubSlotProofs(VDFProof(uint8(0), b\"\", False), None, VDFProof(uint8(0), b\"\", False)),\n )\n sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]\n else:\n sub_slots_to_pass_in = header_block.finished_sub_slots\n (\n cc_vdf_challenge,\n rc_vdf_challenge,\n cc_vdf_input,\n rc_vdf_input,\n cc_vdf_iters,\n rc_vdf_iters,\n ) = get_signage_point_vdf_info(\n constants,\n sub_slots_to_pass_in,\n overflow,\n prev_b,\n blocks,\n sp_total_iters,\n sp_iters,\n )\n\n # 11. 
Check reward chain sp proof\n if sp_iters != 0:\n assert (\n header_block.reward_chain_block.reward_chain_sp_vdf is not None\n and header_block.reward_chain_sp_proof is not None\n )\n target_vdf_info = VDFInfo(\n rc_vdf_challenge,\n rc_vdf_iters,\n header_block.reward_chain_block.reward_chain_sp_vdf.output,\n )\n if not skip_vdf_is_valid and not header_block.reward_chain_sp_proof.is_valid(\n constants,\n rc_vdf_input,\n header_block.reward_chain_block.reward_chain_sp_vdf,\n target_vdf_info,\n ):\n return None, ValidationError(Err.INVALID_RC_SP_VDF)\n rc_sp_hash = header_block.reward_chain_block.reward_chain_sp_vdf.output.get_hash()\n else:\n # Edge case of first sp (start of slot), where sp_iters == 0\n assert overflow is not None\n if header_block.reward_chain_block.reward_chain_sp_vdf is not None:\n return None, ValidationError(Err.INVALID_RC_SP_VDF)\n if new_sub_slot:\n rc_sp_hash = header_block.finished_sub_slots[-1].reward_chain.get_hash()\n else:\n if genesis_block:\n rc_sp_hash = constants.GENESIS_CHALLENGE\n else:\n assert prev_b is not None\n curr = prev_b\n while not curr.first_in_sub_slot:\n curr = blocks.block_record(curr.prev_hash)\n assert curr.finished_reward_slot_hashes is not None\n rc_sp_hash = curr.finished_reward_slot_hashes[-1]\n\n # 12. Check reward chain sp signature\n if not AugSchemeMPL.verify(\n header_block.reward_chain_block.proof_of_space.plot_public_key,\n rc_sp_hash,\n header_block.reward_chain_block.reward_chain_sp_signature,\n ):\n return None, ValidationError(Err.INVALID_RC_SIGNATURE)\n\n # 13. Check cc sp vdf\n if sp_iters != 0:\n assert header_block.reward_chain_block.challenge_chain_sp_vdf is not None\n assert header_block.challenge_chain_sp_proof is not None\n target_vdf_info = VDFInfo(\n cc_vdf_challenge,\n cc_vdf_iters,\n header_block.reward_chain_block.challenge_chain_sp_vdf.output,\n )\n\n if header_block.reward_chain_block.challenge_chain_sp_vdf != dataclasses.replace(\n target_vdf_info,\n number_of_iterations=sp_iters,\n ):\n return None, ValidationError(Err.INVALID_CC_SP_VDF)\n if not skip_vdf_is_valid:\n if (\n not header_block.challenge_chain_sp_proof.normalized_to_identity\n and not header_block.challenge_chain_sp_proof.is_valid(constants, cc_vdf_input, target_vdf_info, None)\n ):\n return None, ValidationError(Err.INVALID_CC_SP_VDF)\n if (\n header_block.challenge_chain_sp_proof.normalized_to_identity\n and not header_block.challenge_chain_sp_proof.is_valid(\n constants,\n ClassgroupElement.get_default_element(),\n header_block.reward_chain_block.challenge_chain_sp_vdf,\n )\n ):\n return None, ValidationError(Err.INVALID_CC_SP_VDF)\n else:\n assert overflow is not None\n if header_block.reward_chain_block.challenge_chain_sp_vdf is not None:\n return None, ValidationError(Err.INVALID_CC_SP_VDF)\n\n # 14. Check cc sp sig\n if not AugSchemeMPL.verify(\n header_block.reward_chain_block.proof_of_space.plot_public_key,\n cc_sp_hash,\n header_block.reward_chain_block.challenge_chain_sp_signature,\n ):\n return None, ValidationError(Err.INVALID_CC_SIGNATURE, \"invalid cc sp sig\")\n\n # 15. 
Check is_transaction_block\n if genesis_block:\n if header_block.foliage.foliage_transaction_block_hash is None:\n return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK, \"invalid genesis\")\n else:\n assert prev_b is not None\n # Finds the previous block\n curr = prev_b\n while not curr.is_transaction_block:\n curr = blocks.block_record(curr.prev_hash)\n\n # The first block to have an sp > the last tx block's infusion iters, is a tx block\n if overflow:\n our_sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - expected_sub_slot_iters)\n else:\n our_sp_total_iters = uint128(total_iters - ip_iters + sp_iters)\n if (our_sp_total_iters > curr.total_iters) != (header_block.foliage.foliage_transaction_block_hash is not None):\n return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)\n if (our_sp_total_iters > curr.total_iters) != (\n header_block.foliage.foliage_transaction_block_signature is not None\n ):\n return None, ValidationError(Err.INVALID_IS_TRANSACTION_BLOCK)\n\n # 16. Check foliage block signature by plot key\n if not AugSchemeMPL.verify(\n header_block.reward_chain_block.proof_of_space.plot_public_key,\n header_block.foliage.foliage_block_data.get_hash(),\n header_block.foliage.foliage_block_data_signature,\n ):\n return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)\n\n # 17. Check foliage block signature by plot key\n if header_block.foliage.foliage_transaction_block_hash is not None:\n assert header_block.foliage.foliage_transaction_block_signature is not None\n if not AugSchemeMPL.verify(\n header_block.reward_chain_block.proof_of_space.plot_public_key,\n header_block.foliage.foliage_transaction_block_hash,\n header_block.foliage.foliage_transaction_block_signature,\n ):\n return None, ValidationError(Err.INVALID_PLOT_SIGNATURE)\n\n # 18. Check unfinished reward chain block hash\n if (\n header_block.reward_chain_block.get_hash()\n != header_block.foliage.foliage_block_data.unfinished_reward_block_hash\n ):\n return None, ValidationError(Err.INVALID_URSB_HASH)\n\n # 19. Check pool target max height\n if (\n header_block.foliage.foliage_block_data.pool_target.max_height != 0\n and header_block.foliage.foliage_block_data.pool_target.max_height < height\n ):\n return None, ValidationError(Err.OLD_POOL_TARGET)\n\n # 20a. Check pre-farm puzzle hashes for genesis block.\n if genesis_block:\n if (\n header_block.foliage.foliage_block_data.pool_target.puzzle_hash\n != constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH\n ):\n log.error(f\"Pool target {header_block.foliage.foliage_block_data.pool_target} hb {header_block}\")\n return None, ValidationError(Err.INVALID_PREFARM)\n if (\n header_block.foliage.foliage_block_data.farmer_reward_puzzle_hash\n != constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH\n ):\n return None, ValidationError(Err.INVALID_PREFARM)\n else:\n # 20b. If pospace has a pool pk, heck pool target signature. Should not check this for genesis block.\n if header_block.reward_chain_block.proof_of_space.pool_public_key is not None:\n assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is None\n assert header_block.foliage.foliage_block_data.pool_signature is not None\n if not AugSchemeMPL.verify(\n header_block.reward_chain_block.proof_of_space.pool_public_key,\n bytes(header_block.foliage.foliage_block_data.pool_target),\n header_block.foliage.foliage_block_data.pool_signature,\n ):\n return None, ValidationError(Err.INVALID_POOL_SIGNATURE)\n else:\n # 20c. 
Otherwise, the plot is associated with a contract puzzle hash, not a public key\n assert header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash is not None\n if (\n header_block.foliage.foliage_block_data.pool_target.puzzle_hash\n != header_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash\n ):\n return None, ValidationError(Err.INVALID_POOL_TARGET)\n\n # 21. Check extension data if applicable. None for mainnet.\n # 22. Check if foliage block is present\n if (header_block.foliage.foliage_transaction_block_hash is not None) != (\n header_block.foliage_transaction_block is not None\n ):\n return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)\n\n if (header_block.foliage.foliage_transaction_block_signature is not None) != (\n header_block.foliage_transaction_block is not None\n ):\n return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)\n\n if header_block.foliage_transaction_block is not None:\n # 23. Check foliage block hash\n if header_block.foliage_transaction_block.get_hash() != header_block.foliage.foliage_transaction_block_hash:\n return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_HASH)\n\n if genesis_block:\n # 24a. Check prev block hash for genesis\n if header_block.foliage_transaction_block.prev_transaction_block_hash != constants.GENESIS_CHALLENGE:\n return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)\n else:\n assert prev_b is not None\n # 24b. Check prev block hash for non-genesis\n curr_b: BlockRecord = prev_b\n while not curr_b.is_transaction_block:\n curr_b = blocks.block_record(curr_b.prev_hash)\n if not header_block.foliage_transaction_block.prev_transaction_block_hash == curr_b.header_hash:\n log.error(\n f\"Prev BH: {header_block.foliage_transaction_block.prev_transaction_block_hash} \"\n f\"{curr_b.header_hash} curr sb: {curr_b}\"\n )\n return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)\n\n # 25. The filter hash in the Foliage Block must be the hash of the filter\n if check_filter:\n if header_block.foliage_transaction_block.filter_hash != std_hash(header_block.transactions_filter):\n return None, ValidationError(Err.INVALID_TRANSACTIONS_FILTER_HASH)\n\n # 26a. The timestamp in Foliage Block must not be over 5 minutes in the future\n if header_block.foliage_transaction_block.timestamp > int(time.time() + constants.MAX_FUTURE_TIME2):\n return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_FUTURE)\n\n if prev_b is not None:\n # 26b. 
The timestamp must be greater than the previous transaction block timestamp\n prev_transaction_b = blocks.block_record(header_block.foliage_transaction_block.prev_transaction_block_hash)\n assert prev_transaction_b.timestamp is not None\n if header_block.foliage_transaction_block.timestamp <= prev_transaction_b.timestamp:\n return None, ValidationError(Err.TIMESTAMP_TOO_FAR_IN_PAST)\n return required_iters, None # Valid unfinished header block", "def is_bare_section(self, title):\n return (title != mp_level01_titles[0] and self.level == 0)", "def _is_group_header_line(self, line):\r\n return line[0] == '[' and line[-1] == ']'", "def testSectionHeaders(self, b, u):\n i = 0\n while i < len(u):\n i = u.find(r'\\s', i)\n if i == -1:\n return\n c = u.find(r'\\c', i)\n if c == -1:\n return\n if c - i < 50:\n print('Misplaced Section Header against chapter in: ' + b)\n i = c", "def is_section(self):\n ret_val = self._is_section()\n return ret_val", "def is_section(line):\r\n line = line.strip()\r\n return line.startswith('[') and line.endswith(']')", "def test_with_invalid_section_order(self):\n reader = DiffXReader(io.BytesIO(\n b'#diffx: version=1.0\\n'\n b'#...file:\\n'\n ))\n\n message = (\n 'Error on line 2: Unknown or unexpected section ID \"...file\". '\n 'Expected one of: \".change\", \".meta\", \".preamble\"'\n )\n\n with self.assertRaisesMessage(DiffXParseError, message):\n list(reader)", "def start_of_sub_section(cell_value):\n\tif isinstance(cell_value, str):\n\t\tpattern = '^\\([ivx]{1,5}\\)'\n\t\tm = re.search(pattern, cell_value.strip())\n\t\tif m is not None:\n\t\t\treturn True\n\n\treturn False", "def has_sections(self):\n try:\n self.find_category(b\"array_structure_list_section\")\n return True\n except Exception as e:\n if \"CBF_NOTFOUND\" in str(e):\n return False\n raise e", "def end_of_sub_section(ws, row):\n\tfor column in range(4):\n\t\tif not is_empty_cell(ws, row, column):\n\t\t\treturn False\n\n\treturn True", "def validate_finished_header_block(\n constants: ConsensusConstants,\n blocks: BlockchainInterface,\n header_block: HeaderBlock,\n check_filter: bool,\n expected_difficulty: uint64,\n expected_sub_slot_iters: uint64,\n check_sub_epoch_summary: bool = True,\n) -> Tuple[Optional[uint64], Optional[ValidationError]]:\n unfinished_header_block = UnfinishedHeaderBlock(\n header_block.finished_sub_slots,\n header_block.reward_chain_block.get_unfinished(),\n header_block.challenge_chain_sp_proof,\n header_block.reward_chain_sp_proof,\n header_block.foliage,\n header_block.foliage_transaction_block,\n header_block.transactions_filter,\n )\n\n required_iters, validate_unfinished_err = validate_unfinished_header_block(\n constants,\n blocks,\n unfinished_header_block,\n check_filter,\n expected_difficulty,\n expected_sub_slot_iters,\n False,\n check_sub_epoch_summary=check_sub_epoch_summary,\n )\n\n genesis_block = False\n if validate_unfinished_err is not None:\n return None, validate_unfinished_err\n\n assert required_iters is not None\n\n if header_block.height == 0:\n prev_b: Optional[BlockRecord] = None\n genesis_block = True\n else:\n prev_b = blocks.block_record(header_block.prev_header_hash)\n new_sub_slot: bool = len(header_block.finished_sub_slots) > 0\n\n ip_iters: uint64 = calculate_ip_iters(\n constants,\n expected_sub_slot_iters,\n header_block.reward_chain_block.signage_point_index,\n required_iters,\n )\n if not genesis_block:\n assert prev_b is not None\n # 27. 
Check block height\n if header_block.height != prev_b.height + 1:\n return None, ValidationError(Err.INVALID_HEIGHT)\n\n # 28. Check weight\n if header_block.weight != prev_b.weight + expected_difficulty:\n log.error(f\"INVALID WEIGHT: {header_block} {prev_b} {expected_difficulty}\")\n return None, ValidationError(Err.INVALID_WEIGHT)\n else:\n # 27b. Check genesis block height, weight, and prev block hash\n if header_block.height != uint32(0):\n return None, ValidationError(Err.INVALID_HEIGHT)\n if header_block.weight != uint128(constants.DIFFICULTY_STARTING):\n return None, ValidationError(Err.INVALID_WEIGHT)\n if header_block.prev_header_hash != constants.GENESIS_CHALLENGE:\n return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)\n\n # RC vdf challenge is taken from more recent of (slot start, prev_block)\n if genesis_block:\n cc_vdf_output = ClassgroupElement.get_default_element()\n ip_vdf_iters = ip_iters\n if new_sub_slot:\n rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()\n else:\n rc_vdf_challenge = constants.GENESIS_CHALLENGE\n else:\n assert prev_b is not None\n if new_sub_slot:\n # slot start is more recent\n rc_vdf_challenge = header_block.finished_sub_slots[-1].reward_chain.get_hash()\n ip_vdf_iters = ip_iters\n cc_vdf_output = ClassgroupElement.get_default_element()\n\n else:\n # Prev sb is more recent\n rc_vdf_challenge = prev_b.reward_infusion_new_challenge\n ip_vdf_iters = uint64(header_block.reward_chain_block.total_iters - prev_b.total_iters)\n cc_vdf_output = prev_b.challenge_vdf_output\n\n # 29. Check challenge chain infusion point VDF\n if new_sub_slot:\n cc_vdf_challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()\n else:\n # Not first block in slot\n if genesis_block:\n # genesis block\n cc_vdf_challenge = constants.GENESIS_CHALLENGE\n else:\n assert prev_b is not None\n # Not genesis block, go back to first block in slot\n curr = prev_b\n while curr.finished_challenge_slot_hashes is None:\n curr = blocks.block_record(curr.prev_hash)\n cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]\n\n cc_target_vdf_info = VDFInfo(\n cc_vdf_challenge,\n ip_vdf_iters,\n header_block.reward_chain_block.challenge_chain_ip_vdf.output,\n )\n if header_block.reward_chain_block.challenge_chain_ip_vdf != dataclasses.replace(\n cc_target_vdf_info,\n number_of_iterations=ip_iters,\n ):\n expected = dataclasses.replace(\n cc_target_vdf_info,\n number_of_iterations=ip_iters,\n )\n log.error(f\"{header_block.reward_chain_block.challenge_chain_ip_vdf }. expected {expected}\")\n log.error(f\"Block: {header_block}\")\n return None, ValidationError(Err.INVALID_CC_IP_VDF)\n if (\n not header_block.challenge_chain_ip_proof.normalized_to_identity\n and not header_block.challenge_chain_ip_proof.is_valid(\n constants,\n cc_vdf_output,\n cc_target_vdf_info,\n None,\n )\n ):\n log.error(f\"Did not validate, output {cc_vdf_output}\")\n log.error(f\"Block: {header_block}\")\n return None, ValidationError(Err.INVALID_CC_IP_VDF)\n if (\n header_block.challenge_chain_ip_proof.normalized_to_identity\n and not header_block.challenge_chain_ip_proof.is_valid(\n constants,\n ClassgroupElement.get_default_element(),\n header_block.reward_chain_block.challenge_chain_ip_vdf,\n )\n ):\n return None, ValidationError(Err.INVALID_CC_IP_VDF)\n\n # 30. 
Check reward chain infusion point VDF\n rc_target_vdf_info = VDFInfo(\n rc_vdf_challenge,\n ip_vdf_iters,\n header_block.reward_chain_block.reward_chain_ip_vdf.output,\n )\n if not header_block.reward_chain_ip_proof.is_valid(\n constants,\n ClassgroupElement.get_default_element(),\n header_block.reward_chain_block.reward_chain_ip_vdf,\n rc_target_vdf_info,\n ):\n return None, ValidationError(Err.INVALID_RC_IP_VDF)\n\n # 31. Check infused challenge chain infusion point VDF\n if not genesis_block:\n overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)\n deficit = calculate_deficit(\n constants,\n header_block.height,\n prev_b,\n overflow,\n len(header_block.finished_sub_slots),\n )\n\n if header_block.reward_chain_block.infused_challenge_chain_ip_vdf is None:\n # If we don't have an ICC chain, deficit must be 4 or 5\n if deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:\n return None, ValidationError(Err.INVALID_ICC_VDF)\n else:\n assert header_block.infused_challenge_chain_ip_proof is not None\n # If we have an ICC chain, deficit must be 0, 1, 2 or 3\n if deficit >= constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:\n return (\n None,\n ValidationError(\n Err.INVALID_ICC_VDF,\n f\"icc vdf and deficit is bigger or equal to {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1}\",\n ),\n )\n if new_sub_slot:\n last_ss = header_block.finished_sub_slots[-1]\n assert last_ss.infused_challenge_chain is not None\n icc_vdf_challenge: bytes32 = last_ss.infused_challenge_chain.get_hash()\n icc_vdf_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element()\n else:\n assert prev_b is not None\n if prev_b.is_challenge_block(constants):\n icc_vdf_input = ClassgroupElement.get_default_element()\n else:\n icc_vdf_input = prev_b.infused_challenge_vdf_output\n curr = prev_b\n while curr.finished_infused_challenge_slot_hashes is None and not curr.is_challenge_block(constants):\n curr = blocks.block_record(curr.prev_hash)\n\n if curr.is_challenge_block(constants):\n icc_vdf_challenge = curr.challenge_block_info_hash\n else:\n assert curr.finished_infused_challenge_slot_hashes is not None\n icc_vdf_challenge = curr.finished_infused_challenge_slot_hashes[-1]\n\n icc_target_vdf_info = VDFInfo(\n icc_vdf_challenge,\n ip_vdf_iters,\n header_block.reward_chain_block.infused_challenge_chain_ip_vdf.output,\n )\n\n if icc_vdf_input is None or not header_block.infused_challenge_chain_ip_proof.is_valid(\n constants,\n icc_vdf_input,\n header_block.reward_chain_block.infused_challenge_chain_ip_vdf,\n icc_target_vdf_info,\n ):\n return None, ValidationError(Err.INVALID_ICC_VDF, \"invalid icc proof\")\n else:\n if header_block.infused_challenge_chain_ip_proof is not None:\n return None, ValidationError(Err.INVALID_ICC_VDF)\n\n # 32. Check reward block hash\n if header_block.foliage.reward_block_hash != header_block.reward_chain_block.get_hash():\n return None, ValidationError(Err.INVALID_REWARD_BLOCK_HASH)\n\n # 33. 
Check reward block is_transaction_block\n if (\n header_block.foliage.foliage_transaction_block_hash is not None\n ) != header_block.reward_chain_block.is_transaction_block:\n return None, ValidationError(Err.INVALID_FOLIAGE_BLOCK_PRESENCE)\n\n return required_iters, None", "def fits_section_header(self):\n if self.get_width() == 660 and self.get_height() == 65:\n return True\n else:\n return False", "def _check_valid_section_spacing(self, prevline: str, lineno: int) -> None:\n if prevline and not prevline.isspace():\n loc = self.make_source_range('', '', lineno)\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, self.diags.section_spacing,\n 'Missing empty line between sections, must have one before this section',\n loc, highlight=False, patch=Patch(loc, '\\n')\n )\n return", "def parse_sections(self, offset):\n\n self.sections = []\n MAX_SIMULTANEOUS_ERRORS = 3\n for i in range(self.FILE_HEADER.NumberOfSections):\n if i >= MAX_SECTIONS:\n self.__warnings.append(\"Too many sections {0} (>={1})\".format(\n self.FILE_HEADER.NumberOfSections, MAX_SECTIONS))\n break\n simultaneous_errors = 0\n section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )\n if not section:\n break\n section_offset = offset + section.sizeof() * i\n section.set_file_offset(section_offset)\n section_data = self.__data__[section_offset : section_offset + section.sizeof()]\n # Check if the section is all nulls and stop if so.\n if count_zeroes(section_data) == section.sizeof():\n self.__warnings.append(\n 'Invalid section {0}. Contents are null-bytes.'.format(i))\n break\n if not section_data:\n self.__warnings.append(\n 'Invalid section {0}. No data in the file (is this corkami\\'s virtsectblXP?).'.format(i))\n break\n section.__unpack__(section_data)\n self.__structures__.append(section)\n\n if section.SizeOfRawData+section.PointerToRawData > len(self.__data__):\n simultaneous_errors += 1\n self.__warnings.append(\n 'Error parsing section {0}. SizeOfRawData is larger than file.'.format(i))\n\n if self.adjust_FileAlignment( section.PointerToRawData,\n self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):\n simultaneous_errors += 1\n self.__warnings.append(\n 'Error parsing section {0}. PointerToRawData points beyond the end of the file.'.format(i))\n\n if section.Misc_VirtualSize > 0x10000000:\n simultaneous_errors += 1\n self.__warnings.append(\n 'Suspicious value found parsing section {0}. VirtualSize is extremely large > 256MiB.'.format(i))\n\n if self.adjust_SectionAlignment( section.VirtualAddress,\n self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:\n simultaneous_errors += 1\n self.__warnings.append(\n 'Suspicious value found parsing section {0}. VirtualAddress is beyond 0x10000000.'.format(i))\n\n if ( self.OPTIONAL_HEADER.FileAlignment != 0 and\n ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):\n simultaneous_errors += 1\n self.__warnings.append(\n ('Error parsing section {0}. '\n 'PointerToRawData should normally be '\n 'a multiple of FileAlignment, this might imply the file '\n 'is trying to confuse tools which parse this incorrectly.').format(i))\n\n if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS:\n self.__warnings.append('Too many warnings parsing section. 
Aborting.')\n break\n\n\n section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')\n\n # Set the section's flags according the the Characteristics member\n set_flags(section, section.Characteristics, section_flags)\n\n if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and\n section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):\n\n if section.Name.rstrip(b'\\x00') == b'PAGE' and self.is_driver():\n # Drivers can have a PAGE section with those flags set without\n # implying that it is malicious\n pass\n else:\n self.__warnings.append(\n ('Suspicious flags set for section %d. ' % i) +\n 'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. '\n 'This might indicate a packed executable.')\n\n\n self.sections.append(section)\n\n # Sort the sections by their VirtualAddress and add a field to each of them\n # with the VirtualAddress of the next section. This will allow to check\n # for potentially overlapping sections in badly constructed PEs.\n self.sections.sort(key=lambda a: a.VirtualAddress)\n for idx, section in enumerate(self.sections):\n if idx == len(self.sections)-1:\n section.next_section_virtual_address = None\n else:\n section.next_section_virtual_address = self.sections[idx+1].VirtualAddress\n\n if self.FILE_HEADER.NumberOfSections > 0 and self.sections:\n return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections\n else:\n return offset", "def is_heading(self, line: str, prev_line: str) -> Verdict:\n def handle_header_with_colon(text: str) -> Verdict:\n if text.endswith('\\:'):\n return Verdict.NOT_HEADING\n\n textlo = text.casefold()\n if any(map(textlo.startswith, (t.casefold() + ':' for t in self.gen_titles()))):\n return Verdict.IS_HEADING\n\n if text.endswith(':'):\n if any(map(text.__contains__, (' - ', '=', '(', ')', '%', '$', '@', '#', '!', '^', '&', '+'))):\n return Verdict.IS_HEADING_BUT_PROBABLY_SHOULDNT_BE\n\n if _suspicious_colon_regex.search(textlo) is None:\n return Verdict.IS_HEADING\n return Verdict.IS_HEADING_BUT_PROBABLY_SHOULDNT_BE\n\n try:\n _, _, section = self.fuzzy_find_section(text, cache_result=False, strict=True)\n except GuessHeadingFailError:\n return Verdict.NOT_HEADING\n return Verdict.NOT_HEADING if isinstance(section, DefaultSection) else Verdict.IS_HEADING\n\n def handle_header_without_colon(line: str, prev_line: str) -> Verdict:\n linelo = line.casefold()\n results = list(filter(linelo.startswith, map(str.casefold, self.gen_titles())))\n if not results:\n return Verdict.NOT_HEADING\n if _suspicious_plain_regex.search(' '.join((prev_line.casefold(), linelo))):\n # suspicious regex detected, err on the side of caution and say this line is not a\n # heading\n return Verdict.NOT_HEADING\n # not suspicious, still not 100% though\n return Verdict.MAYBE_HEADING\n\n prev_line = prev_line.strip()\n line = line.strip()\n if not line or line.startswith(('+', '. ', '-', '$', '.vb', '.ve')):\n return Verdict.NOT_HEADING\n if ':' in line:\n return handle_header_with_colon(line)\n return handle_header_without_colon(line, prev_line)", "def validate_header(header: str) -> None:\n header_pattern = r\"^sourceId,RTSsymbol$\"\n if not re.match(header_pattern, header):\n raise ImproperFileFormat(\"Improperly formatted header\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build, compile and return a regular expression based on `definition`.
def build_regexp(definition, compile=True):
    name, prefix, suffix, parts = definition
    part_strings = []
    for part in parts:
        if type(part) is tuple:
            part_strings.append(build_regexp(part, None))
        else:
            part_strings.append(part)
    or_group = '|'.join(part_strings)
    regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
    if compile:
        return re.compile(regexp, re.UNICODE)
    else:
        return regexp
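A short usage sketch may help: `definition` is a (name, prefix, suffix, parts) tuple, and any part may itself be such a tuple, which is what the recursive call handles. The sample definition below is invented for illustration; only the tuple shape comes from the function above.

import re  # assumed available, as build_regexp itself relies on it

# Invented definition: ('name', prefix, suffix, parts), where one part is a
# nested definition tuple.
definition = ('initial', '', '',
              ('ab', 'cd',
               ('nested', r'\(', r'\)', ('x', 'y'))))
pattern = build_regexp(definition)
print(pattern.pattern)
# (?P<initial>ab|cd|\((?P<nested>x|y)\))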
[ "def compile(self):\n return re.compile(self.pattern, self.flags)", "def get_compiled(self, name: str) -> re.compile:\n rx = re.compile(self.regexp)\n if self.flag_multiline:\n rx.flags ^= re.MULTILINE\n if self.flag_dotall:\n rx.flags ^= re.DOTALL\n return rx", "def __compile_re(self, flags = '', rules = []):\n if not rules:\n return DEFAULT_RE\n regexp = RegExp(flags, *rules).re\n return regexp", "def compile_regex(regex):\n return re.compile(regex, re.U)", "def compileRegexp(class_):\n if not class_.allowParseDep:\n return\n\n d = dict(flagFormat=class_.flagFormat, depFormat=class_.depFormat,\n WORD=class_.WORD, IDENT=class_.IDENT)\n\n # zero or more space-separated flags\n flagFmt = '(?:\\( *(%(flagFormat)s?(?: +%(flagFormat)s)*) *\\))?'\n # add ^ and $ to ensure we match the entire string passed in\n regexp = ('^ *(%(depFormat)s) *' + flagFmt + ' *$') % d\n # word is a slightly larger group of chars than ident -\n # includes . and +, because those are used in paths and\n # sonames. May need to be larger some day, and probably\n # could be more restrictive for some groups. Should not contain\n # /, as that's used as a special char in many dep classes.\n regexp = regexp.replace('WORD', d['WORD'])\n regexp = regexp.replace('IDENT',d['IDENT'])\n class_.regexpStr = regexp\n class_.regexp = re.compile(regexp)", "def make_regex(self):\n forwards_str = ')|('.join(self.forwards)\n reverses_str = ')|('.join(self.reverses)\n re_str = '^.*((' + forwards_str +')).*((' + reverses_str + ')).*$'\n return re.compile(re_str)", "def __toRegExp(self, fname, targetName) -> re:\n fname = os.path.join(self.packageDir(), fname)\n if not os.path.isfile(fname):\n EmergeDebug.die(\"%s not found at: %s\" % (targetName.capitalize(), os.path.abspath(fname)))\n regex = \"(\"\n for line in fileinput.input(fname):\n # Cleanup white spaces / line endings\n line = line.splitlines()\n line = line[0].rstrip()\n if line.startswith(\"#\") or len(line) == 0:\n continue\n try:\n tmp = \"^%s$\" % line\n regex += \"%s|\" % tmp\n re.compile(tmp, re.IGNORECASE) #for debug\n EmergeDebug.debug(\"%s added to %s as %s\" % (line, targetName, tmp), 2)\n except re.error:\n EmergeDebug.die(\"%s is not a valid regexp\" % tmp)\n return re.compile(\"%s)\" % regex[:-2], re.IGNORECASE)", "def build_regex(single_commenting_syntax, multi_commenting_syntax, extension):\n\n # Build single_line_regex and full regex\n regex = \"(\"\n single_line_regex = \"\"\n try:\n # Add single line syntax to regex\n for i in range(len(single_commenting_syntax[extension])):\n # single_syntax_length is the length of the single line commenting syntax\n single_syntax_length = len(single_commenting_syntax[extension][i])\n for j in range(single_syntax_length):\n regex += \"\\\\\" + single_commenting_syntax[extension][i][j]\n regex += \"(?:.*)$|\"\n\n single_line_regex = single_line_regex + regex[:-1]\n\n # Add multi line syntax to regex\n for i in range(0, len(multi_commenting_syntax[extension]), 2):\n for j in range(2):\n multi_syntax_length = len(multi_commenting_syntax[extension][i + j])\n # Add each character/symbol of a multi line commenting syntax one at a time into the regex\n for k in range(multi_syntax_length):\n regex += \"\\\\\" + multi_commenting_syntax[extension][i + j][k]\n # When done adding the start and end of a multi line commenting syntax, add regex\n if j == 0:\n regex += \"(?:(?:.|\\\\n)*?)\"\n # When we aren't finished adding the different ways to comment, add a or to the regex\n if (i + 2) < len(multi_commenting_syntax[extension]):\n regex += 
\"|\"\n\n regex += \")\"\n single_line_regex += \")\"\n except KeyError:\n print(\"Please add the syntax for commenting for that specific langauge in the .csv file to proceed.\")\n sys.exit(1)\n\n return single_line_regex, regex", "def make_regex(style=None):\n # As new styles are added the current default should be moved into the\n # dict.\n # TODO: this smells terrible\n default = re.compile(r'[\\x0c]{0,1}(\\w+)\\*?[\\s\\t]*(\\d{1,2})[\\s\\t]*(.*?)'\n '[\\s\\t]*\\(*(\\d+)\\s*-\\s*(\\d+)\\)*\\s*$')\n d = {0: re.compile(r'(\\w{1,2}[\\$\\-%]\\w*|PADDING)\\s*CHARACTER\\*(\\d{3})'\n '\\s*\\.{0,1}\\s*\\((\\d*):(\\d*)\\).*'),\n 1: re.compile(r'D (\\w+) \\s* (\\d{1,2}) \\s* (\\d*)'),\n 2: default}\n return d.get(style, default)", "def create_regex_factory(\n format_string=None, regex_type=None, ignore_case=False\n):\n if regex_type:\n format_string = REGEX_WRAPPERS.get(regex_type)\n if not format_string:\n raise KeyError(\"Unknown regex wrapper: {}\".format(regex_type))\n\n flags = 0\n if ignore_case:\n flags |= re.IGNORECASE\n\n if format_string:\n\n def create_regex(pattern):\n return re.compile(format_string.format(pattern), flags=flags)\n\n else:\n\n def create_regex(pattern):\n return re.compile(pattern, flags=flags)\n\n return create_regex", "def _re_compile(regex):\n\n return re.compile(regex, re.I | re.UNICODE)", "def build_custom_regex(text):\n\n # Match the final question mark\n text = re.sub(r\"\\?\", \"\\?\", text)\n # Because of optinal expensions, we need to be lenient on space matching. This will allow to skip some spaces\n text = re.sub(r\"\\s\", \"\\\\\\s*\", text)\n # Hack, because the templates in the dataset somehow don't match the templates exactly\n text = re.sub(\"another\", \"(?:another|a)\", text)\n text = re.sub(\"other\", \"(?:other)?\", text)\n # Replace all attributes by their possibilities, possibly in a group\n text = SIZE_REGEX.sub(partial(add_group, ALL_SIZES), text)\n text = COLOR_REGEX.sub(partial(add_group, ALL_COLORS), text)\n text = MATERIAL_REGEX.sub(partial(add_group, ALL_MATERIALS), text)\n text = SHAPE_REGEX.sub(partial(add_group, ALL_SHAPES), text)\n text = RELATION_REGEX.sub(partial(add_group, ALL_RELATIONS), text)\n # Optional text\n text = OPTIONAL_REGEX.sub(r\"(?:\\1)?\", text)\n # To match plurals in our groups, we detect -s suffixes\n text = PLURAL_REGEX.sub(r\")s)?\\1\", text)\n return re.compile(text)", "def _createRegex(self, pattern):\n return '%s$' % pattern.replace( '*', '.*').replace( '?', '.')", "def compile_match(pattern):\n\n regexp = \"\"\n\n while pattern:\n if pattern.startswith(\"**\"):\n regexp += r'.*'\n pattern = pattern[2:]\n elif pattern[0] == \"*\":\n regexp += r'[^/]*/?'\n pattern = pattern[1:]\n elif pattern[0] == '[':\n regexp += r'['\n pattern = pattern[1:]\n\n while pattern and pattern[0] != ']':\n regexp += pattern[0]\n pattern = pattern[1:]\n\n pattern = pattern[1:]\n regexp += ']'\n\n else:\n regexp += re.escape(pattern[0])\n pattern = pattern[1:]\n\n regexp += \"$\"\n\n return re.compile(regexp, re.I)", "def _make_regex(self, *scopes):\n cmds = []\n # We go through all commands, and collect those\n # who are in one of the given scopes:\n for name in Cmd.commands:\n for scope in scopes:\n if Cmd.commands[name].scope == scope:\n cmds.append(name)\n # Build the regex using the the \"or\" operator\n cmd_list = '|'.join(cmd for cmd in cmds)\n regex = re.compile(\n \"^(?P<command>{})(?:\\s+(?P<arguments>.*))?$\".format(cmd_list)\n )\n return regex", "def re_compiler(self, pattern):\n try:\n return re.compile(pattern, 
self.re_flags)\n except Exception as exc: # pylint: disable=broad-except\n _log(\"error\", \"failed to compile pattern `%s`: %s\", pattern, exc)", "def regenerate_match_re(self):\n def find_broken_token_regex():\n \"\"\"Tries to find which token regex is broken.\n\n Returns:\n (str, str). Tuple of token name and token regex.\n \"\"\"\n trs = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n trs += r\"(?P<{}>{})\".format(token.name, token.pattern_str)\n try:\n re.compile(trs, re.MULTILINE)\n except Exception:\n return (token.name, token.pattern_str)\n trs += r\"|\"\n\n token_re_str = r\"\"\n for token in self.__table.values():\n if token.pattern_str: # Skip tokens with empty pattern\n token_re_str += r\"(?P<{}>{})|\".format(token.name, token.pattern_str)\n # Remove trailing '|'\n token_re_str = token_re_str[0:-1]\n # Finally try to compile the regex\n try:\n self.__token_re = re.compile(token_re_str, re.MULTILINE)\n except Exception as e:\n tb = sys.exc_info()[2]\n token_name, broken_regex = find_broken_token_regex()\n emsg = str(e) + \" With token '{}' and regexp: '{}' and whole regexp: {}\".format(token_name, broken_regex, token_re_str)\n raise TokenizerRegexpError(emsg).with_traceback(tb)", "def compile_response_regex(regexp):\n return re.compile(regexp, re.IGNORECASE | re.DOTALL)", "def _make_re_from_phrase(phrase):\n paragraph_text = r'(^.+\\w.+\\n)*' # need \\S to ensure not just whitespace\n\n # TODO: check slowdown due to inclusion of '^.*' at start\n tmp = '^.*' + re.escape(phrase) + r'.*\\n' + paragraph_text + r'\\s+'\n tmp = tmp.replace(\"\\\\ \", \"(\\\\s|\\\\n)*\")\n tmp = tmp.replace(\":\", \"(:|\\\\s|\\\\n)*\")\n return re.compile(tmp, re.I | re.M) # make it case insensitive", "def prepare_pattern(regex):\n version_regex = \"\"\n confidence_regex = \"\"\n search_regex = regex\n if '\\\\;' in regex:\n for reg in regex.split('\\\\;'):\n if 'version:' in reg:\n version_regex = rep_slashes(reg)\n elif 'confidence:' in reg:\n confidence_regex = rep_slashes(reg)\n else:\n search_regex = rep_slashes(reg)\n try:\n re.compile(search_regex, re.I)\n return search_regex, version_regex, confidence_regex\n except re.error as e:\n LOGGER.warning(f\"compiling regex: {regex} {e}\")\n # regex that never matches:\n # http://stackoverflow.com/a/1845097/413622\n return r'(?!x)x', \"\", \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setting-based customizations; run when parsing begins.
def init_customizations(self, settings):
    if settings.pep_references:
        self.implicit_dispatch.append((self.patterns.pep,
                                       self.pep_reference))
    if settings.rfc_references:
        self.implicit_dispatch.append((self.patterns.rfc,
                                       self.rfc_reference))
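For orientation, `implicit_dispatch` holds (compiled pattern, handler) pairs that `implicit_inline` (further below) tries against plain text; this method only appends extra pairs when the matching settings are enabled. The pattern, handler, and flag below are placeholders standing in for `self.patterns.pep`, `self.pep_reference`, and `settings.pep_references`, not the real objects.

import re

def pep_reference(match, lineno):
    # placeholder handler: the real one builds a reference node
    return ['<PEP %d reference>' % int(match.group(1))]

pep_pattern = re.compile(r'PEP (\d+)')   # placeholder pattern
pep_references = True                    # stands in for settings.pep_references

implicit_dispatch = []
if pep_references:
    implicit_dispatch.append((pep_pattern, pep_reference))
# implicit_dispatch now holds one (pattern, handler) pair for the inliner to try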
[ "def setup(self):\n # Call the base class setup first so that all of the variables are fully initialized and formatted.\n super().setup()\n\n # Write out the custom config\n self.writeCustomConfig()", "def __call__(self, iperf):\n self.validate()\n for key, value in self.settings.items():\n setattr(iperf, self.settings_map[key], value)\n return", "def reloadSettings(self) -> None:\n # Do basic inits.\n super().reloadSettings()\n # Bind methods.\n if self.use_pygments_styles:\n self.getDefaultFormat = QtGui.QTextCharFormat\n self.getFormat = self.getPygmentsFormat\n self.setFormat = self.setPygmentsFormat\n else:\n self.getDefaultFormat = self.getLegacyDefaultFormat\n self.getFormat = self.getLegacyFormat\n self.setFormat = self.setLegacyFormat", "def _prepare_settings(cls, settings):\n opt_params = cls.get_optional_params()\n for setting_name, description in opt_params.items():\n if setting_name not in settings:\n settings[setting_name] = description[2]", "def set_render_settings(self, setting=None):\n file_location = 'C:/Users/cmj140030/code/artist_tools/surface_tools/turntable_tool/render_settings.xml'\n\n if not os.path.isfile(file_location):\n IO.error(\"The file, %s, does not exist\" % file_location)\n\n xml_fh = et.parse(file_location)\n root = xml_fh.getroot()\n xml_nodes = root.iter(setting)\n if not xml_nodes:\n print 'I could not find any child nodes'\n\n for xml_node in xml_nodes:\n # Loops through the first indented item, example: Low\n settings = xml_node.getchildren()\n for set in settings:\n # setting = defaultArnoldRenderOptions\n attrs = set.getchildren()\n for attr in attrs:\n # attr = AASamples\n val = attr.attrib['value']\n if str(val).isdigit():\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),int(val))\n elif '.' in val and val.replace('.', '').isdigit():\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),float(val))\n elif '-' in val and val.replace('-', '').isdigit():\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),int(val))\n elif '-' and '.' 
in str(val):\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),float(val))\n elif '/' or '$' or '&' in str(val):\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),str(val),type=\"string\")\n elif str(val) == '':\n cmds.setAttr(\"%s.%s\" % (set.tag,attr.tag),'',type=\"string\")\n else:\n print 'The value is not valid'", "def __setstate__(self, d):\n d = param_aliases(d)\n try:\n load_options = Store.load_counter_offset is not None\n if load_options:\n matches = [k for k in d if k.startswith('_custom_option')]\n for match in matches:\n custom_id = int(match.split('_')[-1])\n if not isinstance(d[match], dict):\n # Backward compatibility before multiple backends\n backend_info = {'matplotlib':d[match]}\n else:\n backend_info = d[match]\n for backend, info in backend_info.items():\n if backend not in Store._custom_options:\n Store._custom_options[backend] = {}\n Store._custom_options[backend][Store.load_counter_offset + custom_id] = info\n\n d.pop(match)\n\n if d['id'] is not None:\n d['id'] += Store.load_counter_offset\n else:\n d['id'] = None\n except:\n self.warning(\"Could not unpickle custom style information.\")\n self.__dict__.update(d)", "def parse_settings(self):\n global DEBUG, LOGGING, G_SET_COMMAND_STRING\n global G_LOGGER, FILE_HANDLER, CONSOLE_HANDLER\n fname = os.path.join(PATH, \"general_settings\")\n if os.path.isfile(fname):\n general_settings_file = open(fname, \"r\")\n try:\n for line in general_settings_file:\n words = line.strip().split(\"=\")\n if words[0] == \"logging\":\n wrds1 = words[1].strip().lower()\n if wrds1 == \"true\":\n self.logging = True\n LOGGING = True\n DEBUG = True\n G_LOGGER = logging.getLogger(\"default\")\n G_LOGGER.setLevel(logging.INFO)\n # Install exception handler\n sys.excepthook = custom_exception_handler\n FILE_HANDLER = logging.FileHandler(\n \"{0}/{1}.log\".format(PATH, \"log\"),\n mode=\"w\")\n G_LOGGER.addHandler(FILE_HANDLER)\n CONSOLE_HANDLER = logging.StreamHandler()\n G_LOGGER.addHandler(CONSOLE_HANDLER)\n G_LOGGER.info(\"Enabled logging to file.\")\n elif words[0] == \"use hotkeys\":\n wrds1 = words[1].strip().lower()\n if wrds1 == \"true\":\n self.use_hotkeys = True\n else:\n self.use_hotkeys = False\n if DEBUG:\n G_LOGGER.info(\"use_hotkeys: %s\", self.use_hotkeys)\n elif words[0] == \"next wallpaper hotkey\":\n binding_strings = words[1].strip().split(\"+\")\n if binding_strings:\n self.hk_binding_next = tuple(binding_strings)\n if DEBUG:\n G_LOGGER.info(\"hk_binding_next: %s\", self.hk_binding_next)\n elif words[0] == \"pause wallpaper hotkey\":\n binding_strings = words[1].strip().split(\"+\")\n if binding_strings:\n self.hk_binding_pause = tuple(binding_strings)\n if DEBUG:\n G_LOGGER.info(\"hk_binding_pause: %s\", self.hk_binding_pause)\n elif words[0] == \"set_command\":\n G_SET_COMMAND_STRING = words[1].strip()\n self.set_command = G_SET_COMMAND_STRING\n elif words[0].strip() == \"show_help_at_start\":\n show_state = words[1].strip().lower()\n if show_state == \"false\":\n self.show_help = False\n else:\n pass\n else:\n G_LOGGER.info(\"Exception: Unkown general setting: %s\",\n words[0])\n finally:\n general_settings_file.close()\n else:\n # if file does not exist, create it and write default values.\n general_settings_file = open(fname, \"x\")\n general_settings_file.write(\"logging=false\\n\")\n general_settings_file.write(\"use hotkeys=true\\n\")\n general_settings_file.write(\"next wallpaper hotkey=control+super+w\\n\")\n self.hk_binding_next = (\"control\", \"super\", \"w\")\n general_settings_file.write(\"pause wallpaper 
hotkey=control+super+shift+p\\n\")\n self.hk_binding_pause = (\"control\", \"super\", \"shift\", \"p\")\n general_settings_file.write(\"set_command=\")\n general_settings_file.close()", "def update_override_settings(self, override_settings: dict) -> None:", "def settingsCollector(self):\n def converter(value, varType):\n if varType == float:\n value = float(value)\n elif varType == int:\n value = int(value)\n return value\n \n for setting in self.entries:\n settingType = self.settings.settings[setting].type\n value = self.entries[setting].get()\n self.settings.settings[setting].value = converter(value, settingType)\n for setting in self.buttons:\n value = self.buttons[setting].get()\n self.settings.settings[setting].value = value\n self.experimentName = self.nameEntry.get()", "def __init__(self, settings):\n ColorDefParser.__init__(self, settings)\n self._define_parser_colordef_attr()", "def _text2settings(self, color: str):\n t2xs = [\n (self.t2f, \"font\"),\n (self.t2s, \"slant\"),\n (self.t2w, \"weight\"),\n (self.t2c, \"color\"),\n ]\n # setting_args requires values to be strings\n\n default_args = {\n arg: getattr(self, arg) if arg != \"color\" else str(color) for _, arg in t2xs\n }\n\n settings = self._get_settings_from_t2xs(t2xs, default_args)\n settings.extend(self._get_settings_from_gradient(default_args))\n\n # Handle overlaps\n\n settings.sort(key=lambda setting: setting.start)\n for index, setting in enumerate(settings):\n if index + 1 == len(settings):\n break\n\n next_setting = settings[index + 1]\n if setting.end > next_setting.start:\n new_setting = self._merge_settings(setting, next_setting, default_args)\n new_index = index + 1\n while (\n new_index < len(settings)\n and settings[new_index].start < new_setting.start\n ):\n new_index += 1\n settings.insert(new_index, new_setting)\n\n # Set all text settings (default font, slant, weight)\n temp_settings = settings.copy()\n start = 0\n for setting in settings:\n if setting.start != start:\n temp_settings.append(TextSetting(start, setting.start, **default_args))\n start = setting.end\n if start != len(self.text):\n temp_settings.append(TextSetting(start, len(self.text), **default_args))\n settings = sorted(temp_settings, key=lambda setting: setting.start)\n\n line_num = 0\n if re.search(r\"\\n\", self.text):\n for start, end in self._find_indexes(\"\\n\", self.text):\n for setting in settings:\n if setting.line_num == -1:\n setting.line_num = line_num\n if start < setting.end:\n line_num += 1\n new_setting = copy.copy(setting)\n setting.end = end\n new_setting.start = end\n new_setting.line_num = line_num\n settings.append(new_setting)\n settings.sort(key=lambda setting: setting.start)\n break\n for setting in settings:\n if setting.line_num == -1:\n setting.line_num = line_num\n\n return settings", "def do_load_settings(self):\n return run_trigger('set', arg=self.profile)\n # return run_alfred(':fzyset {}'.format(self.profile))", "def customs(self, customs):\n\n self._customs = customs", "def apply_configuration(self):\n pass # pragma: no cover", "def _apply_configuration(self, terminal):\n terminal.set_colors(self._fg_color, self._bg_color, self._palette)\n terminal.set_font_scale(self._font_scale)\n if self._font_family:\n font = terminal.get_font()\n font.set_family(self._font_family)\n terminal.set_font(font)", "def _init_from_dict(self, settings: Settings) -> None:\n # The valid ivars and reasonable defaults.\n valid = dict(\n ignore_case=False,\n node_only=False,\n pattern_match=False,\n search_body=True,\n 
search_headline=True,\n suboutline_only=False, # Seems safest. # Was True !!!\n whole_word=True,\n )\n # Set ivars to reasonable defaults.\n for ivar in valid:\n setattr(self, ivar, valid.get(ivar))\n # Override ivars from settings.\n errors = 0\n for ivar in settings.keys():\n if ivar in valid:\n val = settings.get(ivar)\n if val in (True, False):\n setattr(self, ivar, val)\n else: # pragma: no cover\n g.trace(\"bad value: {ivar!r} = {val!r}\")\n errors += 1\n else: # pragma: no cover\n g.trace(f\"ignoring {ivar!r} setting\")\n errors += 1\n if errors: # pragma: no cover\n g.printObj(sorted(valid.keys()), tag='valid keys')", "def init_custom_fields(self):\n mapping = {\n 'application': self.config['sde_application'],\n 'project': self.config['sde_project'],\n 'context': self.config['alm_context']\n }\n\n config_custom_fields = ['alm_custom_fields']\n if self.feature_custom_lookup:\n config_custom_fields.append('alm_custom_lookup_fields')\n\n for config_option in config_custom_fields:\n self.transform_config_value(config_option, mapping)", "def __parseAllHelper( self, parsed ):\n parsedDict = vars(parsed)\n for name, obj in vars(self).iteritems():\n if isinstance( obj, ConfigHelper ):\n for var in obj.getOptions():\n key = \"%s.%s\" %( name,var )\n if key in parsedDict:\n try:\n obj.setOption( var, parsedDict[key] )\n except RuntimeError as e:\n self._errorMessages.append( \"ERROR: %s \" % e )", "def apply_customization(self, serializer, customization):\n # apply fields or exclude\n if customization.fields is not None:\n if len(customization.fields) == 0:\n # customization fields are empty, set Meta.fields to '__all__'\n serializer.Meta.fields = ALL_FIELDS\n else:\n serializer.Meta.fields = customization.fields\n if customization.exclude is not None:\n serializer.Meta.exclude = customization.exclude\n\n # apply extra_kwargs\n if customization.extra_kwargs is not None:\n serializer.Meta.extra_kwargs = customization.extra_kwargs\n\n # apply validate_methods\n for method_name, method in customization.validate_methods.items():\n setattr(serializer, method_name, method)", "def set_config(self, data: dict[str, str]) -> None:\n for key, value in data.items():\n if key not in self.config:\n raise CoreError(f\"unknown config: {key}\")\n self.custom_config[key] = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if inline markup start-string is 'quoted'. 'Quoted' in this context means the start-string is enclosed in a pair of matching opening/closing delimiters (not necessarily quotes) or at the end of the match.
def quoted_start(self, match):
    string = match.string
    start = match.start()
    if start == 0:                      # start-string at beginning of text
        return False
    prestart = string[start - 1]
    try:
        poststart = string[match.end()]
    except IndexError:                  # start-string at end of text
        return True  # not "quoted" but no markup start-string either
    return punctuation_chars.match_chars(prestart, poststart)
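The decisive call is `punctuation_chars.match_chars(prestart, poststart)`, which answers whether the characters just before and just after the matched start-string form a matching open/close pair. The stand-in below only covers a handful of ASCII pairs (the real helper presumably covers a much wider set of Unicode delimiters) and exists purely to show how prestart and poststart are derived.

import re

_PAIRS = {'"': '"', "'": "'", '(': ')', '[': ']', '{': '}', '<': '>'}

def match_chars(prestart, poststart):    # toy stand-in for the real helper
    return _PAIRS.get(prestart) == poststart

text = '"*"'                             # a '*' start-string wrapped in quotes
m = re.search(r'\*', text)
prestart, poststart = text[m.start() - 1], text[m.end()]
print(match_chars(prestart, poststart))  # True -> the start-string counts as quoted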
[ "def quotedstart(self, match):\n string = match.string\n start = match.start()\n end = match.end()\n if start == 0: # start-string at beginning of text\n return 0\n prestart = string[start - 1]\n try:\n poststart = string[end]\n if self.inline.openers.index(prestart) \\\n == self.inline.closers.index(poststart): # quoted\n return 1\n except IndexError: # start-string at end of text\n return 1\n except ValueError: # not quoted\n pass\n return 0", "def is_quoted(string):\n string = string.lstrip()\n return ((string.startswith('\"') and string.endswith('\"')) or\n (string.startswith(\"'\") and string.endswith(\"'\")))", "def isQuotedString(self):\r\n return _osgDB.Field_isQuotedString(self)", "def test_contains_quoted_with_escaped_newline(self):\n\n self.assert_selector(\n self.MARKUP,\n 'body :-soup-contains(\"Test\\\\\\ning\")',\n ['1'],\n flags=util.HTML\n )", "def needs_quote(arg):\n for c in arg:\n if c in ('\"', \"'\"):\n return True\n if c.isspace():\n return True\n else:\n return False", "def test_complete_html_start_tag_with_single_attribute_with_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show = '1' >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 13", "def __processQuote(self, parentElem, lines, inList):\r\n dequoted = []\r\n i = 0\r\n blank_line = False # allow one blank line between paragraphs\r\n for line in lines:\r\n m = CORE_RE['quoted'].match(line)\r\n if m:\r\n dequoted.append(m.group(1))\r\n i += 1\r\n blank_line = False\r\n elif not blank_line and line.strip() != '':\r\n dequoted.append(line)\r\n i += 1\r\n elif not blank_line and line.strip() == '':\r\n dequoted.append(line)\r\n i += 1\r\n blank_line = True\r\n else:\r\n break\r\n\r\n blockquote = etree.SubElement(parentElem, \"blockquote\")\r\n\r\n self.parseChunk(blockquote, dequoted, inList)\r\n self.parseChunk(parentElem, lines[i:], inList)", "def is_quoted(self, *args) -> \"bool\":\n return _ida_pro.channel_redir_t_is_quoted(self, *args)", "def match_multiline(self, text, delimiter, in_state, style):\n\t\t# If inside triple-single quotes, start at 0\n\t\tif self.previousBlockState() == in_state:\n\t\t\tstart = 0\n\t\t\tadd = 0\n\t\t# Otherwise, look for the delimiter on this line\n\t\telse:\n\t\t\tstart = delimiter.indexIn(text)\n\t\t\t# Move past this match\n\t\t\tadd = delimiter.matchedLength()\n\n\t\t# As long as there's a delimiter match on this line...\n\t\twhile start >= 0:\n\t\t\t# Look for the ending delimiter\n\t\t\tend = delimiter.indexIn(text, start + add)\n\t\t\t# Ending delimiter on this line?\n\t\t\tif end >= add:\n\t\t\t\tlength = end - start + add + delimiter.matchedLength()\n\t\t\t\tself.setCurrentBlockState(0)\n\t\t\t# No; multi-line string\n\t\t\telse:\n\t\t\t\tself.setCurrentBlockState(in_state)\n\t\t\t\tlength = len(text) - start + add\n\t\t\t# Apply formatting\n\t\t\tself.setFormat(start, length, style)\n\t\t\t# Look for the next match\n\t\t\tstart = delimiter.indexIn(text, start + length)\n\n\t\t# Return True if still inside a multi-line string, False otherwise\n\t\tif self.currentBlockState() == in_state:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def is_multiline_string(self):\n return self.is_string and self.value.endswith(('\"\"\"', \"'''\"))", "def test_complete_html_start_tag_with_single_attribute():\n\n # Arrange\n input_tag_name = 
\"a\"\n string_to_parse = \" show=1>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 8", "def test_tag_with_double_quote(self):\n code, out, err = self.t(\"start 1h ago 'this is a \\\"test\\\"'\")\n self.assertIn(\"Note: '\\\"this is a \\\\\\\"test\\\\\\\"\\\"' is a new tag\", out)\n self.t(\"stop\")\n self.t(\"delete @1\")", "def is_inline_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"inline\"", "def test_complete_html_start_tag_with_multiple_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show=1 maximize=1 opacity='70'>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 32", "def test_complete_html_start_tag_with_single_no_value_attributes_and_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 7", "def isNextString(self):\r\n reg = re.compile('^(\\\"[^\\\"]*\\\")', re.DOTALL)\r\n match = re.search(reg, self.lines)\r\n if match is None:\r\n return\r\n start , end = match.regs[0]\r\n if start == 0 and end != 0:\r\n self.token = self.lines[start+1:end-1]\r\n self.lines = self.lines[end:]\r\n self._tokenType = \"STRING_CONST\"\r\n return True", "def test_smart_complex_cases_star(self):\n\n self.check_markdown(\n '''\n ***I'm italic and bold* I am just bold.**\n\n ***I'm bold and italic!** I am just italic.*\n ''',\n '''\n <p><strong><em>I'm italic and bold</em> I am just bold.</strong></p>\n <p><em><strong>I'm bold and italic!</strong> I am just italic.</em></p>\n ''',\n True\n )", "def match(self, bot, user, msg, tag_info):\n cmd = msg.lower().strip()\n return cmd == \"!quote\" or cmd.startswith(\"!quote \")", "def test_complete_html_start_tag_with_single_no_value_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 6", "def test_contains_quoted_with_escaped_newline_with_carriage_return(self):\n\n self.assert_selector(\n self.MARKUP,\n 'body :-soup-contains(\"Test\\\\\\r\\ning\")',\n ['1'],\n flags=util.HTML\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check each of the patterns in `self.implicit_dispatch` for a match, and dispatch to the stored method for the pattern. Recursively check the text before and after the match. Return a list of `nodes.Text` and inline element nodes.
def implicit_inline(self, text, lineno): if not text: return [] for pattern, method in self.implicit_dispatch: match = pattern.search(text) if match: try: # Must recurse on strings before *and* after the match; # there may be multiple patterns. return (self.implicit_inline(text[:match.start()], lineno) + method(match, lineno) + self.implicit_inline(text[match.end():], lineno)) except MarkupMismatch: pass return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
[ "def _handleInline(self, line):\r\n\r\n if not(line):\r\n return [self.doc.createTextNode(' ')]\r\n\r\n for pattern in self.inlinePatterns:\r\n list = self._applyPattern( line, pattern)\r\n if list: return list\r\n\r\n return [self.doc.createTextNode(line)]", "def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l", "def __code_pattern_analyzer__(self):\n\n if self.get_pattern() is not None and len(self.get_pattern()) == len(self.get_pattern_seperator()):\n for i in range(len(self.get_pattern())):\n pattern_sep = str(self.get_pattern_seperator()[i]) if self.get_pattern_seperator()[i] else None\n data, pattern = 
condition_checker.check_condition(str(self.get_pattern()[i]), self.dataframe,\n pattern_sep)\n if self.get_run_pattern_match():\n self.__report_xlsx__(data, \"%s_pattern\" % self.get_pattern()[i])\n pattern.to_html(\"%s.html\" % os.path.join(self.report_path, self.get_pattern()[i] + \"Pivot_\" +\n self.get_timestamp()))\n else:\n print(\"The pattern input is expected to be list and should be of same length as pattern separators\")", "def explicit_construct(self, match):\r\n errors = []\r\n for method, pattern in self.explicit.constructs:\r\n expmatch = pattern.match(match.string)\r\n if expmatch:\r\n try:\r\n return method(self, expmatch)\r\n except MarkupError, error:\r\n lineno = self.state_machine.abs_line_number()\r\n message = ' '.join(error.args)\r\n errors.append(self.reporter.warning(message, line=lineno))\r\n break\r\n nodelist, blank_finish = self.comment(match)\r\n return nodelist + errors, blank_finish", "def process_paths(\n\t\ttext: str, pattern: str, process_match: Callable[[str], None],\n\t\treplacement: Union[str, Callable[..., str]]):\n\n\t# all the matching files in the given text\n\tfiles = re.findall(pattern, text)\n\n\t# breakpoint()\n\n\t# every one of them...\n\tfor file in files:\n\t\t# ...is processed\n\t\tprocess_match(file)\n\n\t# replacement of matches\n\treturn re.sub(pattern, replacement, text)", "def explicit_construct(self, match):\n errors = []\n for method, pattern in self.explicit.constructs:\n expmatch = pattern.match(match.string)\n if expmatch:\n try:\n return method(self, expmatch)\n except MarkupError, detail: # never reached?\n errors.append(\n self.statemachine.memo.reporter.warning('%s: %s'\n % (detail.__class__.__name__, detail)))\n break\n nodelist, blankfinish = self.comment(match)\n return nodelist + errors, blankfinish", "def parse(self, text):\n #: Do not process empty strings (Issue #3)\n if text.strip() == \"\":\n return \"\"\n #: Do not process strings consisting of a single punctuation mark (Issue #4)\n elif text.strip() in PUNCTUATION:\n _sym = text.strip()\n if _sym in tuple('.?!'):\n _tag = \".\"\n else:\n _tag = _sym\n if self.lemmata:\n return \"{0}/{1}/O/O/{0}\".format(_sym, _tag)\n else:\n return \"{0}/{1}/O/O\".format(_sym, _tag)\n if self.tokenize:\n _tokenized = \" \".join(self.tokenizer.tokenize(text))\n else:\n _tokenized = text\n\n _parsed = pattern_parse(_tokenized,\n # text is tokenized before it is passed on to\n # pattern.de.parse\n tokenize=False,\n tags=self.tags, chunks=self.chunks,\n relations=self.relations, lemmata=self.lemmata,\n encoding=self.encoding, tagset=self.tagset)\n if self.pprint:\n _parsed = pattern_pprint(_parsed)\n\n return _parsed", "def test_patterns2(text, patterns=[]):\n # look for each pattern in the text and print the results\n for pattern, desc in patterns:\n print 'Pattern %r (%s) \\n' % (pattern, desc)\n print ' %r' % text\n for match in re.finditer(pattern, text):\n s = match.start()\n e = match.end()\n prefix = ' ' * (s)\n print ' %s%s%s ' % (prefix, text[s:e], ' '*(len(text)-e)),\n print match.groups()\n if match.groupdict():\n print '%%s' % ( ' ' * (len(text)-s), match.groupdict())\n print\n return", "def test_patterns(text, patterns=[]):\n # Look for each pattern in the text and print the results\n for pattern, desc, in patterns:\n print \"Pattern %r (%s)\\n\" % (pattern, desc)\n print ' %r' % text\n for match in re.finditer(pattern, text):\n s = match.start()\n e = match.end()\n substr = text[s:e]\n n_backslashes = text[:s].count('\\\\')\n prefix = '.' 
* (s + n_backslashes)\n print ' %s%r' % (prefix, substr)\n print\n return", "def _render(self, tokens, options, env):\n pending_tags = []\n pending_content = [[]]\n for t, token in enumerate(tokens):\n if token.type == \"fence\": # Special case\n pending_content[-1].append(self.fence(tokens, t, options, env))\n elif token.tag != \"\":\n if not token.nesting: # Directly append to content\n c = [token.content] if token.content else []\n tag = getattr(dominate.tags, token.tag)\n tag = tag(*c) if token.attrs is None else tag(*c, **token.attrs)\n pending_content[-1].append(tag)\n elif len(pending_tags) > 0 and pending_tags[-1] == token.tag: # Closing tag\n t = pending_tags.pop()\n c = pending_content.pop()\n tag = getattr(dominate.tags, t)\n tag = tag(c) if token.attrs is None else tag(c, **token.attrs)\n pending_content[-1].append(tag)\n else: # Opening tag\n if token.tag == \"p\" and len(pending_tags) > 0 and pending_tags[-1] == \"li\":\n continue\n\n pending_tags.append(token.tag)\n pending_content.append([])\n elif token.children is not None:\n assert len(token.children) > 0\n pending_content[-1].extend(self._render(token.children, options, env))\n else:\n if not token.hidden:\n pending_content[-1].append(escapeHtml(token.content))\n\n assert len(pending_tags) == 0, pending_tags\n assert len(pending_content) == 1, pending_content\n\n return pending_content[-1]", "def getcallback(self, txt):\n\n for i in self.relist:\n try:\n result = re.search(i.compiled, txt)\n if result:\n return i\n except:\n pass", "def gather_statements(self, text):\r\n position = 0\r\n while position < len(text):\r\n # Get the next statement match\r\n match = _dsl.match(text, pos=position)\r\n\r\n if match is None:\r\n end_position = min(\r\n text.find('\\n', position) + 1,\r\n len(text)\r\n )\r\n msg = f\"Couldn't find a match at position {position}.\"\r\n msg += f\"Mis-match starts at {repr(text[position:end_position])}\"\r\n raise ValueError(msg)\r\n\r\n position = match.end()\r\n yield match.groupdict()", "def make_elements(tokens, text, start=0, end=None, fallback=None):\n # type: (List[Token], str, int, Optional[int], ElementType) -> List[InlineElement]\n result = [] # type: List[InlineElement]\n end = end or len(text)\n prev_end = start\n for token in tokens:\n if prev_end < token.start:\n result.append(fallback(text[prev_end : token.start])) # type: ignore\n result.append(token.as_element())\n prev_end = token.end\n if prev_end < end:\n result.append(fallback(text[prev_end:end])) # type: ignore\n return result", "def postprocess(self, block, switch=None):\n if constants.DETECT_RECURSION:\n assert not block.contains(self)\n \n # Pass the enclosing py:switch directive down in the hierarchy\n if isinstance(block, base_blocks.SwitchBlock):\n switch = block\n\n # Postprocess all the child blocks, it also allows for replacing them\n block.apply_transformation(self.postprocess, switch)\n\n # Collect py:when and py:otherwise directives for the enclosing py:switch one\n if isinstance(block, base_blocks.CaseBlock):\n assert switch, 'Found py:when directive without an enclosing py:choose on line #%d!' % block.lineno\n switch.when_blocks.append(block)\n return []\n if isinstance(block, base_blocks.OtherwiseBlock):\n assert switch, 'Found py:otherwise directive without an enclosing py:choose on line #%d!' 
% block.lineno\n switch.otherwise_blocks.append(block)\n return []\n\n # Mark the py:switch directive as \"prepared\" when all its children have been processed\n if isinstance(block, base_blocks.SwitchBlock):\n block.prepared = True\n \n # Do not escape the output of template functions defined in this template\n if isinstance(block, base_blocks.TextExpressionBlock):\n \n expression = block.data.strip()\n \n if expression.endswith(')'):\n \n function_name = expression.split('(', 1)[0].strip()\n \n if function_name == 'Markup':\n block = self.blocks_module.MarkupExpressionBlock(\n block.lineno, expression[7: -1].strip())\n \n if function_name in self.function_map:\n block = self.blocks_module.MarkupExpressionBlock(\n block.lineno, expression)\n \n elif expression == 'None':\n # Genshi converts None valued expressions to empty output\n return []\n \n # Finalize elements\n if isinstance(block, base_blocks.ElementBlock):\n \n if block.start_tag:\n \n # We can't shorten the element if there are any child elements\n # in it or we are outputting XHTML and this element does not\n # have a short form.\n # See also: http://www.w3.org/TR/xhtml1/#guidelines\n if (block.children or\n (self.output_standard == 'xhtml' and \n ':' not in block.data and\n block.data not in constants.SHORT_HTML_ELEMENTS_SET)):\n \n # Close start tag\n block.start_tag.append(\n self.blocks_module.MarkupBlock(block.lineno, u'>'))\n \n else:\n # Shorten the element\n block.start_tag.append(\n self.blocks_module.MarkupBlock(block.lineno, u' />'))\n block.end_tag = None\n \n return [block]", "def mark_text_lines(self, arrow, conditions_panels):\n fig = self.fig\n average_height = np.median([cc.height for cc in fig.connected_components])\n\n areas = [cc.area for cc in fig.connected_components]\n areas.sort()\n def condition1(cc): return cc.role != FigureRoleEnum.STRUCTUREAUXILIARY\n if arrow.is_vertical:\n def condition2(cc): return cc.top > arrow.top and cc.bottom < arrow.bottom\n else:\n def condition2(cc): return cc.left > arrow.left and cc.right < arrow.right\n\n condition = condition1 and condition2\n middle_pixel = arrow.center_px\n def distance_fn(cc): return 2.2 * cc.height\n core_ccs = find_nearby_ccs(middle_pixel, fig.connected_components, (3 * average_height, distance_fn),\n condition=condition)\n if not core_ccs:\n for pixel in arrow.pixels[::10]:\n core_ccs = find_nearby_ccs(pixel, fig.connected_components, (2 * average_height, distance_fn),\n condition=condition)\n if len(core_ccs) > 1:\n break\n else:\n log.warning('No conditions were found in the initial scan. 
Aborting conditions search...')\n return []\n\n if conditions_panels:\n for panel in conditions_panels:\n core_ccs += find_nearby_ccs(panel, fig.connected_components, (3 * average_height, distance_fn),\n condition=condition)\n\n conditions_region = Panel.create_megarect(core_ccs)\n\n cropped_region = Crop(erase_elements(fig, conditions_panels), conditions_region) # Do not look at structures\n\n text_lines = [TextLine(None, None, top, bottom, crop=cropped_region, anchor=anchor) for (top, bottom, anchor) in\n self.identify_text_lines(cropped_region)]\n\n text_lines = [text_line.in_main_figure for text_line in text_lines]\n\n return text_lines", "def match(self, text, pos, lno):\n mtch = self.pattern.match(text, pos)\n ret = []\n if self.next_rule is not None and mtch is not None:\n pos = 0\n for rule in self.next_rule:\n another_mtch, another_t = rule.match(mtch.group(), pos, 0)\n if another_mtch:\n ret.append(another_t)\n pos += len(another_mtch.group())\n else:\n if mtch:\n ret = mtch.group()\n else:\n ret = ''\n return mtch, Token(self.identifier, content=ret, position=pos, lineno=lno)", "def _parse(self, remaining_text, tree, frontier):\n\n # If the tree covers the text, and there's nothing left to\n # expand, then we've found a complete parse; return it.\n if len(remaining_text) == 0 and len(frontier) == 0:\n if self._trace:\n self._trace_succeed(tree, frontier)\n yield tree\n\n # If there's still text, but nothing left to expand, we failed.\n elif len(frontier) == 0:\n if self._trace:\n self._trace_backtrack(tree, frontier)\n\n # If the next element on the frontier is a tree, expand it.\n elif isinstance(tree[frontier[0]], Tree):\n yield from self._expand(remaining_text, tree, frontier)\n\n # If the next element on the frontier is a token, match it.\n else:\n yield from self._match(remaining_text, tree, frontier)", "def _speak_as(\n self,\n element,\n regular_expression,\n data_property_value,\n operation\n ):\n\n children = []\n pattern = re.compile(regular_expression)\n content = element.get_text_content()\n while content:\n matches = pattern.search(content)\n if matches is not None:\n index = matches.start()\n children = operation(content, index, children)\n\n new_index = index + 1\n content = content[new_index:]\n else:\n break\n if children:\n if content:\n children.append(self._create_content_element(\n content,\n data_property_value\n ))\n while element.has_children():\n element.get_first_node_child().remove_node()\n for child in children:\n element.append_element(child)", "def _parses(self, chart, start_sym, tree_class):\n return chart.parses(start_sym, tree_class=tree_class)", "def get_events_from_text_msg(self, text_msg):\n\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check attribution shape. Return the index past the end of the attribution, and the indent.
def check_attribution(self, indented, attribution_start): indent = None i = attribution_start + 1 for i in range(attribution_start + 1, len(indented)): line = indented[i].rstrip() if not line: break if indent is None: indent = len(line) - len(line.lstrip()) elif len(line) - len(line.lstrip()) != indent: return None, None # bad shape; not an attribution else: # return index of line after last attribution line: i += 1 return i, (indent or 0)
[ "def indirection_level(self):\n return self.ty.count(\"*\") + self.ty.count(\"[\")", "def getMarkPosition(self, i: int) -> int:\n ...", "def _indents(self, line) -> Tuple[int, int]:\n import re\n\n indent = len(re.match(r'( *)', line).group(1))\n list_match = re.match(r'( *)(([*\\-+>]+|\\w+\\)|\\w+\\.) +)', line)\n if list_match:\n sub_indent = indent + len(list_match.group(2))\n else:\n sub_indent = indent\n\n return indent, sub_indent", "def rowCheck(self, i):\n #row is list of tuples\n #row represents a row of pixels of a photo\n row = self.array[i]\n if row.count(self.outline) > self.size[0]/2:\n return (True, i)\n else: return (False,i)", "def calc_sag_offset_idx(self):\n return self.offset_pnt-1", "def _check_legal_index(self, row, col):\n return 0 <= row and row < self._size and\\\n 0 <= col and col < self._size", "def getAbsLineIndent(self,pointer):\n p=pointer\n while p>0 and self.pihCode[p]!=\"\\n\":\n p=p-1\n p=p+1\n indent=0\n while p<len(self.pihCode) and self.pihCode[p] in string.whitespace:\n p+=1\n indent+=1\n return indent", "def get_element_indent(file, encaps):\n line_nr = encaps.start.line - 1\n start_indent = get_indent_of_line(file, line_nr)\n if len(file[line_nr].rstrip()) <= encaps.start.column:\n indent = get_indent_of_line(file, line_nr + 1)\n else:\n indent = encaps.start.column\n\n indent = indent - start_indent if indent > start_indent else 0\n return indent", "def entDimPos(ent):\n return entDimString(ent), entPosString(ent)", "def test_offset():\n segmenter = NLTKSentencizer()\n text = ' This , text is... . Amazing !!'\n docs_chunks = segmenter.segment(np.stack([text, text]))\n for chunks in docs_chunks:\n assert len(chunks) - 1 == chunks[-1]['offset']", "def _beginningOfContent(line: str) -> int:\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0", "def check_shape(chunk, info):\n logger.warn(\"%s:%s[%d]: Checking shapes:\" % \n (os.path.basename(inspect.stack()[2].filename),\n inspect.stack()[2].function,\n inspect.stack()[2].lineno))\n if chunk:\n for key in chunk:\n logger.warn(\"%s %s\" % ( key, str(chunk[key].shape)))\n return chunk", "def offset_at_position(self):\n offset = 0\n for i, curr_line in enumerate(self.doc.iter_lines()):\n if i == self.line:\n break\n offset += len(curr_line)\n\n return offset + self.col", "def test_locations():\n segmenter = NLTKSentencizer()\n text = (\n \"This is a sentence. Here's another sentence. One more sentence? Aaand, yes, one more! 
\\n\"\n \"Lastly, this one is the last sentence.\"\n )\n docs_chunks = segmenter.segment(np.stack([text, text]))\n\n for chunks in docs_chunks:\n # first sentence should start at the first index or later\n assert chunks[0]['location'][0] >= 0\n # last sentence can not end at an index greater than the length of text\n assert chunks[-1]['location'][-1] <= len(text)\n # sentences beginning and ending indeces cannot overlap\n for i in range(1, len(chunks)):\n assert chunks[i]['location'][0] > chunks[i - 1]['location'][-1]", "def test_paragraph_offsets_present(self):\n text = \"This (a) is a good (b) test for (c) something like this.\"\"\"\n self.assertEqual((5, 19), self.regParser.paragraph_offsets(text, 0, 0))\n self.assertEqual((19, 32),\n self.regParser.paragraph_offsets(text, 0, 1))\n self.assertEqual((32, len(text)),\n self.regParser.paragraph_offsets(text, 0, 2))", "def _compute_position(input, index):\n line = 1\n col = 1\n eol = None # last end of line character\n for c in input[:index]:\n if c == '\\n' or c == '\\r':\n if eol is None or eol == c:\n eol = c\n line += 1\n col = 1\n else:\n # ignore second of '\\n\\r' and '\\r\\n' sequences\n eol = None\n else:\n col += 1\n return (line, col)", "def _get_accession_number(self, doc_description):\n print(\"doc_description: \", doc_description)\n access_num = None\n try:\n access_num = util.extract_text_between_expressions(doc_description, \"Acc-no:\", \"&nbsp\")\n except AssertionError:\n access_num = util.extract_text_between_expressions(doc_description, \"Acc-no: \", \" \")\n\n access_num = access_num.strip()\n assert len(access_num) == 20\n \n return access_num", "def _getIndice(self,center):\n # See the method addHealthCenter, the keys of self.centers\n # are simply 0, 1, 2, 3, ..., len(self.centers) - 1\n for index in self.centers.keys():\n if self.centers[index]==center:\n return index\n return -1", "def find_pos(self):\n self.y = 0\n for d in self.data:\n try:\n self.x = d.index('m')\n return\n except ValueError:\n self.y += 1", "def get_IA_position(self, maze):\n for y in range(len(maze)):\n for x in range(len(maze[y])):\n if maze[y][x] == self.letter:\n self.posx = x\n self.posy = y\n break\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check validity based on the ordinal value and the second line. Return true if the ordinal is valid and the second line is blank, indented, or starts with the next enumerator or an autoenumerator.
def is_enumerated_list_item(self, ordinal, sequence, format): if ordinal is None: return None try: next_line = self.state_machine.next_line() except EOFError: # end of input lines self.state_machine.previous_line() return 1 else: self.state_machine.previous_line() if not next_line[:1].strip(): # blank or indented return 1 result = self.make_enumerator(ordinal + 1, sequence, format) if result: next_enumerator, auto_enumerator = result try: if ( next_line.startswith(next_enumerator) or next_line.startswith(auto_enumerator) ): return 1 except TypeError: pass return None
[ "def validate_code_2(line: str) -> bool:\n try:\n (rules, code) = line.split(\":\")\n code = code.strip()\n except:\n return False\n\n ch = rules[-1]\n first, last = rules.split(\"-\")\n first = int(first) - 1\n last = int(last.split()[0]) - 1\n\n if first == last:\n print(\"cheeky\")\n return code[first] == ch\n\n # Use != between booleans for xor\n return (code[first] == ch) != (code[last] == ch)", "def is_valid(self):\n # Task 4.6\n if self.statement.conclusion != self.lines[-1].conclusion:\n return False\n for i in range(len(self.lines)):\n _map = {}\n if self.lines[i].justification is not None:\n if len(self.lines[i].justification) > i:\n return False\n if self.lines[i].rule is None:\n continue\n instance_i = self.instance_for_line(i)\n if not instance_i.is_instance_of(self.rules[self.lines[i].rule], _map):\n return False\n return True", "def _is_ingredient_heading_2(line):\n return line.strip() == '-------- ------------ --------------------------------'", "def isValidCursor(cursor):\n if not isinstance(cursor, dict):\n return False\n\n if set(cursor) != set([\"pos\", \"tail\", \"desiredCol\"]):\n return False\n\n if not isinstance(cursor[\"pos\"], list) or len(cursor[\"pos\"]) != 2:\n return False\n\n if not isinstance(cursor[\"tail\"], list) or len(cursor[\"tail\"]) != 2:\n return False\n\n for v in cursor[\"pos\"] + cursor[\"tail\"] + [cursor[\"desiredCol\"]]:\n if not isinstance(v, int) or v < 0:\n return False\n\n return True", "def validate_code(line: str) -> bool:\n try:\n (rules, code) = line.split(\":\")\n except:\n return False\n\n ch = rules[-1]\n min_occ, max_occ = rules.split(\"-\")\n min_occ = int(min_occ)\n max_occ = int(max_occ.split()[0])\n\n return code.count(ch) >= min_occ and code.count(ch) <= max_occ", "def _is_valid(self, value):\n # Check the instance has a 'choices' attribute\n if not hasattr(self, \"choices\"):\n raise Exception(\"A 'choices' attribute is needed by an 'Enum' \"\n \"control.\")\n if not isinstance(self.choices, tuple):\n raise Exception(\"A tuple of 'choices' is needed by an 'Enum' \"\n \"control.\")\n\n # If the value is not defined the control is valid or\n # check if the new value is in the enumerate structure\n if value is None or value in self.choices:\n return True\n else:\n return False", "def is_line_valid(self, line_number):\n # Task 4.6b\n if self.lines[line_number].is_assumption():\n if self.lines[line_number].formula in self.statement.assumptions:\n return True\n return False\n if self.lines[line_number].rule not in self.rules:\n return False\n if self.rule_for_line(line_number).is_specialization_of(self.lines[line_number].rule):\n for assumption in self.lines[line_number].assumptions:\n if line_number <= assumption:\n return False\n return True\n return False", "def validate_line_number(list_it, n):\n list_it.reset()\n\n # Find the length of the list\n length = get_length_of_list(list_it)\n\n # Check whether n is a valid line number\n return n <= length and n >= 1", "def is_valid_path(self, idx):\n start_idx = self.get_path_start(idx)\n valid = start_idx != self.INVALID_IDX\n return valid", "def _check_valid_indentation(self, lineno: int, line: str, left_stripped: str) -> None:\n if linelen := len(line):\n indent = linelen - len(left_stripped)\n expected_ind = 0 if line.startswith(('.', '+', '-', '$')) else self.indent\n if indent != expected_ind:\n diag = self.diags.indentation\n loc = self.make_source_range(' ' * indent, line, lineno)\n mess = f'Invalid indentation ({indent}), all regular (non-empty, non-parameter, non-seealso) 
text must be indented to {self.indent} columns'\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, loc, patch=Patch(loc, ' ' * expected_ind)\n )\n return", "def _check_valid_section_spacing(self, prevline: str, lineno: int) -> None:\n if prevline and not prevline.isspace():\n loc = self.make_source_range('', '', lineno)\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, self.diags.section_spacing,\n 'Missing empty line between sections, must have one before this section',\n loc, highlight=False, patch=Patch(loc, '\\n')\n )\n return", "def valid_move(self, next_loc: tuple) -> bool:\n cur_loc = self._location\n \n # handle blue piece moves\n if self._color == 'blue':\n\n # one space forward is valid\n if cur_loc[0] - next_loc[0] == 1 and next_loc[1] == cur_loc[1]:\n return True\n \n # one space sideways is valid\n elif abs(next_loc[1] - cur_loc[1]) == 1 and next_loc[0] == cur_loc[0]:\n return True\n \n # handle palace movement rules\n elif next_loc == (1, 4) and (cur_loc == (2, 3) or cur_loc == (2, 5)):\n return True\n elif cur_loc == (1, 4) and (next_loc == (0, 3) or next_loc == (0, 5)):\n return True\n return False\n \n # handle red pieces\n else:\n if next_loc[0] - cur_loc[0] == 1 and next_loc[1] == cur_loc[1]:\n return True\n elif abs(next_loc[1] - cur_loc[1]) == 1 and next_loc[0] == cur_loc[0]:\n return True\n \n # palace movements\n elif next_loc == (8, 4) and (cur_loc == (7, 3) or cur_loc == (7, 5)):\n return True\n elif cur_loc == (8, 4) and (next_loc == (9, 3) or next_loc == (9, 5)):\n return True\n return False", "def is_short_line_at(self, brd, x, y, dx, dy):\n # Avoid out-of-bounds errors\n if ((x + (brd.n - 1) * dx >= brd.w) or\n (y + (brd.n - 1) * dy < 0) or (y + (brd.n - 1) * dy >= brd.h)):\n return False\n # Get token at (x,y)\n t = brd.board[y][x]\n if t == 0:\n return False\n # Go through elements\n\n if not self.check_space_after: # We ONLY care if there is a blank space at end\n return False\n\n # Accounts for lines that are split in the middle, like 1 1 0 1\n split = False\n\n # Special check for horizontal: we don't care if there is 1 1 0 1 if the column below 0 is empty\n\n for i in range(1, brd.n):\n symbol = brd.board[y + i * dy][x + i * dx]\n if symbol != t:\n if symbol == 0:\n if y >= 1:\n if dx == 1 and dy == 0: # We don't care if this split line is unplayable, checks that\n if brd.board[y - 1][x + i] == 0:\n return False\n if split:\n return False\n if not split:\n split = True\n else:\n return False\n return True", "def hasIndentation(self, indentation, line):\n\t\n\t\treturn indentation == self.getIndentation(line)", "def isEnd(self, line):\r\n return self.startsWithAttribute(line)", "def _should_skip(self, line):\r\n return self._is_empty_line(line) or\\\r\n self._is_comment_line(line) or\\\r\n self._is_group_header_line(line) or\\\r\n self.delimiter not in line", "def check_next(self, token):\n\n if len(self.tokens) > 1 and self.tokens[1][0] == token:\n return True\n else:\n return False", "def is_line_valid(self, line):\n ret_val = self._is_line_valid(line)\n return ret_val", "def _check_valid_docstring_spacing(self) -> None:\n if self.Modifier.FLOATING in self.type_mod:\n return # floating docstring sections need not be checked for this\n\n end_line = self.extent.end.line + 1\n cursor_start = self.cursor.extent.start\n if end_line != cursor_start.line:\n # there is at least 1 (probably empty) line between the comment end and whatever it\n # is describing\n diag = self.diags.symbol_spacing\n mess = 'Invalid 
line-spacing between docstring and the symbol it describes. The docstring must appear immediately above its target'\n eloc = self.make_source_range('', '', end_line)\n floc = SourceRange.from_locations(self.make_source_location(end_line, 1), cursor_start)\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, eloc, highlight=False, patch=Patch(floc, '')\n )\n return", "def is_valid(self, node, lint_context):\n\n node_type = NodeType(node['type'])\n\n if node_type is NodeType.OPTION:\n # Remove & at head\n option_name = node['value'][1:]\n is_valid = option_name not in Abbreviations\n\n if not is_valid:\n self._make_description_by_option_name(option_name)\n\n return is_valid\n\n excmd_node = node\n is_set_cmd = excmd_node['ea']['cmd'].get('name') in SetCommandFamily\n\n if not is_set_cmd:\n return True\n\n option_expr = excmd_node['str'].split()[1]\n # Care `:set ft=vim` and `:set cpo&vim`, ...\n option_name = re.match(r'[a-z]+', option_expr).group(0)\n\n # After a \"set\" command, we can add an invert prefix \"no\" and \"inv\"\n # to options. For example, \"nowrap\" is an inverted option \"wrap\".\n is_valid = option_name not in AbbreviationsIncludingInvertPrefix\n\n if not is_valid:\n self._make_description_by_option_name(option_name)\n\n return is_valid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct and return the next enumerated list item marker, and an autoenumerator ("#" instead of the regular enumerator). Return ``None`` for invalid (out of range) ordinals.
def make_enumerator(self, ordinal, sequence, format): #" if sequence == '#': enumerator = '#' elif sequence == 'arabic': enumerator = str(ordinal) else: if sequence.endswith('alpha'): if ordinal > 26: return None enumerator = chr(ordinal + ord('a') - 1) elif sequence.endswith('roman'): try: enumerator = roman.toRoman(ordinal) except roman.RomanError: return None else: # shouldn't happen raise ParserError('unknown enumerator sequence: "%s"' % sequence) if sequence.startswith('lower'): enumerator = enumerator.lower() elif sequence.startswith('upper'): enumerator = enumerator.upper() else: # shouldn't happen raise ParserError('unknown enumerator sequence: "%s"' % sequence) formatinfo = self.enum.formatinfo[format] next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix + ' ') auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' ' return next_enumerator, auto_enumerator
[ "def get_next_item(self):\n return # osid.assessment.Item", "def test_incorrect_start(start):\n with raises(TypeError):\n next(ienumerate([21], start))", "def get_next_id(self):\n try:\n next_item = next(self)\n except StopIteration:\n raise IllegalState('no more elements available in this list')\n except Exception: # Need to specify exceptions here!\n raise OperationFailed()\n else:\n return next_item", "def next( self ):\n\n try: \n value = self.__sequence[ self.__nextValue ]\n except IndexError:\n raise StopIteration\n else:\n self.__nextValue += 1\n return value", "def nextname(self, ea, ui=True):\n # don't count this item\n ea = Data.Data(ea).ea + Data.Data(ea).getSize()\n output = idaapi.BADADDR\n while ea < self.end_ea:\n d = Data.Data(ea)\n if d.getName():\n output = ea\n break\n ea += d.getSize()\n if ui: idaapi.jumpto(ea)\n return '%07X' % output", "def nextimmref(self, ea, ui=True):\n # don't count this item\n ea = Data.Data(ea).ea + Data.Data(ea).getSize()\n output = idaapi.BADADDR\n while ea < self.end_ea:\n d = Data.Data(ea)\n if d.isCode() and '#' in d.getOrigDisasm():\n disasm = d.getOrigDisasm()\n # check out the xrefs from the data, see if it references to them\n xrefs = d.getXRefsFrom()\n for xref in xrefs[0]:\n if Data.Data(xref).getName() in disasm:\n output = ea\n break\n for xref in xrefs[1]:\n if Data.Data(xref).getName() in disasm:\n output = ea\n break\n if output != idaapi.BADADDR:\n break\n ea += d.getSize()\n if ui: idaapi.jumpto(ea)\n return '%07X' % output", "def next(iterator, default=None): # real signature unknown; restored from __doc__\n pass", "def enumerate1(it):\n return ((n+1, x) for n, x in enumerate(it))", "def advance(self):\n assert(self.get_dotted_symbol() is not None)\n\n return LR1Item(self.p,\n self.index + 1,\n self.lookahead_set)", "def __next__(self):\r\n\t\tif self.postion >= len(self.letters):\r\n\t\t\traise StopIteration\r\n\t\tletter = self.letters[self.postion]\r\n\t\tself.postion += 1\r\n\t\treturn letter", "def getNextItem(self):\n if len(self.items) > 0:\n return self.items.pop(0)\n return None", "async def next(\n itr: AnyIterator[T1], default: Union[T2, Sentinel] = Sentinel.MISSING\n) -> Union[T1, T2]:\n try:\n if isinstance(itr, AsyncIterator):\n return await itr.__anext__()\n\n try:\n return builtins.next(itr)\n except StopIteration:\n raise StopAsyncIteration\n except StopAsyncIteration:\n if default is Sentinel.MISSING:\n raise\n return default", "def next(self):\n if self.next_value is not None:\n next_value = self.next_value\n self.next_value = None\n return next_value\n else:\n return next(self.iterator)", "def nextval(iterable, value):\n i = iterable.index(value)\n i = 0 if i >= lastind(iterable) else i+1\n return iterable[i]", "def next_inventory_item(self):\n self.current_item_index = (self.current_item_index + 1) % len(self.inventory.items)\n self.current_item = self.inventory.get_item_name(self.current_item_index)", "def select_next_item(self) -> int:\n if not self.is_opened():\n self.open()\n self.selected_item += 1\n if self.selected_item == len(self.items):\n self.selected_item = -1\n return self.selected_item", "def get_next_letter(coll: Collection, letter: str='r') -> int:\n s = list(map(lambda x: int(x[8:]), coll.distinct('label')))\n s.sort()\n return s[-1] + 1 if s else letter", "def next(self):\n self.assert_open()\n if not infolist_next(self._infolist):\n raise StopIteration()\n self._old_item.close()\n new_item = InfoListItem(self, infolist_fields(self._infolist))\n self._old_item = new_item\n return 
new_item", "def get_next_items(self, n):\n return # osid.assessment.Item", "def next(self):\n\n try:\n next_token = next(self.token_generator)\n # print(n)\n return next_token\n except StopIteration:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract & return field name from a field marker match.
def parse_field_marker(self, match): field = match.group()[1:] # strip off leading ':' field = field[:field.rfind(':')] # strip off trailing ':' etc. return field
[ "def parse_field_marker(self, match):\n field = match.string[1:] # strip off leading ':'\n field = field[:field.find(':')] # strip off trailing ':' etc.\n tokens = field.split()\n return tokens[0], tokens[1:] # first == name, others == args", "def _get_field_name(cls, rule_content):\n return rule_content.get(cls.fieldname, None)", "def getFieldName(self, *args) -> \"PyObject *\":\n return _coin.SoFieldContainer_getFieldName(self, *args)", "def _get_field_name(self):\n return self.field_name", "def __extractField(self, raw: dict, name: str):\n if not 'fields' in raw:\n return None\n fields = raw['fields']\n if not name in fields:\n return None\n return fields[name]", "def _get_field(self, line):\n field_name, _ = line.split(\",\", 1)\n field_name = field_name.strip()\n return field_name", "def extract_name(key):\n # we assume that the \"key\" is like \"(Full Name, blah...)\"\n fields = key.lstrip('(').rstrip(')').split(',')\n return fields[0]", "def get_field_name(content_disposition: str) -> str:\n parts = content_disposition.split(';')\n for part in parts:\n part_stripped = part.strip()\n search_result = re.search(\"^name=\\\"(.*)\\\"$\", part_stripped)\n if search_result:\n return search_result.group(1)", "def field_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"field_name\")", "def get_field(self, field_name):\n\n field_names = field_name.split('.')\n return _find_field(self.__msg, field_names)", "def parseField(self, text):\n fieldMatch = _fieldPartRe.match(text)\n if fieldMatch:\n modifier = fieldMatch.group(1)\n fieldName = fieldMatch.group(2)\n try:\n if not modifier:\n return self.fieldDict[fieldName]\n elif modifier == '*' * len(modifier):\n return fieldformat.AncestorLevelField(fieldName,\n len(modifier))\n elif modifier == '?':\n return fieldformat.AnyAncestorField(fieldName)\n elif modifier == '&':\n return fieldformat.ChildListField(fieldName)\n elif modifier == '#':\n match = _levelFieldRe.match(fieldName)\n if match and match.group(1) != '0':\n level = int(match.group(1))\n return fieldformat.DescendantCountField(fieldName,\n level)\n elif modifier == '!':\n return (self.parentFormats.fileInfoFormat.\n fieldDict[fieldName])\n except KeyError:\n pass\n return text", "def __getField(self, record, field):\n\t\t(offset, length) = (self.allFields[field].ffOffset, self.allFields[field].maxlength)\n\t\treturn record[offset:offset+length].strip()", "def _get_field(extras: dict, field_name: str):\n backcompat_prefix = \"extra__dataprep__\"\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix \"\n \"when using this method.\"\n )\n if field_name in extras:\n return extras[field_name] or None\n prefixed_name = f\"{backcompat_prefix}{field_name}\"\n return extras.get(prefixed_name) or None", "def _extract_field_with_regex(self, field):\n matched = re.search(field, self.text)\n if not matched:\n err_msg = f\"Failed to extract data with regex! 
=> {field}\\n\"\n err_msg += f\"response body: {self.text}\\n\"\n logger.error(err_msg)\n raise exceptions.ExtractFailure(err_msg)\n\n return matched.group(1)", "def name(field: BaseField) -> str:\n return field.NAME", "def try_get_field(self, field_name: str) -> Optional[fields.Field]:\n prefix = field_name.split(\"$\")[0]\n if prefix not in self.field_prefix_map:\n return None\n\n field = self.field_prefix_map[prefix]\n if isinstance(field, fields.BaseTemplateField):\n # We use the regex here since we want to also match template fields.\n if \"$\" in field_name and not re.match(field.get_regex(), field_name):\n return None\n return field", "def GetNamedFieldInformation(self, vtkInformation, p_int, string):\n ...", "def _get_field_by_name(model, field):\n field_dict = {x.name: x for x in model._meta.get_fields()} # noqa\n return field_dict[field]", "def get_field(self, name):\n return self._fields[name]", "def field(self, tag):\n return self[self.index(tag)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of `node.option` and `node.option_argument` objects, parsed from an option marker match.
def parse_option_marker(self, match): optlist = [] optionstrings = match.group().rstrip().split(', ') for optionstring in optionstrings: tokens = optionstring.split() delimiter = ' ' firstopt = tokens[0].split('=', 1) if len(firstopt) > 1: # "--opt=value" form tokens[:1] = firstopt delimiter = '=' elif (len(tokens[0]) > 2 and ((tokens[0].startswith('-') and not tokens[0].startswith('--')) or tokens[0].startswith('+'))): # "-ovalue" form tokens[:1] = [tokens[0][:2], tokens[0][2:]] delimiter = '' if len(tokens) > 1 and (tokens[1].startswith('<') and tokens[-1].endswith('>')): # "-o <value1 value2>" form; join all values into one token tokens[1:] = [' '.join(tokens[1:])] if 0 < len(tokens) <= 2: option = nodes.option(optionstring) option += nodes.option_string(tokens[0], tokens[0]) if len(tokens) > 1: option += nodes.option_argument(tokens[1], tokens[1], delimiter=delimiter) optlist.append(option) else: raise MarkupError( 'wrong number of option tokens (=%s), should be 1 or 2: ' '"%s"' % (len(tokens), optionstring)) return optlist
[ "def argv(self):\n optlist = []\n for n in range(self.count):\n optlist.append(self.flag)\n if self.values is not None:\n optlist.append(self.values[n])\n return optlist", "def ParseOptions():\n parser = optparse.OptionParser()\n parser.add_option('--version_file', dest='version_file')\n parser.add_option('--outdir', dest='outdir')\n\n (options, _) = parser.parse_args()\n return options", "def getValuesFromOption(opt, cmd):\n values = []\n pat = opt + r'\\s*([\\w\\_\\/\\\\]+)'\n m = re.search(pat, cmd)\n while m:\n # found\n val = m.group(1)\n values.append(val)\n # remove the option-value pair\n cmd = re.sub(opt + '\\s*' + val, '', cmd)\n m = re.search(pat, cmd)\n return values", "def commandLineOptionsToList(stringOptions):\n return stringOptions.split()", "def options(self) -> List[OptionInfo]:\n return []", "def multiget(self, option, section = None):\n\n matches = []\n if section is None:\n section = self.default_section\n if self.cfg.has_option(section, option):\n yield self.cfg.get(section, option)\n option += \".\"\n matches = [o for o in self.cfg.options(section)\n if o.startswith(option) and o[len(option):].isdigit()]\n matches.sort()\n for option in matches:\n yield self.cfg.get(section, option)", "def _do_arg_parse(parser):\n m_index = index_(\"-m\", sys.argv[1:])\n c_index = index_(\"-c\", sys.argv[1:])\n\n if m_index == NOT_IN_LIST and c_index == NOT_IN_LIST:\n args_to_parse = sys.argv[1:]\n remainder = None\n\n elif m_index < c_index:\n args_to_parse = sys.argv[1:m_index+3]\n remainder = sys.argv[m_index+3:]\n\n elif c_index < m_index:\n args_to_parse = sys.argv[1:c_index+3]\n remainder = sys.argv[c_index+3:]\n\n opts = parser.parse_args(args_to_parse)\n\n if remainder is None:\n remainder = opts.remainder\n\n return opts, remainder", "def read_options(data: Any) -> List[CommandOption]:\n def __fn(doc_option: Dict[str, Any]) -> CommandOption:\n name = doc.read(doc_option, \"name\", doc.typed(str))\n description = doc.read(doc_option, \"description\", doc.typed(str))\n validate_meta(MetaType.OPTION, name, description)\n kind_key = doc.read(doc_option, \"type\", doc.typed(Union[str, int]))\n try:\n kind = CommandOptionType.from_str(kind_key) if isinstance(kind_key, str) else CommandOptionType(kind_key)\n except ValueError as err:\n raise InvalidOptionTypeError(\" \".join(\n [f\"{kind_key} is not a valid command option type.\",\n \"It must be between 1 and 9 (both inclusive).\"])) from err\n option = {\n \"name\": name,\n \"description\": description,\n \"type\": kind.value,\n }\n doc.read(doc_option, \"required\", doc.typed(bool, optional=True), to=option)\n if \"choices\" in doc_option and isinstance(doc_option[\"choices\"], list):\n choices = []\n for doc_choice in doc_option[\"choices\"]:\n if isinstance(doc_choice, (str, int)):\n doc_choice = {\"name\": doc_choice}\n if not isinstance(doc_choice, dict):\n raise doc.ValueTypeError(f\"Choice of unexpected type '{type(doc_choice).__name__}'\")\n choice_name = doc.read(doc_choice, \"name\", doc.typed(str))\n validate_length(\"Choice name\", choice_name)\n choices.append({\n \"name\": choice_name,\n \"value\": doc.read(doc_choice, [\"value\", \"name\"], doc.typed(Union[str, int]))\n })\n option[\"choices\"] = choices\n doc.read(doc_option, \"options\", doc.with_default(read_options, None), to=option)\n return option # type: ignore\n\n return __read_list_or_keyed(\"Option\", data, __fn)", "def launcher_argv(self, is_geopmctl):\n result = []\n result.extend(self.num_node_option())\n result.extend(self.exclude_list_option())\n 
result.extend(self.num_rank_option(is_geopmctl))\n if self.config and self.config.do_affinity:\n result.extend(self.affinity_option(is_geopmctl))\n result.extend(self.preload_option())\n result.extend(self.timeout_option())\n result.extend(self.time_limit_option())\n result.extend(self.job_name_option())\n result.extend(self.node_list_option())\n result.extend(self.host_file_option())\n result.extend(self.partition_option())\n result.extend(self.reservation_option())\n result.extend(self.performance_governor_option())\n return result", "def options(self, section: str) -> List[str]:", "def get_option_values(self):\n \n class CommandLineOptions(object):\n def __getattr__(self, name):\n # if an attribute can not be found, this is the last function called\n all_option_names=\", \".join(vars(self).keys())\n error_message=\"Unable to find option '{0}' in command line options.\\n\".format(name)\n error_message+=\"The available options are: {0}\".format(all_option_names)\n raise AttributeError(error_message)\n \n # get arguments from the command line (will not run again if already parsed)\n if not self._user_asked:\n self.ask_user()\n \n args=CommandLineOptions()\n for option in list(self._user_arguments.keys()) + list(self._arguments.keys()):\n option = re.sub(r'-', '_', option)\n value = self.get(option)\n setattr(args,option,value)\n \n return args", "def extract_option(L):\n for k in xrange(len(L)):\n e = L[k]\n if e[0] == '-':\n if e[1] == 'L': return ('L',None)\n try:\n return (e[1],L[k+1])\n except:\n print 'no data with option', e[1]\n return None\n return None", "def GetCommandLineOptions(self):\n return self.args_", "def gen_command_options(self, command):\n for option in self.opts[command]:\n yield option", "def _parse_arguments(\n arguments_ast: Optional[List[dict]]\n) -> List[\"ArgumentNode\"]:\n if arguments_ast:\n return [_parse_argument(argument) for argument in arguments_ast]\n return []", "def parse_extension_options(self, option_spec, datalines):\r\n node = nodes.field_list()\r\n newline_offset, blank_finish = self.nested_list_parse(\r\n datalines, 0, node, initial_state='ExtensionOptions',\r\n blank_finish=True)\r\n if newline_offset != len(datalines): # incomplete parse of block\r\n return 0, 'invalid option block'\r\n try:\r\n options = utils.extract_extension_options(node, option_spec)\r\n except KeyError, detail:\r\n return 0, ('unknown option: \"%s\"' % detail.args[0])\r\n except (ValueError, TypeError), detail:\r\n return 0, ('invalid option value: %s' % ' '.join(detail.args))\r\n except utils.ExtensionOptionError, detail:\r\n return 0, ('invalid option data: %s' % ' '.join(detail.args))\r\n if blank_finish:\r\n return 1, options\r\n else:\r\n return 0, 'option data incompletely parsed'", "def visitOption(self, opt, visitor):\n for arg in opt.args:\n arg.accept(visitor)", "def all_flag_args(flags, key, argc):\n match_indices = filter(lambda i: flags[i] == key, range(len(flags)))\n if argc == 1:\n return [flags[i + 1] for i in match_indices]\n elif argc > 1:\n return [flags[i + 1 : i + argc + 1] for i in match_indices]", "def match_options(self): # pragma: no cover", "def visitAll(self, visitor):\n for opt in self.m.options:\n for arg in opt.args:\n arg.accept(visitor)\n \n self.optionParam.accept(visitor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a directive then run its directive function.
def run_directive(self, directive, match, type_name, option_presets): if isinstance(directive, (FunctionType, MethodType)): from docutils.parsers.rst import convert_directive_function directive = convert_directive_function(directive) lineno = self.state_machine.abs_line_number() initial_line_offset = self.state_machine.line_offset indented, indent, line_offset, blank_finish \ = self.state_machine.get_first_known_indented(match.end(), strip_top=0) block_text = '\n'.join(self.state_machine.input_lines[ initial_line_offset : self.state_machine.line_offset + 1]) try: arguments, options, content, content_offset = ( self.parse_directive_block(indented, line_offset, directive, option_presets)) except MarkupError as detail: error = self.reporter.error( 'Error in "%s" directive:\n%s.' % (type_name, ' '.join(detail.args)), nodes.literal_block(block_text, block_text), line=lineno) return [error], blank_finish directive_instance = directive( type_name, arguments, options, content, lineno, content_offset, block_text, self, self.state_machine) try: result = directive_instance.run() except docutils.parsers.rst.DirectiveError as error: msg_node = self.reporter.system_message(error.level, error.msg, line=lineno) msg_node += nodes.literal_block(block_text, block_text) result = [msg_node] assert isinstance(result, list), \ 'Directive "%s" must return a list of nodes.' % type_name for i in range(len(result)): assert isinstance(result[i], nodes.Node), \ ('Directive "%s" returned non-Node object (index %s): %r' % (type_name, i, result[i])) return (result, blank_finish or self.state_machine.is_next_line_blank())
[ "def run_directive(self, directive, match, type_name, option_presets):\r\n if isinstance(directive, (FunctionType, MethodType)):\r\n from docutils.parsers.rst import convert_directive_function\r\n directive = convert_directive_function(directive)\r\n lineno = self.state_machine.abs_line_number()\r\n initial_line_offset = self.state_machine.line_offset\r\n indented, indent, line_offset, blank_finish \\\r\n = self.state_machine.get_first_known_indented(match.end(),\r\n strip_top=0)\r\n block_text = '\\n'.join(self.state_machine.input_lines[\r\n initial_line_offset : self.state_machine.line_offset + 1])\r\n try:\r\n arguments, options, content, content_offset = (\r\n self.parse_directive_block(indented, line_offset,\r\n directive, option_presets))\r\n except MarkupError, detail:\r\n error = self.reporter.error(\r\n 'Error in \"%s\" directive:\\n%s.' % (type_name,\r\n ' '.join(detail.args)),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n return [error], blank_finish\r\n directive_instance = directive(\r\n type_name, arguments, options, content, lineno,\r\n content_offset, block_text, self, self.state_machine)\r\n try:\r\n result = directive_instance.run()\r\n except docutils.parsers.rst.DirectiveError, error:\r\n msg_node = self.reporter.system_message(error.level, error.msg,\r\n line=lineno)\r\n msg_node += nodes.literal_block(block_text, block_text)\r\n result = [msg_node]\r\n assert isinstance(result, list), \\\r\n 'Directive \"%s\" must return a list of nodes.' % type_name\r\n for i in range(len(result)):\r\n assert isinstance(result[i], nodes.Node), \\\r\n ('Directive \"%s\" returned non-Node object (index %s): %r'\r\n % (type_name, i, result[i]))\r\n return (result,\r\n blank_finish or self.state_machine.is_next_line_blank())", "def _parse_directive(directive_ast: dict) -> \"DirectiveNode\":\n return DirectiveNode(\n name=_parse_name(directive_ast[\"name\"]),\n arguments=_parse_arguments(directive_ast[\"arguments\"]),\n location=_parse_location(directive_ast[\"loc\"]),\n )", "async def on_directive(self, directive: 'WorkerDirective'):\n pass", "def directive(self, name: str) -> Callable | None:\n if name in self._directive_cache:\n return self._directive_cache[name]\n if name not in self.directives:\n return None\n fullname = f'{self.name}:{name}'\n BaseDirective = self.directives[name]\n\n class DirectiveAdapter(BaseDirective): # type: ignore[valid-type,misc]\n def run(self) -> list[Node]:\n self.name = fullname\n return super().run()\n self._directive_cache[name] = DirectiveAdapter\n return DirectiveAdapter", "def ProcessDirectives(self, input):\n temp = input\n for directive in self.data.split('\\n'):\n directive = directive.split(',')\n temp = linesub(directive[0], directive[1], temp)\n return temp", "def connect_directive_node(self, name, f_visit, f_depart):\r\n self.builder._function_node.append((name, f_visit, f_depart))", "def parse(self, node):\n pm = getattr(self, \"parse_%s\"%node.__class__.__name__)\n pm(node)", "def test_hensonclidirective_sets_parser(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.arguments == ('henson.cli:parser',)", "def testDirectiveParser_parse_translate(self):\n translateDirective = \"xhr-src http://localhost\"\n cspTranslateDirective = DirectiveParser().parse(translateDirective)\n assert cspTranslateDirective == Directive(\"connect-src\", (URISourceExpression(\"http\", \"localhost\", None, None),))", "def run_parser(self, parser: ArgumentParser):", "def parse(self, node):\n parseMethod = 
getattr(self, \"parse%s\" % node.__class__.__name__)\n parseMethod(node)", "def _parse_directives(\n directives_ast: Optional[List[dict]]\n) -> List[\"DirectiveNode\"]:\n if directives_ast:\n return [_parse_directive(directive) for directive in directives_ast]\n return []", "def parse_directive(directive):\n\n whitelist = []\n blacklist = []\n substitutions = {}\n\n for segment in directive.split(\",\"):\n if segment.startswith(\"*=\"):\n scope = [\"*\"]\n elif \":\" in segment:\n start, stop = segment.strip(\"!\").split(\"=\")[0].split(\":\")\n scope = range(*sorted([int(start), int(stop) + 1]))\n else:\n start = int(segment.strip(\"!\").split(\"=\")[0])\n scope = range(start, start + 1)\n\n for code in scope:\n if \"=\" in segment:\n sub = int(segment.split(\"=\")[1])\n\n if segment.startswith(\"!\"):\n blacklist.append(code)\n substitutions[\"*\"] = sub\n else:\n substitutions[code] = sub\n elif segment.startswith(\"!\"):\n blacklist.append(code)\n else:\n whitelist.append(code)\n\n return whitelist, blacklist, substitutions", "def parse(self):\n for line in self.template_string.split('\\n'):\n split_line = tag_re.split(line)\n if len(split_line) > 1:\n for matched in split_line:\n mat = tag_re.search(matched)\n if mat:\n full_command = mat.group(0)\n cmd = mat.group(2).split()[0].strip() #get_comment_form etc\n if cmd == 'load':\n self.loaded_classes.append(full_command)\n else:\n if cmd not in DEFAULT_TAGS and cmd not in 'end'.join(DEFAULT_TAGS):\n self.template_calls.append(full_command)", "def parse_Element(self, node):\n name = node.tagName\n ignores = self.ignores\n if name in ignores:\n return\n attr = \"do_%s\" % name\n if hasattr(self, attr):\n handlerMethod = getattr(self, attr)\n handlerMethod(node)\n else:\n self.generic_parse(node)\n #if name not in self.generics: self.generics.append(name)", "def apply_visitor(visitor, decl_inst):\n\n fname = 'visit_' + \\\n decl_inst.__class__.__name__[:-2] # removing '_t' from class name\n if not hasattr(visitor, fname):\n raise visit_function_has_not_been_found_t(visitor, decl_inst)\n return getattr(visitor, fname)()", "def parse(self, stringDirective):\n \n # extract/translate directive type\n stringDirective = stringDirective.strip()\n if stringDirective == \"inline style base restriction\":\n return Directive.INLINE_STYLE_BASE_RESTRICTION()\n elif stringDirective == \"inline script base restriction\":\n return Directive.INLINE_SCRIPT_BASE_RESTRICTION()\n elif stringDirective == \"eval script base restriction\":\n return Directive.EVAL_SCRIPT_BASE_RESTRICTION()\n \n directiveParts = stringDirective.partition(\" \")\n if directiveParts[0] == stringDirective:\n return Directive.INVALID() # could not split as expected (no \" \")\n directiveType = directiveParts[0].strip().lower()\n if directiveType in self._typeTranslations:\n directiveType = self._typeTranslations[directiveType]\n if directiveType == \"\" or directiveType not in self._allowedTypes:\n return Directive.INVALID() # parsing error or type not allowed (e.g., report-uri or sandbox)\n\n # extract whitelisted source expressions\n whitelistedResources = directiveParts[2].strip().split()\n \n # handle 'none' in list\n # (list of length 0 might be invalid, but we handle it as 'none', too)\n if (\"'none'\" in map(lambda x: x.lower(), whitelistedResources)\n and len(whitelistedResources) > 1 \n and self._strict):\n return Directive.INVALID() # 'none' must be only resource if present\n \n # clean up URIs (and make unique set)\n validWhitelistedSourceExpressions = set([])\n for res in 
whitelistedResources:\n if res.lower() == \"'none'\":\n continue\n srcExpr = self._sourceExpressionParser.parse(res)\n # check some error conditions\n if srcExpr == SourceExpression.INVALID():\n if self._strict:\n return Directive.INVALID()\n else:\n continue\n if srcExpr == SourceExpression.UNSAFE_EVAL() and not directiveType in (\"script-src\", \"default-src\"):\n if self._strict:\n return Directive.INVALID()\n else:\n continue\n if srcExpr == SourceExpression.UNSAFE_INLINE() and not directiveType in (\"script-src\", \"style-src\", \"default-src\"):\n if self._strict:\n return Directive.INVALID()\n else:\n continue\n validWhitelistedSourceExpressions.add(srcExpr)\n return Directive(directiveType, validWhitelistedSourceExpressions)", "def compile_do(self):\n # write <do_statement>\n self.non_terminal_open(XML_DO_STATEMENT)\n # write <keyword> do <keyword>\n self.one_liner(XML_KEY_WORD, self.tokenizer.current_token)\n # advance to next token (subroutine call)\n self.tokenizer.advance()\n # write <identifier> name_of_func <identifier>\n self.one_liner(XML_IDENTIFIER, self.tokenizer.current_token)\n self.tokenizer.advance()\n # compile the subroutine call\n self.compile_subroutine_call()\n # write <symbol> ; <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n # write <do_statement>\n self.non_terminal_end(XML_DO_STATEMENT)\n self.tokenizer.advance()", "def parse(self, player, message):\n #test if the message match a command available for the player state\n matched = self.cmd_regex[player.get_state()].match(message)\n if matched:\n # execute the relative function\n cmd = matched.group(\"command\")\n arg = matched.group(\"arguments\") or ''\n getattr(self, Cmd.commands[cmd].fn)(player, arg)\n else:\n #self.game.log(\n # \"Unknown command <{}> for state {}.\"\n # .format(message, player.get_state()))\n info(player, \"<code>Arglebargle&nbsp;!?</code>\")", "def directives(self, directives):\n\n self._directives = directives" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse `datalines` for a field list containing extension options matching `option_spec`.
def parse_extension_options(self, option_spec, datalines):
    node = nodes.field_list()
    newline_offset, blank_finish = self.nested_list_parse(
        datalines, 0, node, initial_state='ExtensionOptions',
        blank_finish=True)
    if newline_offset != len(datalines):      # incomplete parse of block
        return 0, 'invalid option block'
    try:
        options = utils.extract_extension_options(node, option_spec)
    except KeyError as detail:
        return 0, ('unknown option: "%s"' % detail.args[0])
    except (ValueError, TypeError) as detail:
        return 0, ('invalid option value: %s' % ' '.join(detail.args))
    except utils.ExtensionOptionError as detail:
        return 0, ('invalid option data: %s' % ' '.join(detail.args))
    if blank_finish:
        return 1, options
    else:
        return 0, 'option data incompletely parsed'
[ "def parse_extension_options(self, option_spec, datalines):\r\n node = nodes.field_list()\r\n newline_offset, blank_finish = self.nested_list_parse(\r\n datalines, 0, node, initial_state='ExtensionOptions',\r\n blank_finish=True)\r\n if newline_offset != len(datalines): # incomplete parse of block\r\n return 0, 'invalid option block'\r\n try:\r\n options = utils.extract_extension_options(node, option_spec)\r\n except KeyError, detail:\r\n return 0, ('unknown option: \"%s\"' % detail.args[0])\r\n except (ValueError, TypeError), detail:\r\n return 0, ('invalid option value: %s' % ' '.join(detail.args))\r\n except utils.ExtensionOptionError, detail:\r\n return 0, ('invalid option data: %s' % ' '.join(detail.args))\r\n if blank_finish:\r\n return 1, options\r\n else:\r\n return 0, 'option data incompletely parsed'", "def parse_extension_attributes(self, attribute_spec, datalines, blankfinish):\n node = nodes.field_list()\n newlineoffset, blankfinish = self.nestedlistparse(\n datalines, 0, node, initialstate='FieldList',\n blankfinish=blankfinish)\n if newlineoffset != len(datalines): # incomplete parse of block\n return 0, 'invalid attribute block', blankfinish\n try:\n attributes = utils.extract_extension_attributes(node, attribute_spec)\n except KeyError, detail:\n return 0, ('unknown attribute: \"%s\"' % detail), blankfinish\n except (ValueError, TypeError), detail:\n return 0, ('invalid attribute value:\\n%s' % detail), blankfinish\n except utils.ExtensionAttributeError, detail:\n return 0, ('invalid attribute data: %s' % detail), blankfinish\n return 1, attributes, blankfinish", "def extract_extension_options(fields, options_spec, raise_fail=True, errors=[]):\n \"\"\"\n :Parameters:\n - `fields`: A list or field_list of fields with field_name, field_body pairs.\n - `options_spec`: Dictionary mapping known option names to a\n conversion function such as `int` or `float`.\n\n :Exceptions:\n - `UnknownOptionError` for unknown option names.\n - `DuplicateOptionError` for duplicate options.\n - `OptionValueError` for invalid option values (raised by conversion\n function).\n - `OptionTypeError` for invalid option value types (raised by conversion\n function).\n \"\"\"\n options = {}\n seen = [] # track seen names, raise on missing required fields\n for field in fields:\n\n field_name, field_body = field[0:2]\n name = extract_field_name(field_name)\n\n if not name in options_spec or not options_spec[name][0]:\n error = UnknownOptionError( name, ) # if explicitly disabled\n if raise_fail:\n raise error\n errors.append((field, error))\n continue\n\n spec = options_spec[name]\n\n # XXX:BVB: dont like this\n convertor = spec[0]\n required = not (len(spec)>1 and spec[1])\n append = len(spec)>2 and spec[2]\n\n if name in options and not append:\n error = DuplicateOptionError( name, )\n if raise_fail:\n raise error\n errors.append((field, error))\n continue\n\n if name not in seen:\n seen.append(name)\n\n if len(field_body):\n pass\n\n error = None\n try:\n if not callable(convertor):\n body = field_body[0]\n if len(convertor)==2:\n converted = list(parse_list(body, *convertor))\n elif len(convertor)==3:\n converted = list(parse_nested_list(body, *convertor))\n elif len(convertor)==4:\n converted = list(parse_nested_list_with_headers(body, *convertor))\n elif len(field_body):\n converted = convertor(field_body[0])\n else:\n converted = ''\n\n except ValueError, e:\n error = OptionValueError( name, field_body, e )\n except TypeError, e:\n error = OptionValueError( name, field_body, e )\n if error:\n 
if raise_fail:\n raise error\n errors.append((field, error))\n continue\n\n if append:\n if not name in options:\n options[name] = []\n if isinstance(converted, list):\n options[name].extend(converted)\n else:\n options[name].append(converted)\n else:\n options[name] = converted\n\n if len(options_spec) > len(seen):\n # Report missing fields\n names = options_spec.keys()\n [names.remove(name) for name in seen]\n for name in names:\n spec = options_spec[name]\n if len(spec)<2 or spec[1]:\n error = MissingOptionError(name,)\n if raise_fail:\n raise error\n errors.append((None, error))\n\n return options", "def test_multiple_extra_sections(self):\n class MySchema(Schema):\n foo = ListOption(\n item=DictOption(spec={'bar': IntOption()}))\n\n config = StringIO('[__main__]\\nfoo=d1\\n d2\\n d3\\n'\n '[d1]\\nbar=1\\n[d2]\\nbar=2\\n[d3]\\nbar=3')\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n self.assertTrue(parser.is_valid())", "def parse_delimited_data_lines(data_lines,delimiter=\"\\t\"):\n for line in data_lines: \n \n if line.startswith(\"#\"): \n continue \n if not line.strip(): \n continue \n \n fields = line.strip().split(delimiter) \n yield fields", "def read_options(data: Any) -> List[CommandOption]:\n def __fn(doc_option: Dict[str, Any]) -> CommandOption:\n name = doc.read(doc_option, \"name\", doc.typed(str))\n description = doc.read(doc_option, \"description\", doc.typed(str))\n validate_meta(MetaType.OPTION, name, description)\n kind_key = doc.read(doc_option, \"type\", doc.typed(Union[str, int]))\n try:\n kind = CommandOptionType.from_str(kind_key) if isinstance(kind_key, str) else CommandOptionType(kind_key)\n except ValueError as err:\n raise InvalidOptionTypeError(\" \".join(\n [f\"{kind_key} is not a valid command option type.\",\n \"It must be between 1 and 9 (both inclusive).\"])) from err\n option = {\n \"name\": name,\n \"description\": description,\n \"type\": kind.value,\n }\n doc.read(doc_option, \"required\", doc.typed(bool, optional=True), to=option)\n if \"choices\" in doc_option and isinstance(doc_option[\"choices\"], list):\n choices = []\n for doc_choice in doc_option[\"choices\"]:\n if isinstance(doc_choice, (str, int)):\n doc_choice = {\"name\": doc_choice}\n if not isinstance(doc_choice, dict):\n raise doc.ValueTypeError(f\"Choice of unexpected type '{type(doc_choice).__name__}'\")\n choice_name = doc.read(doc_choice, \"name\", doc.typed(str))\n validate_length(\"Choice name\", choice_name)\n choices.append({\n \"name\": choice_name,\n \"value\": doc.read(doc_choice, [\"value\", \"name\"], doc.typed(Union[str, int]))\n })\n option[\"choices\"] = choices\n doc.read(doc_option, \"options\", doc.with_default(read_options, None), to=option)\n return option # type: ignore\n\n return __read_list_or_keyed(\"Option\", data, __fn)", "def test_multiple_extra_sections(self):\n class MySchema(Schema):\n foo = ListOption(\n item=DictOption(spec={'bar': IntOption()}))\n\n config = StringIO('[__main__]\\nfoo=d1\\n d2\\n d3\\n'\n '[d1]\\nbar=1\\n[d2]\\nbar=2\\n[d3]\\nbar=3')\n parser = SchemaConfigParser(MySchema())\n parser.readfp(config)\n parser.parse_all()\n\n expected_sections = set(['d1', 'd2', 'd3'])\n extra_sections = parser.extra_sections\n self.assertEqual(expected_sections, extra_sections)", "def _parse_fields(self, unf_str):\n unf_str = unf_str.strip(self.BORDER_CHAR)\n _, desc_token = unf_str.split(\": \")\n fields = []\n suite_name = re.search(VPatterns.get_suite_name(), desc_token).group(0)\n fields.append(suite_name)\n 
fields.append(desc_token)\n return fields", "def parse_options(data: bytearray) -> Generator[BaseOption, None, None]:\n while data:\n kind = data[0]\n opt = _PARSE_KIND_TBL.get(kind, SizedOption).from_bytes(data)\n yield opt\n\n if opt is end_of_options:\n return", "def _read_lst_file(config: MutableMapping[str, Any]):\n cur_file = ReadMeta(\n filename=config[\"outputs\"][\"data_filename\"],\n input_start=config[\"inputs\"][\"start\"],\n input_stop1=config[\"inputs\"][\"stop1\"],\n input_stop2=config[\"inputs\"][\"stop2\"],\n input_stop3=config[\"inputs\"][\"stop3\"],\n input_stop4=config[\"inputs\"][\"stop4\"],\n input_stop5=config[\"inputs\"][\"stop5\"],\n binwidth=config[\"advanced\"][\"binwidth\"],\n use_sweeps=config[\"advanced\"][\"sweeps_as_lines\"],\n mirror_phase=config[\"advanced\"][\"phase\"],\n )\n cur_file.run()\n raw_data_obj = ReadData(\n filename=config[\"outputs\"][\"data_filename\"],\n start_of_data_pos=cur_file.start_of_data_pos,\n timepatch=cur_file.timepatch,\n is_binary=cur_file.is_binary,\n debug=config[\"advanced\"][\"debug\"],\n )\n raw_data = raw_data_obj.read_lst()\n if cur_file.is_binary:\n relevant_columns, dict_of_data = binary_parsing(cur_file, raw_data, config)\n else:\n relevant_columns, dict_of_data = ascii_parsing(cur_file, raw_data, config)\n lst_metadata = cur_file.lst_metadata\n fill_frac = (\n config[\"advanced\"][\"fill_frac\"]\n if cur_file.fill_fraction == -1.0\n else cur_file.fill_fraction\n )\n return relevant_columns, dict_of_data, lst_metadata, fill_frac", "def _extract_non_default_list(\n config_data: Dict, ctx: click.Context, field: str, process: Callable[[List], Any]\n) -> None:\n try:\n # Check if `field` was given in config file\n config_paths = config_data.pop(field)\n except KeyError:\n # No value for field was provided\n pass\n else:\n # Use config default if `field` was not provided as CLI argument\n if not ctx.params.get(field) and config_paths:\n if isinstance(config_paths, str):\n config_paths = [config_paths]\n ctx.params[field] = process(config_paths)", "def list_cmd_parser(buf):\n records = []\n\n # Assumption: Each record is separated by empty line.\n for record in buf.split('\\n\\n'):\n records.append(Record.parse(record))\n\n return records", "def parse_sparkDatasourceInfo_tag(spec):\n def parse_datasource(spec):\n toks = spec.split(\",\")\n dct = {}\n for tok in toks:\n k,v = tok.split(\"=\")\n dct[k] = v\n return dct\n toks = spec.split(\"\\n\")\n return [ parse_datasource(tok) for tok in toks ]", "def parse_dd(fp):\n expr = re.compile(r'[\\x0c]{0,1}(\\w+)\\*?[\\s\\t]*(\\d{1,2})[\\s\\t]*(.*?)'\n r'[\\s\\t]*\\(*(\\d+)\\s*[\\-–]\\s*(\\d+)\\)*\\s*$')\n with open(fp) as f:\n lines = (expr.match(x) for x in f)\n matches = filter(None, lines)\n groups = (x.groups() for x in matches)\n\n df = (pd.DataFrame(list(groups),\n columns=['field', 'width', 'desc', 'start', 'end'])\n .convert_objects(convert_numeric=True))\n return df", "def _parse_fields(self, unf_str):\n unf_str = unf_str.strip(self.BORDER_CHAR)\n unf_str = unf_str.lstrip(\"Test Case \")\n number, desc_token = unf_str.split(\": \")\n case_name = re.search(VPatterns.get_test_case_name(),\n desc_token).group(0)\n fields = []\n fields.append(case_name)\n fields.append(int(number))\n fields.append(desc_token)\n return fields", "def _add_fields(cls):\n for opt in CmdLine._flatten(cls._supported_options):\n if not opt.opt_name.isidentifier():\n raise CmdLineException(\"Specified option name '{}' must be \"\n \"a valid Python identifier\".\n format(opt.opt_name))\n if 
opt.opt_name in dir(CmdLine):\n raise CmdLineException(\"Specified option name '{}' clashes\".\n format(opt.opt_name))\n setattr(cls, opt.opt_name, opt.value)", "def resolve_validation_dataloaders(model: 'ModelPT'):\n if not _HAS_HYDRA:\n logging.error(\"This function requires Hydra/Omegaconf and it was not installed.\")\n exit(1)\n cfg = copy.deepcopy(model._cfg)\n dataloaders = []\n\n # process val_loss_idx\n if 'val_dl_idx' in cfg.validation_ds:\n cfg = OmegaConf.to_container(cfg)\n val_dl_idx = cfg['validation_ds'].pop('val_dl_idx')\n cfg = OmegaConf.create(cfg)\n else:\n val_dl_idx = 0\n\n # Set val_loss_idx\n model._val_dl_idx = val_dl_idx\n\n ds_key = resolve_dataset_name_from_cfg(cfg.validation_ds)\n\n if ds_key is None or val_dl_idx < 0:\n logging.debug(\n \"Could not resolve file path from provided config - {}. \"\n \"Disabling support for multi-dataloaders.\".format(cfg.validation_ds)\n )\n\n model.setup_validation_data(cfg.validation_ds)\n return\n\n ds_values = cfg.validation_ds[ds_key]\n\n if isinstance(ds_values, (list, tuple, ListConfig)):\n\n for ds_value in ds_values:\n if isinstance(ds_value, (dict, DictConfig)):\n # this is a nested dataset\n cfg.validation_ds = ds_value\n else:\n cfg.validation_ds[ds_key] = ds_value\n\n model.setup_validation_data(cfg.validation_ds)\n dataloaders.append(model._validation_dl)\n\n model._validation_dl = dataloaders\n if len(ds_values) > 0 and isinstance(ds_values[0], (dict, DictConfig)):\n # using the name of each of the nested dataset\n model._validation_names = [ds.name for ds in ds_values]\n else:\n model._validation_names = [parse_dataset_as_name(ds) for ds in ds_values]\n unique_names_check(name_list=model._validation_names)\n return\n\n else:\n model.setup_validation_data(cfg.validation_ds)\n model._validation_names = [parse_dataset_as_name(ds_values)]\n unique_names_check(name_list=model._validation_names)", "def iter_field_groups(input_file, line_group_separator='', field_separator='\\t', strip_spaces_at_ends=True,\r\n ignore_empty_groups=True, min_field_list_len=0, default_field_value=''):\r\n field_group = []\r\n with open(input_file) as f:\r\n for line in f:\r\n if strip_spaces_at_ends:\r\n line = line.strip()\r\n if line == line_group_separator:\r\n if field_group or (not ignore_empty_groups):\r\n yield field_group\r\n field_group = []\r\n else:\r\n fields = line.split(field_separator)\r\n if len(fields) < min_field_list_len:\r\n fields += [default_field_value] * (min_field_list_len - len(fields))\r\n field_group.append(fields)\r\n if field_group or (not ignore_empty_groups):\r\n yield field_group", "def parse_spec(spec):\n for row in spec:\n yield FrTest(**row)", "def _parseField(self, value, filename=None):\n if value is None:\n value = ''\n if filename is None:\n # Split the text into a list for diffs\n return value.splitlines()\n else:\n return [self.filenameTitle(filename)] + value.splitlines()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
RFC2822-style field list item.
def rfc2822(self, match, context, next_state):
    fieldlist = nodes.field_list(classes=['rfc2822'])
    self.parent += fieldlist
    field, blank_finish = self.rfc2822_field(match)
    fieldlist += field
    offset = self.state_machine.line_offset + 1   # next line
    newline_offset, blank_finish = self.nested_list_parse(
        self.state_machine.input_lines[offset:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=fieldlist, initial_state='RFC2822List',
        blank_finish=blank_finish)
    self.goto_line(newline_offset)
    if not blank_finish:
        self.parent += self.unindent_warning(
            'RFC2822-style field list')
    return [], next_state, []
[ "def __getitem__(self, i: 'int const') -> \"SoField *\":\n return _coin.SoFieldList___getitem__(self, i)", "def parseField(f):\n k = f.id\n if f.has_value('alternate_name'):\n k = f.get_value('alternate_name') or f.id\n v = getattr(request, k, MARKER)\n if hasattr(v, 'edit'):\n # This is an encapsulated editor\n # call it\n encapsulated_editor_list.append(v)\n elif v is not MARKER:\n if k.startswith(field_prefix):\n # We only take into account\n # the object attributes\n k = k[field_prefix_len:]\n # Form: '' -> ERP5: None\n if v == '':\n v = None\n kw[k] = v", "def addField(self,field=\"\"):\r\n self._NMEAFieldList.append(field)", "def fields(self, *fields):\n if len(fields) == 0:\n return [el.split() for el in self]\n \n res = SList()\n for el in [f.split() for f in self]:\n lineparts = []\n\n for fd in fields:\n try:\n lineparts.append(el[fd])\n except IndexError:\n pass\n if lineparts:\n res.append(\" \".join(lineparts))\n \n return res", "def map_listcs(item):\n fields = item.split()\n\n fields = [x.split(\"=\", 1)[-1] for x in fields]\n\n return tuple( fields )", "def fields(self):\n yield from self._field_list", "def visit_list_item(self, node):\n if len(node.children)==0 or node.children[0].tagname not in \\\n ['paragraph', 'compact_paragraph']:\n self.extend_node_attrs(node, bias=0)\n\n self.visit_list_item_original(self, node)\n\n # For compound list items (e.g. bullet point with two paragraphs):\n # the second paragraph should be recorded as a paragraph, not as\n # a `list_item`\n node._ucomment_num_nodes = 0", "def _parseField(self, value, filename=None):\n if value is None:\n value = ''\n if filename is None:\n # Split the text into a list for diffs\n return value.splitlines()\n else:\n return [self.filenameTitle(filename)] + value.splitlines()", "def field(self, *args):\r\n return _osgDB.FieldReaderIterator_field(self, *args)", "def __getitem__(self, item_num):\n if self._type == self.TYPE_LIST:\n return self._val[item_num].resolve()\n else:\n raise TypeError(\"RFValue is not a list.\")", "def FieldHandle(self) -> _n_2_t_10:", "def print_field(field):\n\n print('\\n'.join([''.join(['{:4}'.format(str(item)) for item in row])\n for row in field]))\n print('---------------')", "def format_field(field):\n if field.tag < '010' and self.tag.isdigit():\n return field.getData()\n fielddata = ''\n for subfield in field.subfields:\n if subfield.code == '6':\n continue\n if not field.tag.startswith('6'):\n fielddata += ' {0}'.format(subfield.getData())\n else:\n if subfield.code not in ('v','x','y','z'):\n fielddata += ' {0}'.format(subfield.getData())\n else:\n fielddata += ' -- {0}'.format(subfield.getData())\n return fielddata.strip()", "def fillFieldList(self):\n count = self.dlgCtrls.listWhatToGrab.getItemCount()\n self.dlgCtrls.listWhatToGrab.removeItems(0, count)\n self.titles = [(\"\", \"\")]\n if self.filetype in PhonReader.supportedNames():\n self.titles.extend(lingex_structs.LingPhonExample.GRAB_FIELDS)\n elif self.filetype in InterlinReader.supportedNames():\n self.titles.extend(lingex_structs.LingInterlinExample.GRAB_FIELDS)\n elif self.filetype in DocReader.supportedNames():\n self.titles.append((WhatToGrab.WHOLE_DOC, \"Whole Document\"))\n elif self.filetype in CalcFileReader.supportedNames():\n for char in string.ascii_uppercase:\n self.titles.append(\n (char, \"%s %s\" % (theLocale.getText(\"Column\"), char)))\n if len(self.titles) > 1:\n stringList = [theLocale.getText(display)\n for dummy_key, display in self.titles]\n 
self.dlgCtrls.listWhatToGrab.addItems(tuple(stringList), 0)", "def define_nested_list_field(*args):\n\n # Deal with the optional subclass name\n largs=len(args)\n if largs == 1:\n subclass_name=\"AnonymousNestedListField\"\n efield=args[0]\n elif largs == 2:\n subclass_name=args[0]\n efield=args[1]\n else:\n raise TypeError(\"define_nested_list_field() missing or invalid arguments\")\n\n # The element_field must be a RawField sub-class\n if not inspect.isclass(efield) or not issubclass(efield,RawField):\n raise TypeError(\"'{}' is not a RawField or a sub-class\".format(efield))\n\n def _pytocl(v):\n if isinstance(v,str) or not isinstance(v,collections.Iterable):\n raise TypeError(\"'{}' is not a collection (list/seq/etc)\".format(v))\n nested=clingo.Function(\"\",[])\n for ev in reversed(v):\n nested=clingo.Function(\"\",[efield.pytocl(ev),nested])\n return nested\n\n def _get_next(raw):\n if raw.type != clingo.SymbolType.Function or raw.name != \"\":\n raise TypeError(\"'{}' is not a nested list\".format(raw))\n rlen = len(raw.arguments)\n if rlen == 0: return None\n if rlen == 2: return raw.arguments\n else:\n raise TypeError(\"'{}' is not a nested list\".format(raw))\n\n def _cltopy(raw):\n elements=[]\n result = _get_next(raw)\n while result:\n elements.append(efield.cltopy(result[0]))\n result = _get_next(result[1])\n return elements\n\n return type(subclass_name, (RawField,),\n { \"pytocl\": _pytocl,\n \"cltopy\": _cltopy})", "def _parse_fields(self, unf_str):\n pass", "def __init__(self, field_list):\n self.field_list = field_list\n self.data = None", "def field(self,name,occurrence=None):\n proto_field = self.fields_by_name[name]\n field_internal = self.fields[name]\n if isinstance(field_internal,list):\n if occurrence is None:\n occurrence = 0\n field_internal = field_internal[occurrence]\n return proto_field, field_internal", "def item_info(sf, row, field=None):\n fields = [item[0] for item in sf.fields[1:]]\n record = sf.record(row)\n if field:\n print(\"{}: {}\".format(fields[field], record[field]))\n else:\n for i, field in enumerate(fields):\n print(\"{} - {}: {}\".format(i, field, record[field]))", "def __setitem__(self, i: 'int const', value: 'SoField') -> \"void\":\n return _coin.SoFieldList___setitem__(self, i, value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Analyze the text `block` and return a table data structure. Given a plaintext-graphic table in `block` (list of lines of text; no whitespace padding), parse the table, construct and return the data necessary to construct a CALS table or equivalent. Raise `TableMarkupError` if there is any problem with the markup.
def parse(self, block):
    self.setup(block)
    self.find_head_body_sep()
    self.parse_table()
    structure = self.structure_from_cells()
    return structure
[ "def render_table(self, block):\n before = '<table>\\n<tr>\\n<td>'\n end = '</td>\\n</tr>\\n</table>'\n content = [\"</td>\\n<td>\".join(row) for row in block.data]\n content = \"</td>\\n</tr>\\n<tr>\\n<td>\".join(content)\n block.data = before + content + end\n return None", "def _parse_block(self,idx):\n block_tmp = self._block_list[idx]\n blocktype = self._paragraph_or_table[idx]\n paragraph_count = sum(self._paragraph_or_table[:idx+1])\n table_count = idx + 1 - paragraph_count\n df = DataFrame()\n # paragraph\n if blocktype==1:\n l_runText = [r.text for r in block_tmp.runs]\n l_runID = arange(len(l_runText))\n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['paragraph_ID'] = paragraph_count - 1 # 0-starting index \n # table\n if blocktype==0:\n row_count = 0\n for row in block_tmp.rows:\n cell_count = 0\n for cell in row.cells:\n cell_para_count = 0\n for p in cell.paragraphs:\n l_runText = [r.text for r in p.runs]\n l_runID = arange(len(l_runText)) \n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['table_ID'] = table_count - 1 # 0-starting index\n df['row_ID'] = row_count\n df['cell_ID'] = cell_count\n df['paragraph_ID'] = cell_para_count \n cell_para_count += 1\n cell_count += 1\n row_count += 1\n df['block_ID'] = idx\n self._block_dataframe_list[idx] = df", "def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element", "def pba(self, block_attributes, element=None):\r\n style = []\r\n aclass = ''\r\n lang = ''\r\n colspan = ''\r\n rowspan = ''\r\n block_id = ''\r\n\r\n if not block_attributes:\r\n return ''\r\n\r\n matched = block_attributes\r\n if element == 'td':\r\n m = re.search(r'\\\\(\\d+)', matched)\r\n if 
m:\r\n colspan = m.group(1)\r\n\r\n m = re.search(r'/(\\d+)', matched)\r\n if m:\r\n rowspan = m.group(1)\r\n\r\n if element == 'td' or element == 'tr':\r\n m = re.search(r'(%s)' % self.vertical_align_re, matched)\r\n if m:\r\n style.append(\"vertical-align:%s;\" % self.vAlign[m.group(1)])\r\n\r\n m = re.search(r'\\{([^}]*)\\}', matched)\r\n if m:\r\n style.append(m.group(1).rstrip(';') + ';')\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'\\[([^\\]]+)\\]', matched, re.U)\r\n if m:\r\n lang = m.group(1)\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'\\(([^()]+)\\)', matched, re.U)\r\n if m:\r\n aclass = m.group(1)\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'([(]+)', matched)\r\n if m:\r\n style.append(\"padding-left:%sem;\" % len(m.group(1)))\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'([)]+)', matched)\r\n if m:\r\n style.append(\"padding-right:%sem;\" % len(m.group(1)))\r\n matched = matched.replace(m.group(0), '')\r\n\r\n m = re.search(r'(%s)' % self.horizontal_align_re, matched)\r\n if m:\r\n style.append(\"text-align:%s;\" % self.hAlign[m.group(1)])\r\n\r\n m = re.search(r'^(.*)#(.*)$', aclass)\r\n if m:\r\n block_id = m.group(2)\r\n aclass = m.group(1)\r\n\r\n if self.restricted:\r\n if lang:\r\n return ' lang=\"%s\"' % lang\r\n else:\r\n return ''\r\n\r\n result = []\r\n if style:\r\n result.append(' style=\"%s\"' % \"\".join(style))\r\n if aclass:\r\n result.append(' class=\"%s\"' % aclass)\r\n if lang:\r\n result.append(' lang=\"%s\"' % lang)\r\n if block_id:\r\n result.append(' id=\"%s\"' % block_id)\r\n if colspan:\r\n result.append(' colspan=\"%s\"' % colspan)\r\n if rowspan:\r\n result.append(' rowspan=\"%s\"' % rowspan)\r\n return ''.join(result)", "def parse_block(block: str) -> str:\n try:\n match = pattern.search(block)\n charset, encoding, raw_text = match.groups()\n except AttributeError:\n # match is None so .groups fails\n raise ValueError(f\"Could not recognise format of: {block}\") from None\n\n if str.lower(encoding) == 'b':\n text = b64decode(raw_text)\n elif str.lower(encoding) == 'q':\n text = quopri.decodestring(raw_text)\n else:\n raise ValueError(f\"Unknown encoding '{encoding}'\") from None\n exit(1)\n\n decoded = text.decode(charset)\n return decoded", "def table(self, text):\r\n text = text + \"\\n\\n\"\r\n pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\\. ?\\n)?^(%(a)s%(c)s\\.? ?\\|.*\\|)\\n\\n'\r\n % {'s': self.table_span_re,\r\n 'a': self.align_re,\r\n 'c': self.c},\r\n re.S | re.M | re.U)\r\n return pattern.sub(self.fTable, text)", "async def parse_block(ctx, block):\n # some questions are inputted from macs and have weird apostrophes. Kill them, and empty newlines\n # also escape underscores so when shown as a question in discord, they do not format, and normalize iOS apostrophes\n rawlines = block.replace('´', '\\'').replace('\\n\\n', '\\n').replace('_', '\\\\_').replace('´', '\\'').split('\\n')\n lines = []\n for line in rawlines:\n if not line.lower().startswith('source:'):\n lines.append(line)\n print(lines)\n # check validity of input\n try:\n if len(lines) % 2:\n raise UserWarning('Ope, I didn\\'t get that. 
Try not to separate any questions from their answers')\n for i in range(len(lines)):\n if i % 2 and not lines[i].startswith('Answer: '):\n raise UserWarning('Answer did not start with \"Answer: \"\\n```' + lines[i] + '```')\n if (1 + i) % 2 and not lines[i].startswith('Question: '):\n raise UserWarning('Question did not start with \"Question: \"\\n```' + lines[i] + '```')\n except UserWarning as e:\n await ctx.send(e)\n return\n\n out = []\n while lines:\n out.append(parse_next(lines))\n\n await ctx.send(display(out))\n return out", "def find_block(self, block):\n startlines = {\n 'surcharge': ('Node Surcharge Summary', 9),\n 'depth': ('Node Depth Summary', 8),\n # todo:\n #'inflow':,\n #'flooding':,\n #'volume':,\n #'loading':,\n #'link_flow':,\n #'classification':,\n #'conduit_surcharge':,\n }\n\n\n blockstart, comment_lines = startlines[block]\n\n return self._find_line(blockstart) + comment_lines #b/c variable comment lines", "def _parse_table(self, node, state):\n if not self.tabular:\n logger.error(\"Called _parse_table without tabular activated.\")\n return state\n\n if node.tag == \"table\":\n table_idx = state[\"table\"][\"idx\"]\n stable_id = \"{}::{}:{}\".format(\n state[\"document\"].name, \"table\", state[\"table\"][\"idx\"]\n )\n # Create the Table in the DB\n parts = {}\n parts[\"document\"] = state[\"document\"]\n parts[\"stable_id\"] = stable_id\n parts[\"position\"] = table_idx\n parent = state[\"parent\"][node]\n if isinstance(parent, Cell):\n parts[\"section\"] = parent.table.section\n elif isinstance(parent, Section):\n parts[\"section\"] = parent\n else:\n raise NotImplementedError(\"Table is not within a Section or Cell\")\n\n state[\"context\"][node] = Table(**parts)\n\n # Local state for each table. This is required to support nested\n # tables\n state[\"table\"][table_idx] = {\n \"grid\": defaultdict(int),\n \"cell_pos\": 0,\n \"row_idx\": -1,\n \"col_idx\": 0,\n }\n\n # Increment table counter\n state[\"table\"][\"idx\"] += 1\n\n elif node.tag == \"tr\":\n if not isinstance(state[\"parent\"][node], Table):\n raise NotImplementedError(\"Table row parent must be a Table.\")\n\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"] = 0\n state[\"table\"][state[\"parent\"][node].position][\"row_idx\"] += 1\n\n elif node.tag in [\"td\", \"th\"]:\n if not isinstance(state[\"parent\"][node], Table):\n raise NotImplementedError(\"Cell parent must be a Table.\")\n\n if not state[\"table\"][state[\"parent\"][node].position][\"row_idx\"] >= 0:\n raise NotImplementedError(\"Table cell encountered before a table row.\")\n\n # calculate row_start/col_start\n while state[\"table\"][state[\"parent\"][node].position][\"grid\"][\n (\n state[\"table\"][state[\"parent\"][node].position][\"row_idx\"],\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"],\n )\n ]: # while a cell on the grid is occupied, keep moving\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"] += 1\n col_start = state[\"table\"][state[\"parent\"][node].position][\"col_idx\"]\n row_start = state[\"table\"][state[\"parent\"][node].position][\"row_idx\"]\n\n # calculate row_end/col_end\n row_end = row_start\n if \"rowspan\" in node.attrib:\n row_end += int(node.get(\"rowspan\")) - 1\n col_end = col_start\n if \"colspan\" in node.attrib:\n col_end += int(node.get(\"colspan\")) - 1\n\n # update grid with occupied cells\n for r, c in itertools.product(\n list(range(row_start, row_end + 1)), list(range(col_start, col_end + 1))\n ):\n 
state[\"table\"][state[\"parent\"][node].position][\"grid\"][(r, c)] = 1\n\n # construct cell\n parts = defaultdict(list)\n parts[\"document\"] = state[\"document\"]\n parts[\"table\"] = state[\"parent\"][node]\n parts[\"row_start\"] = row_start\n parts[\"row_end\"] = row_end\n parts[\"col_start\"] = col_start\n parts[\"col_end\"] = col_end\n parts[\"position\"] = state[\"table\"][state[\"parent\"][node].position][\n \"cell_pos\"\n ]\n stable_id = \"{}::{}:{}:{}:{}\".format(\n parts[\"document\"].name,\n \"cell\",\n parts[\"table\"].position,\n row_start,\n col_start,\n )\n parts[\"stable_id\"] = stable_id\n # Create the Cell in the DB\n state[\"context\"][node] = Cell(**parts)\n\n # Update position\n state[\"table\"][state[\"parent\"][node].position][\"col_idx\"] += 1\n state[\"table\"][state[\"parent\"][node].position][\"cell_pos\"] += 1\n\n return state", "def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '&#x27e8;' and '&#x27e9;'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '&#x27e8;'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'&#x27e9;'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font 
color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht", "def make_dataframe(block_name, blocks):\n names = {} # store names corresponding to column ids\n all_rows = [] # store list of dicts of column_id: value\n for k, v in blocks.iteritems():\n # to hold table info for this file\n info = {}\n for line in v:\n # split around the #. parts[0] is the contents, parts[1] is the column header\n # (but note programs use diff conventions...)\n parts = [p.strip() for p in line.split('#')]\n data, comment = parts\n\n # for most blocks, we use the first part of parts[0] to ID what the row means\n # BUT this doens't work for all e.g. DCINFO\n id_not_first_blocks = [\"DCINFO\"]\n if block_name in id_not_first_blocks:\n pass\n else:\n col_id, contents = data.split()\n names[col_id] = comment\n info[col_id] = contents\n all_rows.append(info)\n # make a DataFrame for this block\n df = pd.DataFrame(all_rows, index=blocks.keys())\n # convert column IDs to string names\n df.rename(columns=names, inplace=True)\n df.reindex_axis(sorted(df.columns), axis=1)\n df.sort_index(inplace=True)\n print df\n return df", "def handleBlock(block):\n mlines = filter(lambda line : line.startswith('-'), block)\n plines = filter(lambda line : line.startswith('+'), block)\n mcount = len(mlines)\n pcount = len(plines)\n if mcount > pcount:\n plines.extend([''] * (mcount - pcount))\n elif pcount > mcount:\n mlines.extend([''] * (pcount - mcount))\n count = max(mcount, pcount)\n return [(mlines[i],plines[i]) for i in range(count)]", "def parseMT(self):\n print(\"starting\");\n ans = RowBox()\n if self.getStart():\n print(\"Found start\")\n nends = 0\n while self.checkNext(): \n print(\"Starting interpreter\")\n mb = self.nextRecord(True)\n if mb != None: #if this is true, parseMT terminates: we come here only once\n if self.recType == 1: # type LINE add all children of mb to ans\n for nmb in mb.c :\n if not nmb.isEmpty():\n ans.addChild(nmb)\n ## if(endct == 0) return ans;\n self.endct -= 1\n #elif self.subType == 1: # take last element of ans, put it in a rowbox, replace first of mb with the rowbox, finally insert mb in ans\n## used for adding exponent and index to elem\n #zb = ans.c.removeLast() #? 
is ans ever non-empty, here???\n #zbnew = RowBox()\n #zbnew.addChild(zb)\n #lb = mb\n #lb.c.remove(0)\n #lb.c.add(0, zbnew)\n #ans.addChild(mb)\n else: # add mb (as a block) as a single child of ans \n ans.addChild(mb)\n return ans #\n if self.recType == 0: #mb == None, if we find more than 6, stop\n nends += 1\n if nends > 6: \n return ans\n return ans #we've hit end of file", "def executeblock(self, block):\r\n \r\n block_text = \"\\n\\n\" + block.string\r\n line_number = block.start_row\r\n #self.options._update_loose(block.options)\r\n out_value = \"\"\r\n \r\n # This import should not be needed, but it works around a very\r\n # strange bug I encountered once.\r\n import cStringIO\r\n # create file-like string to capture output\r\n code_out = cStringIO.StringIO()\r\n code_err = cStringIO.StringIO()\r\n \r\n captured_exception = None\r\n # capture output and errors\r\n sys.stdout = code_out\r\n sys.stderr = code_err\r\n try:\r\n exec block_text in self.namespace\r\n except Exception, captured_exception:\r\n if isinstance(captured_exception, KeyboardInterrupt):\r\n raise captured_exception\r\n print >> sys.stderr, format_exc() \r\n \r\n # restore stdout and stderr\r\n sys.stdout = sys.__stdout__\r\n sys.stderr = sys.__stderr__\r\n \r\n out_value = code_out.getvalue()\r\n error_value = code_err.getvalue()\r\n \r\n code_out.close()\r\n code_err.close()\r\n\r\n if captured_exception: \r\n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \r\n print >> sys.stderr, error_value\r\n self.namespace = globals()\r\n self.namespace.update(locals())\r\n\r\n if out_value and not self.options.noecho:\r\n if self.options.outfilename == \"-\" :\r\n print >> sys.stderr, out_value\r\n else:\r\n print out_value\r\n if self.myshow:\r\n self.current_figure_list = self.myshow.figure_list[\r\n len(self.total_figure_list):]\r\n self.total_figure_list = self.myshow.figure_list\r\n\r\n #if self.options.silent:\r\n # error_value = \"\"\r\n \r\n return (block.start_row, block.string, out_value, error_value, \r\n self.current_figure_list)", "def executeblock(self, block):\n \n block_text = \"\\n\\n\" + block.string\n line_number = block.start_row\n #self.options._update_loose(block.options)\n out_value = \"\"\n \n # This import should not be needed, but it works around a very\n # strange bug I encountered once.\n import cStringIO\n # create file-like string to capture output\n code_out = cStringIO.StringIO()\n code_err = cStringIO.StringIO()\n \n captured_exception = None\n # capture output and errors\n sys.stdout = code_out\n sys.stderr = code_err\n try:\n exec block_text in self.namespace\n except Exception, captured_exception:\n if isinstance(captured_exception, KeyboardInterrupt):\n raise captured_exception\n print >> sys.stderr, format_exc() \n \n # restore stdout and stderr\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n \n out_value = code_out.getvalue()\n error_value = code_err.getvalue()\n \n code_out.close()\n code_err.close()\n\n if captured_exception: \n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \n print >> sys.stderr, error_value\n self.namespace = globals()\n self.namespace.update(locals())\n\n if out_value and not self.options.noecho:\n if self.options.outfilename == \"-\" :\n print >> sys.stderr, out_value\n else:\n print out_value\n if self.myshow:\n self.current_figure_list = self.myshow.figure_list[\n len(self.total_figure_list):]\n self.total_figure_list = self.myshow.figure_list\n\n 
#if self.options.silent:\n # error_value = \"\"\n \n return (block.start_row, block.string, out_value, error_value, \n self.current_figure_list)", "def test_block_in_inline():\r\n box = parse('''\r\n<style>\r\n p { display: inline-block; }\r\n span, i { display: block; }\r\n</style>\r\n<p>Lorem <em>ipsum <strong>dolor <span>sit</span>\r\n <span>amet,</span></strong><span><em>conse<i></i></em></span></em></p>''')\r\n box = build.inline_in_block(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n # No whitespace processing here.\r\n ('strong', 'Text', '\\n '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])])]),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse'),\r\n ('i', 'Block', [])])])])])])])])])\r\n\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [\r\n # Whitespace processing not done yet.\r\n ('strong', 'Text', '\\n ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])]),\r\n\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [])])])]),\r\n ('span', 'Block', [\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse')])])]),\r\n ('i', 'Block', []),\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [])])])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [])])])])])])", "def _get_block_plain_text(self, block):\n cursor = QtGui.QTextCursor(block)\n cursor.movePosition(QtGui.QTextCursor.StartOfBlock)\n cursor.movePosition(QtGui.QTextCursor.EndOfBlock,\n QtGui.QTextCursor.KeepAnchor)\n return cursor.selection().toPlainText()", "def parse_block(self, block, lineno, indent):\r\n tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))\r\n tree.future_features = frozenset()\r\n return tree", "def check_rst_block(block):\r\n publisher = docCore.Publisher( source_class = docIO.StringInput,\r\n destination_class = docIO.StringOutput )\r\n publisher.set_components('standalone', 'restructuredtext', 'pseudoxml')\r\n publisher.process_programmatic_settings(None, None, None)\r\n if block[0] == \"textBlock\":\r\n publisher.set_source(block[1], None)\r\n compiled_rst = publisher.reader.read(publisher.source,\r\n publisher.parser, publisher.settings)\r\n if compiled_rst.parse_messages:\r\n # FIXME: It would be nice to add the line number where the error \r\n # happened\r\n print >> sys.stderr, \"\"\"Error reading rst on literate comment line \r\nfalling back to plain text\"\"\"\r\n else:\r\n block[0] = \"rstBlock\"\r\n return block", "def parse_content_block(content_block, tm_, privkey=None, pubkey=None):\n stix_bindings = (t.CB_STIX_XML_10,\n t.CB_STIX_XML_101,\n 
t.CB_STIX_XML_11,\n t.CB_STIX_XML_111,\n \"urn:stix.mitre.org:xml:1.2\")\n\n binding = str(content_block.content_binding)\n if binding == 'application/x-pkcs7-mime':\n if not privkey or not pubkey:\n msg = \"Encrypted data found, but certificate or key not provided\"\n return (None, msg)\n\n inbuf = BIO.MemoryBuffer(BytesIO(content_block.content).read())\n s = SMIME.SMIME()\n try:\n s.load_key(str(privkey), str(pubkey))\n p7, data = SMIME.smime_load_pkcs7_bio(inbuf)\n buf = s.decrypt(p7)\n except SMIME.PKCS7_Error:\n return (None, \"Decryption Failed\")\n f = BytesIO(buf)\n new_block = f.read()\n f.close()\n return parse_content_block(tm_.ContentBlock.from_xml(new_block),\n tm_, privkey, pubkey)\n elif binding in stix_bindings:\n f = BytesIO(content_block.content)\n data = f.read()\n f.close()\n return (data, None)\n else:\n msg = 'Unknown content binding \"%s\"' % binding\n return (None, msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look for a head/body row separator line; store the line index.
def find_head_body_sep(self):
    for i in range(len(self.block)):
        line = self.block[i]
        if self.head_body_separator_pat.match(line):
            if self.head_body_sep:
                raise TableMarkupError(
                    'Multiple head/body row separators '
                    '(table lines %s and %s); only one allowed.'
                    % (self.head_body_sep+1, i+1), offset=i)
            else:
                self.head_body_sep = i
                self.block[i] = line.replace('=', '-')
    if self.head_body_sep == 0 or self.head_body_sep == (len(self.block) - 1):
        raise TableMarkupError('The head/body row separator may not be '
                               'the first or last line of the table.',
                               offset=i)
[ "def parse_special_header(self, linenum, info):\n if linenum + 1 < len(self.lines) and \\\n self.lines[linenum].startswith(\"Index: \") and \\\n self.lines[linenum + 1] == self.INDEX_SEP:\n # This is an Index: header, which is common in CVS and Subversion,\n # amongst other systems.\n try:\n info['index'] = self.lines[linenum].split(None, 2)[1]\n except ValueError:\n raise DiffParserError(\"Malformed Index line\", linenum)\n linenum += 2\n\n return linenum", "def find_next_nearest_delimiter(self, line, index):\n while line[index] != self.delimiter and index < len(line)-1:\n index += 1\n return index", "def _find_linesep(self, s):\r\n if \"\\r\\n\" in s: # windows line ending\r\n self.linesep = \"\\r\\n\"\r\n else:\r\n self.linesep = \"\\n\"", "def find_head(self):\n switch = [0, 0]\n for row in self.sheet_r.rows:\n if switch == [1, 0]:\n break\n switch[0] = switch[1]\n\n try:\n if row[0].fill.start_color.index != '00000000':\n switch[1] = 1\n self.head_pos.append(row[0].row)\n else:\n switch[1] = 0\n except AttributeError:\n pass", "def get_line_identifier(self):", "def get_number_header_lines(docbody, page_break_posns):\n remaining_breaks = len(page_break_posns) - 1\n num_header_lines = empty_line = 0\n # pattern to search for a word in a line:\n p_wordSearch = re.compile(ur'([A-Za-z0-9-]+)', re.UNICODE)\n if remaining_breaks > 2:\n if remaining_breaks > 3:\n # Only check odd page headers\n next_head = 2\n else:\n # Check headers on each page\n next_head = 1\n keep_checking = 1\n while keep_checking:\n cur_break = 1\n if docbody[(page_break_posns[cur_break] +\n num_header_lines + 1)].isspace():\n # this is a blank line\n empty_line = 1\n\n if (page_break_posns[cur_break] + num_header_lines + 1) \\\n == (page_break_posns[(cur_break + 1)]):\n # Have reached next page-break: document has no\n # body - only head/footers!\n keep_checking = 0\n\n grps_headLineWords = \\\n p_wordSearch.findall(docbody[(page_break_posns[cur_break] +\n num_header_lines + 1)])\n cur_break = cur_break + next_head\n while (cur_break < remaining_breaks) and keep_checking:\n grps_thisLineWords = \\\n p_wordSearch.findall(docbody[(page_break_posns[cur_break] +\n num_header_lines + 1)])\n if empty_line:\n if len(grps_thisLineWords) != 0:\n # This line should be empty, but isn't\n keep_checking = 0\n else:\n if (len(grps_thisLineWords) == 0) or \\\n (len(grps_headLineWords) != len(grps_thisLineWords)):\n # Not same num 'words' as equivilent line\n # in 1st header:\n keep_checking = 0\n else:\n keep_checking = \\\n check_boundary_lines_similar(grps_headLineWords,\n grps_thisLineWords)\n # Update cur_break for nxt line to check\n cur_break = cur_break + next_head\n if keep_checking:\n # Line is a header line: check next\n num_header_lines = num_header_lines + 1\n empty_line = 0\n return num_header_lines", "def split_line(self):\n # coordinate of the # symbol or end of the line (-1) if not found\n hash_or_end = self.line.find(\"#\")\n temp = self.line[self.region_end:hash_or_end].strip(\" |\")\n self.coord_str = regex_paren.sub(\"\", temp)\n\n # don't want any meta_str if there is no metadata found\n if hash_or_end >= 0:\n self.meta_str = self.line[hash_or_end:]\n else:\n self.meta_str = \"\"", "def getline(self, bno):\r\n return self.breakpt[bno]['line']", "def _beginningOfContent(line: str) -> int:\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0", "def leyline_head_changer(self, leyline: 'Leyline') -> None:\n cell_id_list = []\n for cells in leyline.cell_list:\n 
cell_id_list.append(cells.id)\n numberof1s = cell_id_list.count(1)\n numberof2s = cell_id_list.count(2)\n if numberof1s >= (len(leyline.cell_list) / 2) and leyline.head == '@':\n leyline.head = 1\n elif numberof2s >= (len(leyline.cell_list) / 2) and leyline.head == '@':\n leyline.head = 2", "def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)", "def get_line_number(self):\n return self.line_number", "def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish", "def _get_line_numbers(self):\n\n output = ''\n row, col = self._text.index('end').split('.')\n i = 0\n for i in range(1, int(row) - 1):\n output += str(i) + '\\n'\n\n return output + str(i + 1)", "def get_containing_line(self, pos):\n _, col, lino = self.contentTextCtrl.PositionToXY(pos)\n left = pos - col\n return (left, left + self.contentTextCtrl.GetLineLength(lino))", "def indexByLineNumber(self,n):\n for idx in range(len(self.__data)):\n if self.__data[idx].lineno() == n:\n return idx\n raise IndexError,\"No line number %d\" % n", "def test_find_line(self):\n a = NRT.ConvToCsv()\n s= a.find_line(self.test2,\"Empty_Category20\")\n self.assertEqual(s, 38, \"Dont find the write line\")", "def _compute_position(input, index):\n line = 1\n col = 1\n eol = None # last end of line character\n for c in input[:index]:\n if c == '\\n' or c == '\\r':\n if eol is None or eol == c:\n eol = c\n line += 1\n col = 1\n else:\n # ignore second of '\\n\\r' and '\\r\\n' sequences\n eol = None\n else:\n col += 1\n return (line, col)", "def first_non_whitespace_index (line): \n return len (line) - len (line.lstrip ())", "def _get_line_number(file_lines, pattern):\n return next(i for i, line in enumerate(file_lines) if pattern in line) + 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start with a queue of upper-left corners, containing the upper-left corner of the table itself. Trace out one rectangular cell, remember it, and add its upper-right and lower-left corners to the queue of potential upper-left corners of further cells. Process the queue in top-to-bottom order, keeping track of how much of each text column has been seen. We'll end up knowing all the row and column boundaries, cell positions and their dimensions.
def parse_table(self):
    corners = [(0, 0)]
    while corners:
        top, left = corners.pop(0)
        if top == self.bottom or left == self.right \
           or top <= self.done[left]:
            continue
        result = self.scan_cell(top, left)
        if not result:
            continue
        bottom, right, rowseps, colseps = result
        update_dict_of_lists(self.rowseps, rowseps)
        update_dict_of_lists(self.colseps, colseps)
        self.mark_done(top, left, bottom, right)
        cellblock = self.block.get_2D_block(top + 1, left + 1,
                                            bottom, right)
        cellblock.disconnect()      # lines in cell can't sync with parent
        cellblock.replace(self.double_width_pad_char, '')
        self.cells.append((top, left, bottom, right, cellblock))
        corners.extend([(top, right), (bottom, left)])
        corners.sort()
    if not self.check_parse_complete():
        raise TableMarkupError('Malformed table; parse incomplete.')
[ "def iter_cells_greater_than(self, row_zb: int, col_zb: int) \\\n -> Generator[Tuple[int, int], None, None]:\n # Cell above?\n if (row_zb > 0 and\n self.inequalities_down[row_zb - 1][col_zb] == BOTTOM_LT_TOP):\n other = row_zb - 1, col_zb\n yield other\n yield from self.iter_cells_greater_than(*other)\n # Cell below?\n if (row_zb < self.n - 1 and\n self.inequalities_down[row_zb][col_zb] == TOP_LT_BOTTOM):\n other = row_zb + 1, col_zb\n yield other\n yield from self.iter_cells_greater_than(*other)\n # Cell left?\n if (col_zb > 0 and\n self.inequalities_right[row_zb][col_zb - 1] == LEFT_GT_RIGHT):\n other = row_zb, col_zb - 1\n yield other\n yield from self.iter_cells_greater_than(*other)\n # Cell right?\n if (col_zb < self.n - 1 and\n self.inequalities_right[row_zb][col_zb] == LEFT_LT_RIGHT):\n other = row_zb, col_zb + 1\n yield other\n yield from self.iter_cells_greater_than(*other)", "def draw_grid(self, cell_edge_size):\r\n left = 0\r\n top = 0\r\n for row in range(1, self.game.num_of_rows + 1):\r\n for col in range(1, self.game.num_of_cols + 1):\r\n cell = self.grid[row][col]\r\n if cell.alive_next_round:\r\n cell.color = cell.alive_color\r\n cell.alive = True\r\n cell.alive_next_round = False\r\n else:\r\n cell.color = cell.dead_color\r\n cell.alive = False\r\n\r\n square = pygame.Rect(left, top, cell_edge_size, cell_edge_size)\r\n pygame.draw.rect(cell.surface, cell.color, square)\r\n left += cell_edge_size\r\n\r\n left = 0\r\n top += cell_edge_size", "def border(self):\n #pylint: disable=R0912\n # Too many branches (17/12)\n rstr = self.colors.get('border', u'')\n thoriz = self.glyphs.get('top-horiz', u'') * (self.width - 2)\n bhoriz = self.glyphs.get('bot-horiz', u'') * (self.width - 2)\n topright = self.glyphs.get('top-right', u'')\n botright = self.glyphs.get('bot-right', u'')\n for row in range(0, self.height):\n # top to bottom\n for col in range (0, self.width):\n # left to right\n if (col == 0) or (col == self.width - 1):\n rstr += self.pos(col, row)\n if (row == 0) and (col == 0):\n # top left\n rstr += self.glyphs.get('top-left', u'')\n elif (row == self.height - 1) and (col == 0):\n # bottom left\n rstr += self.glyphs.get('bot-left', u'')\n elif (row == 0):\n # top right\n rstr += self.glyphs.get('top-right', u'')\n elif (row == self.height - 1):\n # bottom right\n rstr += self.glyphs.get('bot-right', u'')\n elif col == 0:\n # left vertical line\n rstr += self.glyphs.get('left-vert', u'')\n elif col == self.width - 1:\n # right vertical line\n rstr += self.glyphs.get('right-vert', u'')\n elif (row == 0):\n # top row (column 1)\n if thoriz == u'':\n if topright != u'':\n # prepare for top-right, (horiz skipped)\n rstr += self.pos(self.width -1, row)\n else:\n # horizontal line\n rstr += thoriz\n # top-right,\n rstr += topright\n break\n elif (row == self.height - 1):\n # bottom row (column 1)\n if bhoriz == u'':\n if botright != u'':\n # prepare for bot-right, (horiz skipped)\n rstr += self.pos(self.width -1, row)\n else:\n # horizontal line\n rstr += bhoriz\n # top-right,\n rstr += botright\n break\n rstr += self.colors.get('border', u'')\n return rstr", "def create_neighbors(self):\n for row in self._currentGrid:\n for cell in row:\n row = cell.get_row()\n column = cell.get_column()\n if row == 0:\n # 1. 
upper left corner (3 neighbors)\n if column == 0:\n #print('upper left')\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # 2. rest of the top row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # upper right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n # middle row\n elif row < (self._rows - 1):\n #print('middle')\n # 1. middle left edge (5 neighbors)\n if column == 0:\n #print('middle left edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n # 2. rest of the middle row (8 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n # 3. middle right edge (5 neighbors)\n else:\n #print('middle right edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n # bottom row\n else:\n #print('lower')\n # 1. 
bottom left corner (3 neighbors)\n if column == 0:\n #print('lower left')\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # 2. rest of the bottom row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # bottom right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[row - 1][0])\n cell.add_neighbor(self._currentGrid[row][0])", "def add_corners(df):\n col_corners, col_ra_min, col_ra_max, col_dec_min, col_dec_max = [], [], [], [], []\n for idx, row in tqdm(df.iterrows(), desc=\"Adding corner coordinates\", total=len(df)):\n corners = mask_corners(row)\n # String serialization\n str_repr = \";\".join([\"{:.6f},{:.6f}\".format(corners[0][idx], corners[1][idx])\n for idx in range(len(corners[0]))])\n col_corners.append(str_repr)\n # Bounding box in equatorial coordinates\n ra_min, ra_max = corners[0].min(), corners[0].max()\n dec_min, dec_max = corners[1].min(), corners[1].max()\n col_ra_min.append(\"{:.6f}\".format(ra_min))\n col_ra_max.append(\"{:.6f}\".format(ra_max))\n col_dec_min.append(\"{:.6f}\".format(dec_min))\n col_dec_max.append(\"{:.6f}\".format(dec_max))\n df['corners'] = col_corners\n df['ra_min'] = col_ra_min\n df['ra_max'] = col_ra_max\n df['dec_min'] = col_dec_min\n df['dec_max'] = col_dec_max\n return df", "def iter_cells_less_than(self, row_zb: int, col_zb: int) \\\n -> Generator[Tuple[int, int], None, None]:\n # Cell above?\n if (row_zb > 0 and\n self.inequalities_down[row_zb - 1][col_zb] == TOP_LT_BOTTOM):\n other = row_zb - 1, col_zb\n yield other\n yield from self.iter_cells_less_than(*other)\n # Cell below?\n if (row_zb < self.n - 1 and\n self.inequalities_down[row_zb][col_zb] == BOTTOM_LT_TOP):\n other = row_zb + 1, col_zb\n yield other\n yield from self.iter_cells_less_than(*other)\n # Cell left?\n if (col_zb > 0 and\n self.inequalities_right[row_zb][col_zb - 1] == LEFT_LT_RIGHT):\n other = row_zb, col_zb - 1\n yield other\n yield from self.iter_cells_less_than(*other)\n # Cell right?\n if (col_zb < self.n - 1 and\n self.inequalities_right[row_zb][col_zb] == LEFT_GT_RIGHT):\n other = row_zb, col_zb + 1\n yield other\n yield from self.iter_cells_less_than(*other)", "def _cellborders(self, ix, iy, nx, ny, kwargs):\r\n\r\n ret = kwargs.copy()\r\n\r\n def corners(ret):\r\n \"Handle corners of table\"\r\n if ix == 0 and iy == 0:\r\n ret[\"corner_top_left\"] = self.corner_top_left\r\n if ix == nx and iy == 
0:\r\n ret[\"corner_top_right\"] = self.corner_top_right\r\n if ix == 0 and iy == ny:\r\n ret[\"corner_bottom_left\"] = self.corner_bottom_left\r\n if ix == nx and iy == ny:\r\n ret[\"corner_bottom_right\"] = self.corner_bottom_right\r\n return ret\r\n\r\n def left_edge(ret):\r\n \"add vertical border along left table edge\"\r\n if ix == 0:\r\n ret[\"border_left\"] = bwidth\r\n return ret\r\n\r\n def top_edge(ret):\r\n \"add border along top table edge\"\r\n if iy == 0:\r\n ret[\"border_top\"] = bwidth\r\n return ret\r\n\r\n def right_edge(ret):\r\n \"add vertical border along right table edge\"\r\n if ix == nx:# and 0 < iy < ny:\r\n ret[\"border_right\"] = bwidth\r\n return ret\r\n\r\n def bottom_edge(ret):\r\n \"add border along bottom table edge\"\r\n if iy == ny:\r\n ret[\"border_bottom\"] = bwidth\r\n return ret\r\n\r\n def cols(ret):\r\n \"Adding vertical borders inside the table\"\r\n if 0 <= ix < nx:\r\n ret[\"border_right\"] = bwidth\r\n return ret\r\n\r\n def rows(ret):\r\n \"Adding horizontal borders inside the table\"\r\n if 0 <= iy < ny:\r\n ret[\"border_bottom\"] = bwidth\r\n return ret\r\n\r\n def head(ret):\r\n \"Add header underline\"\r\n if iy == 0:\r\n # put different bottom line for header\r\n ret[\"border_bottom\"] = bwidth\r\n ret[\"border_bottom_char\"] = headchar\r\n return ret\r\n\r\n\r\n # handle the various border modes\r\n border = self.border\r\n header = self.header\r\n\r\n bwidth = self.border_width\r\n headchar = self.header_line_char\r\n\r\n # use the helper functions to define various\r\n # table \"styles\"\r\n\r\n if border in (\"table\", \"tablecols\",\"cells\"):\r\n ret = bottom_edge(right_edge(top_edge(left_edge(corners(ret)))))\r\n if border in (\"cols\", \"tablecols\", \"cells\"):\r\n ret = cols(right_edge(left_edge(ret)))\r\n if border in (\"incols\"):\r\n ret = cols(ret)\r\n if border in (\"rows\", \"cells\"):\r\n ret = rows(bottom_edge(top_edge(ret)))\r\n if header and not border in (\"none\", None):\r\n ret = head(ret)\r\n\r\n return ret", "def test_custom_corners(self):\n tab = tabl.Tabl()\n tab.set_corner('*')\n string = tab.to_table([['a']])\n self.assertEqual('*-*\\n' + \\\n '|a|\\n' + \\\n '*-*\\n', string)", "def open_value_border_cells(self) -> None:\n\n for row in range(self.rows):\n for col in range(self.columns):\n\n # boolean condition variables:\n cond_if_cell_isnt_opnd = self.cells_list[row][col].value > 0 and \\\n self.cells_list[row][col].state == False\n\n if self.create_check_range_horiz_cells_cond(col):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row, col - 1)\n or\n self.create_open_value_border_cells_cond(row, col + 1)\n ):\n self.open_value_border_cell(row, col)\n\n if self.create_check_range_vert_cells_cond(row):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row - 1, col)\n or\n self.create_open_value_border_cells_cond(row + 1, col)\n ):\n self.open_value_border_cell(row, col)\n\n if self.create_check_range_cross_top_cells_cond(row, col):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row - 1, col - 1)\n or\n self.create_open_value_border_cells_cond(row - 1, col + 1)\n ):\n self.open_value_border_cell(row, col)\n\n if self.create_check_range_cross_bottom_cells_cond(row, col):\n\n if cond_if_cell_isnt_opnd and \\\n (\n self.create_open_value_border_cells_cond(row + 1, col - 1)\n or\n self.create_open_value_border_cells_cond(row + 1, col + 1)\n ):\n self.open_value_border_cell(row, col)", "def 
_cell_tree_traversal(self, start):\n queue = deque(\n chain(\n self.tiling.cells_in_col(start[0]), self.tiling.cells_in_row(start[1])\n )\n )\n visited = set([start])\n while queue:\n cell = queue.popleft()\n if cell not in visited:\n yield cell\n visited.add(cell)\n queue.extend(self.tiling.cells_in_row(cell[1]))\n queue.extend(self.tiling.cells_in_col(cell[0]))", "def reportBoxes(self):\n res = ''\n for indx in self.shapesTable:\n res += 'Value {0!s} has {1!s} boxes'.format(indx, len(self.shapesTable[indx].boxes))\n return res\n \n # Concurrent version with threadpool does not seem to work - only one thread seems to run at a time, and speed is much less than sequential version", "def get_cell(melb_grid, coordinates, X_coords,Y_coords):\r\n\r\n #Initialize labels for grid rows\r\n grid_rows = {1: 'A', 2: 'B', 3: 'C', 4: 'D'}\r\n\r\n list_match = []\r\n cell = \"\"\r\n\r\n # Case 1: tweet lies ALONG the boundaries on any cell;\r\n # If so, the tweet score will be added either to the left and/or the below adjacent cell\r\n if coordinates[0] in X_coords or coordinates[1] in Y_coords:\r\n for grid_box in melb_grid:\r\n if (coordinates[1] >= grid_box[3] and coordinates[1] <= grid_box[4]) \\\r\n and (coordinates[0] >= grid_box[1] and coordinates[0] <= grid_box[2]):\r\n list_match.append(grid_box[0]) #id\r\n\r\n #print(list_match)\r\n\r\n #case 1.1 - when the tweet point lies ON the intersecting points of 4 cells\r\n # select the left-below cell\r\n if(len(list_match)==4): #matches 4 grid boxes\r\n cell = sorted(list_match, reverse = False)[2]\r\n\r\n #case 1.2 - when the tweet point lies either ON intersecting points of B4,C4, C5\r\n # or ON intersecting points of C2, C3, D3 -- ASSUME tweet belongs to LEFT box\r\n elif(len(list_match)==3):\r\n cell = sorted(list_match, reverse = False)[0]\r\n\r\n #case 1.2 - when the tweet point lies ALONG the boundary connecting 2 grid cells:\r\n # select either left and/or below cell\r\n elif len(list_match) == 2:\r\n if list_match[0][1] == list_match[1][1]: #comparison between top and bottom boxes\r\n cell = max(sorted(list_match, reverse = False))\r\n elif list_match[0][0] == list_match[1][0]: #comparison between left and right boxes\r\n cell = min(sorted(list_match, reverse = False))\r\n elif len(list_match) == 1:\r\n cell = list_match[0]\r\n\r\n #Case 2: when the point doesn't lie on the grid lines but lies within each cell\r\n else:\r\n cell = (grid_rows[sum([1 if coordinates[1] < i else 0 for i in Y_coords])]\r\n + str(sum([1 if coordinates[0] > i else 0 for i in X_coords])))\r\n\r\n #for example: coordiztes[1] = -37.51\r\n #print(\"Tweet Cell \", cell)\r\n #To test, point [144.9,-37.8] should lie on C2 and not B2\r\n\r\n return cell", "def StringBoxRects():\r\n pass", "def tight_fit(tables, text_boxes):\n for i in range(len(tables)):\n boxes_contained = []\n for b in text_boxes:\n if compute_contain(b, tables[i]) > 0.5:\n boxes_contained.append(b)\n if len(boxes_contained) > 0:\n tables[i] = bounding_box(tables[i], boxes_contained)\n return tables", "def bottom_left_fill(data, width, upperbound, debug_mode=False, buffer=0):\n\n free_area = _create_rectangle(0, 0, width, upperbound) # set available area\n total_area = _create_rectangle(0, 0, width, upperbound)\n solns = []\n\n for i in data.data:\n i_id = i[0]\n i_w = i[1] + buffer\n i_h = i[2] + buffer\n\n poly_rep = Polygon.Shapes.Rectangle(i_w, i_h) # polygon representation of this shape, floating in space\n if debug_mode: #debugging method, step through placing one rectangle at a time\n x, y, 
triangles = no_fill_polygon(total_area, free_area, poly_rep, debug_mode=debug_mode)\n free_area = free_area - _create_rectangle(x, y, i_w, i_h) # calculate new free area\n free_area.simplify()\n filled_area = total_area - free_area\n\n view.view_debug(solns, triangles, filled_area, width, upperbound)\n else:\n x, y = no_fill_polygon(total_area, free_area, poly_rep,\n debug_mode=debug_mode) # calculate position of polygon\n free_area = free_area - _create_rectangle(x, y, i_w, i_h) # calculate new free area\n free_area.simplify()\n filled_area = total_area - free_area\n\n solns.append((i_id, x, y, i_w - buffer, i_h - buffer)) # add soln\n\n\n return main.Solution(solns)", "def countCornerRectangles(self, grid):\n if not grid or not len(grid[0]):\n return 0\n\n if len(grid) == 1 or len(grid[0]) == 1:\n return 0\n\n r, c = len(grid), len(grid[0])\n\n col_dict = collections.defaultdict(set)\n for j in range(c):\n for i in range(r):\n if grid[i][j] == 1:\n col_dict[j].add(i)\n ans = 0\n cols = list(col_dict.keys())\n for c1 in range(len(cols)):\n for c2 in range(0, c1):\n s1, s2 = col_dict[cols[c1]], col_dict[cols[c2]]\n ans += self.combination(len(s1.intersection(s2)), 2)\n\n return ans", "def _borders(self):\r\n nx, ny = self.ncols-1, self.nrows-1\r\n options = self.options\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n cell.reformat(**self._cellborders(ix,iy,nx,ny,options))", "def check_board(self, row, col, dirn):\n\n parts = {}\n if dirn == \"H\":\n left = self.anchor_strings[(row, col)][0]\n if left:\n letts = [(key, self.anchor_strings[key][1]) for key in self.anchor_strings\n if (self.anchor_strings[key][1] and key[0] == row\n and col <= key[1] < col + 9)]\n letts = sorted(letts, key=lambda x: x[0][1])\n if letts:\n parts = {max(t[0][1] + 1 - col, 0):\n chars[1] for t in letts for chars in enumerate(t[1])}\n else:\n left = self.anchor_strings[(row, col)][1]\n else:\n left = self.anchor_strings[(row, col)][2]\n if left:\n letts = [(key, self.anchor_strings[key][3]) for key in self.anchor_strings\n if (self.anchor_strings[key][3] and key[1] == col\n and row <= key[0] < row + 9)]\n letts = sorted(letts, key=lambda x: x[0][0])\n if letts:\n parts = {max(t[0][0] + 1 - row, 0):\n chars[1] for t in letts for chars in enumerate(t[1])}\n else:\n left = self.anchor_strings[(row, col)][3]\n\n if parts:\n words = self.get_list(left, self.graph.contains_lett_patt, [self.rack, parts])\n else:\n words = self.get_list(left, self.graph.contains_lett, [self.rack])\n words = [t for t in words if t[1] != left]\n return words", "def fixMasks(image, table_mask, column_mask):\r\n table_mask = table_mask.reshape(1024,1024).astype(np.uint8)\r\n column_mask = column_mask.reshape(1024,1024).astype(np.uint8)\r\n \r\n #get contours of the mask to get number of tables\r\n contours, table_heirarchy = cv2.findContours(table_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n \r\n table_contours = []\r\n #ref: https://www.pyimagesearch.com/2015/02/09/removing-contours-image-using-python-opencv/\r\n #remove bad contours\r\n\r\n #print(contours)\r\n\r\n for c in contours:\r\n # if the contour is bad, draw it on the mask\r\n\r\n\r\n #if not is_contour_bad(c):\r\n if cv2.contourArea(c) > 2000:\r\n table_contours.append(c)\r\n \r\n if len(table_contours) == 0:\r\n return None\r\n\r\n #ref : https://docs.opencv.org/4.5.2/da/d0c/tutorial_bounding_rects_circles.html\r\n #get bounding box for the contour\r\n \r\n table_boundRect = [None]*len(table_contours)\r\n for i, c in 
enumerate(table_contours):\r\n polygon = cv2.approxPolyDP(c, 3, True)\r\n table_boundRect[i] = cv2.boundingRect(polygon)\r\n \r\n #table bounding Box\r\n table_boundRect.sort()\r\n \r\n col_boundRects = []\r\n for x,y,w,h in table_boundRect:\r\n \r\n col_mask_crop = column_mask[y:y+h,x:x+w]\r\n \r\n #get contours of the mask to get number of tables\r\n contours, col_heirarchy = cv2.findContours(col_mask_crop, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n #get bounding box for the contour\r\n boundRect = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n polygon = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(polygon)\r\n \r\n #adjusting columns as per table coordinates\r\n boundRect[i] = (boundRect[i][0] + x ,\r\n boundRect[i][1] + y ,\r\n boundRect[i][2],\r\n boundRect[i][3])\r\n \r\n col_boundRects.append(boundRect)\r\n \r\n image = image[...,0].reshape(1024, 1024).astype(np.uint8)\r\n \r\n #draw bounding boxes\r\n color = (0,255,0)\r\n thickness = 4\r\n \r\n for x,y,w,h in table_boundRect:\r\n image = cv2.rectangle(image, (x,y),(x+w,y+h), color, thickness)\r\n \r\n return image, table_boundRect, col_boundRects", "def draw_board(self):\r\n for i in range(9):\r\n for j in range(9):\r\n # Draw black lines to demarkate the 'boxes'\r\n if j%3 == 0 and j != 0:\r\n pygame.draw.line(self.window, BLACK, ((j//3)*180, 0), ((j//3)*180, 540), 4)\r\n if i%3 == 0 and i != 0:\r\n pygame.draw.line(self.window, BLACK, (0, (i//3)*180), (540, (i//3)*180), 4)\r\n \r\n # Draw the cells \r\n self.cells[i][j].draw(BLACK, 1)\r\n\r\n # Don't draw the placeholder 0s on the grid\r\n if self.cells[i][j].value != 0:\r\n self.cells[i][j].display(self.cells[i][j].value, (21+(j*60), (16+(i*60))), (0, 0, 0))\r\n \r\n # Bottom most line\r\n pygame.draw.line(self.window, (0, 0, 0), (0, ((i+1) // 3) * 180), (540, ((i+1) // 3) * 180), 4)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
First determine the column boundaries from the top border, then process rows. Each row may consist of multiple lines; accumulate lines until a row is complete. Call `self.parse_row` to finish the job.
def parse_table(self):
    # Top border must fully describe all table columns.
    self.columns = self.parse_columns(self.block[0], 0)
    self.border_end = self.columns[-1][1]
    firststart, firstend = self.columns[0]
    offset = 1                          # skip top border
    start = 1
    text_found = None
    while offset < len(self.block):
        line = self.block[offset]
        if self.span_pat.match(line):
            # Column span underline or border; row is complete.
            self.parse_row(self.block[start:offset], start,
                           (line.rstrip(), offset))
            start = offset + 1
            text_found = None
        elif line[firststart:firstend].strip():
            # First column not blank, therefore it's a new row.
            if text_found and offset != start:
                self.parse_row(self.block[start:offset], start)
            start = offset
            text_found = 1
        elif not text_found:
            start = offset + 1
        offset += 1
[ "def __readGrid(self, textLines):\n\t\tcolsIndex = None\n\t\tfor line in textLines:\n\t\t\tline = line.split(\"#\",1)[0].rstrip() # We don't take in account the comments and whitespaces at the end\n\t\t\tif len(line) == 0: continue # If the line is empty, we can skip it\n\n\t\t\t\"\"\"Parse the first line\"\"\"\n\t\t\tif colsIndex == None:\n\t\t\t\tcolsIndex = [(0,len(line.split(\"_\",1)[0])-1)] # give the width of the first column of the lines\n\t\t\t\tif line[0] != \" \" : \n\t\t\t\t\traise ValueError(\"The first line should start with white spaces.\")\n\t\t\t\tfor char, nb in ((label, sum(1 for _ in group)) for label, group in gb(line)):\n\t\t\t\t\tif not char in \" _\":\n\t\t\t\t\t\traise ValueError(\"The first line should only contain white spaces and underscores.\")\n\t\t\t\t\tif char == \" \" and nb > 1 and len(colsIndex) > 1:\n\t\t\t\t\t\traise ValueError(\"The column separator between col \"+str(len(colsIndex)-1)+\" and col \"+str(len(colsIndex))+\" is too wide.\")\n\t\t\t\t\tif char == \"_\":\n\t\t\t\t\t\tcolsIndex.append(((colsIndex[-1][1]+1), (nb+colsIndex[-1][1]+1)))\n\t\t\t\tself.__l = len(colsIndex)-1\n\t\t\t\tself.__values[\"v\"] = [-1]*self.__l\n\t\t\t\tcontinue\n\n\t\t\t\"\"\"Prepare the parsing of other lines\"\"\"\n\t\t\t\"\"\"try:\n\t\t\t\tsplitted_line = [line[x:y] for x,y in colsIndex]\n\t\t\texcept Exception as e:\n\t\t\t\traise e\"\"\"\n\n\t\t\t\"\"\"Parse the last line\"\"\"\n\t\t\tif line[colsIndex[0][1]] != \"|\": \n\t\t\t\tself.__values[\"v\"] = [self.__strToVal(line[x:y],len(self.__barrier[\"v\"])) for x,y in colsIndex[1:]]\n\n\t\t\t\t\"\"\"Parse all the other lines\"\"\"\n\t\t\telse : \n\t\t\t\tbarrier = {\"v\":[], \"h\":[]}\n\t\t\t\tself.__values[\"h\"].append(self.__strToVal(line[0:colsIndex[0][1]], len(colsIndex)-1))\n\t\t\t\tfor x,y in colsIndex[1:] :\n\t\t\t\t\ts = line[x:y]\n\t\t\t\t\tif not (s[0] in \" _\") or len(list(gb(s))) > 1 :\n\t\t\t\t\t\traise ValueError(\"La grille a une erreur ligne \"+str(len(self.__values[\"h\"])))\n\n\t\t\t\t\tif s[0] == '_':\n\t\t\t\t\t\tbarrier[\"h\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"h\"].append(False)\n\n\t\t\t\t\tif line[y] == '|':\n\t\t\t\t\t\tbarrier[\"v\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"v\"].append(False)\n\n\t\t\t\tself.__barrier[\"h\"].append(barrier[\"h\"])\n\t\t\t\tbarrier[\"v\"].pop()\n\t\t\t\tself.__barrier[\"v\"].append(barrier[\"v\"])\n\n\t\tself.__barrier[\"h\"].pop()\n\t\tself.__h = len(self.__barrier[\"v\"])", "def _generate_rows(self):\n margin_str = ' ' * self.column_margin\n # Loop over each data row\n for n, data_row in enumerate(self.data):\n if self.use_row_separators and n > 0:\n # Add row separator before every row except the first\n self._text_lines.append(self._row_separator)\n # Create a list where each element is a cell, represented by\n # a list of lines with its contents\n cells = [\n col.get_cell(data_row[i]) for i, col in enumerate(self.columns)\n if i < len(data_row)\n ]\n # The size of the tallest cell\n max_lines = max(len(cell) for cell in cells) if cells else 1\n # Loop over the columns to do vertical alignment\n for i, col in enumerate(self.columns):\n # Calculate how many lines are \"missing\" from each cell\n # with respect to the tallest\n delta = max_lines - (len(cells[i]) if i < len(cells) else 0)\n if delta > 0:\n if col.v_alignment == Alignment.MIDDLE:\n # Insert half as many missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * (delta // 2)\n elif col.v_alignment == Alignment.BOTTOM:\n # Insert all missing lines at 
the top\n cells[i][0:0] = [col.get_empty_cell()] * delta\n for m in range(max_lines):\n row = '│'\n for i, col in enumerate(self.columns):\n row += margin_str\n if i >= len(cells) or m >= len(cells[i]):\n row += col.get_empty_cell()\n else:\n row += cells[i][m]\n row += margin_str + '│'\n self._text_lines.append(row)\n self._text_lines.append(self._bottom)", "def _process_Raw_Lap_Rows(self):\n\n # For each racer we are going to add a list to to lapRowsTime and lapRowsPosition\n # lapRowsTime[0] will contain Racer #1's lap data (may be an empty string).\n\n # Example of self._columnHeaders.split()\n # ['___1___', '___2___', '___3___', '___4___', '___5___',\n # '___6___', '___7___', '___8___', '___9___', '___10__']\n split_columns = self._columnHeaders.split()\n\n max_racers = len(split_columns)\n for index in range(len(split_columns)):\n self.lapRowsTime.append([])\n self.lapRowsPosition.append([])\n\n '''\n For each _lapRowsRaw we are going to parse using a FIXED width, which we\n calculate using the _columnHeaders.\n NOTE - Split is not going to work because it trims: racerSplit = row.split()\n (the empty spaces have meaning).\n\n Need to parse the row\n Example:\n \"3/3.804 1/2.936 7/6.013 2/3.487 4/4.118 6/5.817 10/7.72 5/4.512 8/6.310 9/6.941\"\n Another Example:\n \" 1/20.27 3/20.87 2/19.54 \"\n\n Print line debugging\n Row:' 1/23.70 3/23.00 2/21.27 '\n raceIndex:2 lapWidth:6\n i: 17 '3/23.00'\n lapRowsTime:['27.50', '20.19', '21.93', '24.01', '20.81',\n '19.15', '21.15', '21.07', '21.00', '22.12', '20.87', '20.39']\n lapRowsPos:['3', '3', '2', '3', '3', '3', '3', '2', '2', '2', '3', '3']\n\n print\n print \"Row:'\" + row + \"'\"\n print \"racer_index:\" + str(racer_index) + \" lap_width:\" + str(lap_width)\n print \"index:\", index, \"'\" + row[index:index+lap_width + 1] + \"'\"\n print \"lapRowsTime:\" + str(self.lapRowsTime[racer_index])\n print \"lapRowsPos:\" + str(self.lapRowsPosition[racer_index])\n '''\n # WARNING Special Code - we use the columnHeaders to identify the fixed with\n # that the columns are using.\n # Explanation - Ignoring the first character [1:], find the next empty space.\n lap_width = self._columnHeaders[1:].find(' ') - 1\n\n # Walk through each line of the raw lap times and extract racers position and time.\n for row in self._lapRowsRaw:\n\n index = 1\n racer_index = 0\n while index < len(row):\n if (racer_index >= max_racers):\n raise Exception(\"Error in the _lapRowsRaw resulting in\" +\n \" incorrect parsing (laps for more racers than expected\")\n pos, lap = self._parse_Lap(row[index:index + lap_width + 1])\n\n self.lapRowsPosition[racer_index].append(pos)\n self.lapRowsTime[racer_index].append(lap)\n\n index += lap_width + 2 # +2 to skip the ' ' space.\n racer_index += 1\n\n '''\n Example - note that the white spaces only extend as far right as the last race on the line\n ___1___ ___2___ ___3___ ___4___ ___5___ ___6___ ___7___ ___8___ ___9___ ___10__\n 5/35.95 1/26.24 4/30.95 2/27.01 3/29.63\n 1/17.48 4/18.05 2/17.83 3/17.74\n 1/17.14 2/17.25 3/19.69\n 1/17.61 2/17.11\n 1/20.71\n ------- ------- ------- ------- ------- ------- ------- ------- ------- -------\n '''\n # Create empty records for the rest of the racers in this row\n # even if it ended early.\n while racer_index < max_racers:\n self.lapRowsPosition[racer_index].append('')\n self.lapRowsTime[racer_index].append('')\n racer_index += 1", "def parse_lines(self, lines):\n row_tree = None\n for line in lines:\n if not line:\n continue\n m = self.row_regex.search(line)\n if m is None:\n continue\n 
groups = list(m.groups())\n name_raw = str(groups.pop(0))\n name = name_raw.strip()\n name = re.sub('\\s+', ' ', name)\n data = [float(v.strip()) for v in groups]\n row = RowData(\n name, **{n: d for n, d in zip(self.data_col_names, data)})\n self.rows.append(row)\n\n ind = self.subcategory_indention\n depth = int((len(name_raw)-len(name_raw.lstrip(ind)))/len(ind))\n\n if row_tree is None:\n row_tree = [row]\n elif len(row_tree) < depth:\n raise ValueError(\n 'A hirarchical level was skipped! Found element of '\n 'depth {}. However parent element is of depth '\n '{}.'.format(depth, len(row_tree)-1))\n elif len(row_tree) >= depth:\n row_tree = row_tree[:depth]\n try:\n parent_row = row_tree[-1]\n RowData.set_child_parent_relation(row, parent_row)\n except IndexError:\n pass\n row_tree += [row]", "def _fill_next_rows(self, rows: _Rows, line: int) -> _Rows:\n unmerged_rows = {}\n\n for column, cell in enumerate(rows[line]):\n if isinstance(cell, TableCell) and cell.rowspan > 1:\n nb_lines = cell.rowspan - 1\n lines = [cell]\n if \"\\n\" in cell:\n lines = cell.replace(\"\\n\", \"<fg=default;bg=default>\\n</>\").split(\n \"\\n\"\n )\n if len(lines) > nb_lines:\n nb_lines = cell.count(\"\\n\")\n\n rows[line][column] = TableCell(\n lines[0], colspan=cell.colspan, style=cell.style\n )\n\n # Create a two dimensional dict (rowspan x colspan)\n placeholder = dict(\n [(k, {}) for k in range(line + 1, line + 1 + nb_lines)]\n )\n for k, v in unmerged_rows.items():\n if k in placeholder:\n for l, m in unmerged_rows[k].items():\n if l in placeholder[k]:\n placeholder[k][l].update(m)\n else:\n placeholder[k][l] = m\n else:\n placeholder[k] = v\n\n unmerged_rows = placeholder\n\n for unmerged_row_key, _ in unmerged_rows.items():\n value = \"\"\n if unmerged_row_key - line < len(lines):\n value = lines[unmerged_row_key - line]\n\n unmerged_rows[unmerged_row_key][column] = TableCell(\n value, colspan=cell.colspan, style=cell.style\n )\n if nb_lines == unmerged_row_key - line:\n break\n\n for unmerged_row_key, unmerged_row in unmerged_rows.items():\n # we need to know if unmerged_row will be merged or inserted into rows\n if (\n unmerged_row_key < len(rows)\n and isinstance(rows[unmerged_row_key], list)\n and (\n (\n self._get_number_of_columns(rows[unmerged_row_key])\n + self._get_number_of_columns(\n list(unmerged_rows[unmerged_row_key].values())\n )\n )\n <= self._number_of_columns\n )\n ):\n # insert cell into row at cell_key position\n for cell_key, cell in unmerged_row.items():\n rows[unmerged_row_key].insert(cell_key, cell)\n else:\n row = self._copy_row(rows, unmerged_row_key - 1)\n for column, cell in unmerged_row.items():\n if len(cell):\n row[column] = unmerged_row[column]\n\n rows.insert(unmerged_row_key, row)\n\n return rows", "def _initial_Processing_Raw_Lines(self):\n pacedata_included = None # If there is pace data, this will be used as a counter.\n\n lapData = False\n for index in range(len(self._singleRaceLines)):\n\n line = self._singleRaceLines[index]\n\n # Look for the column data\n if not lapData and (line.find('__10') != -1):\n # ___1___ ___2___ ___3___ ___4___ ___5___\n if not line.find('__1__'):\n raise Exception(\"The column header data spilled into a new line\")\n self._columnHeaders = line.strip('\\r\\n')\n lapData = True\n\n # Check to see if pace data is mixed in - this is a strong indicator.\n index = self._singleRaceLines.index(line)\n\n pacedata_included = self._check_for_pace_data(index)\n\n # Get the laps in row format\n elif lapData:\n # If we are the end of the lap data\n 
if (line.find('-----') != -1):\n # Example: ' ------- ------- ------- ------- '\n index += 2 # WARNING - This is for additional laps logic below.\n break\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n # Special code for dealing with pace data.\n if (pacedata_included is None): # Common case (no pace data)\n # Warning - we dont want to blanket strip this (white space matters)\n self._lapRowsRaw.append(line.strip('\\r\\n'))\n else: # Special case (pace data mixed in).\n if (pacedata_included % 3 == 0):\n self._lapRowsRaw.append(line.strip('\\r\\n'))\n pacedata_included += 1\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n # Get race header data.\n if not lapData:\n # 3/17.20 2/20.37 10/18.1 1/20.19\n self._raceHeaderData_RAW.append(line)\n\n # ===================================================\n # Check to see if there additional racer data - (MORE THAN 10 RACERS)\n # ===================================================\n\n # Starting at the index, lets look for another column row.\n found_additional_laps = False\n additional_lap_index = 0\n\n for trail_index in range(index, len(self._singleRaceLines)):\n line = self._singleRaceLines[trail_index].strip('\\r\\n')\n\n if ((not found_additional_laps) and (line.find('__11__') != -1)):\n found_additional_laps = True\n self._columnHeaders += line\n if (pacedata_included is not None):\n pacedata_included = 0\n\n elif found_additional_laps:\n if (line.find('-----') != -1):\n # Indicates there is no more data\n # Example: ' ------- ------- ------- ------- '\n break\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n # Special code for dealing with pace data.\n if (pacedata_included is None): # Common case (no pace data)\n self._lapRowsRaw[additional_lap_index] += line\n additional_lap_index += 1\n else: # Special case (pace data mixed in)\n if (pacedata_included % 3 == 0):\n self._lapRowsRaw[additional_lap_index] += line\n additional_lap_index += 1\n pacedata_included += 1\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++", "def _process_row(self, row):\n # Must be overridden.", "def _readRowData(self, unit_data, file_line):\r\n end_line = int(unit_data[file_line].strip())\r\n file_line += 1\r\n try:\r\n # Load the geometry data\r\n for i in range(file_line, end_line + file_line):\r\n chain = unit_data[i][0:10].strip()\r\n elev = unit_data[i][10:20].strip()\r\n rough = unit_data[i][20:30].strip()\r\n\r\n self.row_data['main'].addRow(\r\n {rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.ROUGHNESS: rough},\r\n # We don't need to make backup copies here. 
If it fails the\r\n # load fails anyway and this will just really slow us down\r\n no_copy=True\r\n )\r\n\r\n except NotImplementedError:\r\n logger.ERROR('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')\r\n raise\r\n\r\n return end_line + file_line", "def sort_into_lines(result):\n # the sorted data will be grouped into each line\n lines_of_table = {}\n wait_list = []\n column_wait_list = []\n current_bottom = 0\n for cell in result:\n if cell[0] == 1: # if this is a row title\n cells_in_line = [cell]\n current_bottom = cell[4]\n current_top = cell[2]\n no_row_title = [[-1, -1, -1, -1, -1, -1, \"-1\"]]\n no_row_bottom = 0\n for c in wait_list: # handling wait_list\n if c[4] - current_top < 5:\n if c[0] == 3:\n no_row_bottom = no_row_bottom + c[4]\n no_row_title.append(c)\n else:\n column_wait_list.append(c)\n else:\n cells_in_line.append(c)\n if len(column_wait_list) > 0:\n top = column_wait_list[0][2]\n column_title = [column_wait_list[0]]\n lines_of_table[top] = column_title\n for col in column_wait_list[1:]:\n if abs(top - col[2]) < 0.6 * (col[4] - col[2]):\n lines_of_table[top].append(col)\n else:\n top = col[2]\n column_title = [col]\n lines_of_table[top] = column_title\n if no_row_title.__len__() > 1:\n lines_of_table[no_row_bottom / no_row_title.__len__()] = no_row_title\n lines_of_table[current_bottom] = cells_in_line\n wait_list = []\n else: # have to decide which row it belongs to\n if current_bottom == 0: # if no row has been detected, then go to wait list\n wait_list.append(cell)\n else: # if there is one active row, check whether belongs to it or not\n if abs(current_bottom - cell[4]) < 0.6 * (cell[4] - cell[2]):\n lines_of_table[current_bottom].append(cell)\n else:\n wait_list.append(cell)\n return lines_of_table", "def _row_iter(self, upper_row):\n row = [x-1 for x in upper_row[1:]]\n row_len = len(row)\n pos = 0\n while pos >= 0:\n if pos == row_len:\n yield row[:]\n pos -= 1\n continue\n # If it would create an invalid entry, backstep\n if ( pos > 0 and (row[pos] >= row[pos-1] \\\n or (self._strict and row[pos] == row[pos-1]-1)) ) \\\n or row[pos] >= upper_row[pos] \\\n or (self._k is not None and row[pos] >= self._k):\n row[pos] = upper_row[pos+1] - 1\n pos -= 1\n continue\n row[pos] += 1\n pos += 1", "def break_rows(rows):\n rows_to_break = []\n for i in rows:\n try:\n for j in range(NUM_COLUMNS):\n if not BOARD[i][j].full:\n raise MyException\n except MyException:\n continue\n insert_sorted(i, rows_to_break)\n if rows_to_break:\n num_rows_to_break = len(rows_to_break)\n rows_to_break.append(0)\n blit_rect = BOARD_RECT.inflate(- BORDER_DISTANCE * 2, - BORDER_DISTANCE * 2)\n blit_rect.move_ip(0, rows_to_break[0] * ROW_STEP)\n for k in range(num_rows_to_break):\n dist = rows_to_break[k] - rows_to_break[k + 1]\n for i in reversed(range(rows_to_break[k + 1] + 2 + k, rows_to_break[k] + 1 + k)):\n for j in range(NUM_COLUMNS):\n BOARD[i][j].full = BOARD[i - k - 1][j].full\n blit_rect.move_ip(0, - dist * ROW_STEP)\n blit_rect.height = dist * ROW_STEP - BORDER_DISTANCE * 2\n SCREEN.blit(SCREEN, blit_rect.move(0, (k + 1) * ROW_STEP), blit_rect)\n for i in range(num_rows_to_break + 1):\n for j in range(NUM_COLUMNS):\n BOARD[i][j].full = False\n blit_rect.height = num_rows_to_break * ROW_STEP\n blit_rect.top = SQUARE_OFFSET[1]\n SCREEN.blit(BACKGROUND, blit_rect, blit_rect)\n pygame.display.update(BOARD_RECT)", "def process_rows(self, row_fn, init_fn=None, final_fn=None):\n self._impl.process_rows(row_fn, init_fn, final_fn)", "def _readRowData(self, 
unit_data, file_line):\r\n end_line = int(unit_data[file_line].strip())\r\n file_line += 1\r\n try:\r\n # Load the geometry data\r\n for i in range(file_line, end_line + file_line):\r\n chain = unit_data[i][0:10].strip()\r\n elev = unit_data[i][10:20].strip()\r\n rough = unit_data[i][20:30].strip()\r\n panel = unit_data[i][30:35].strip()\r\n rpl = unit_data[i][35:40].strip()\r\n bank = unit_data[i][40:50].strip()\r\n east = unit_data[i][50:60].strip()\r\n north = unit_data[i][60:70].strip()\r\n deact = unit_data[i][70:80].strip()\r\n special = unit_data[i][80:90].strip()\r\n\r\n if east == '':\r\n east = None\r\n if north == '':\r\n north = None\r\n if rpl == '':\r\n rpl = 1.000\r\n\r\n self.row_data['main'].addRow(\r\n {rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.ROUGHNESS: rough,\r\n rdt.RPL: rpl, rdt.PANEL_MARKER: panel, rdt.BANKMARKER: bank,\r\n rdt.EASTING: east, rdt.NORTHING: north,\r\n rdt.DEACTIVATION: deact, rdt.SPECIAL: special\r\n },\r\n # We don't need to make backup copies here. If it fails the\r\n # load fails anyway and this will just really slow us down\r\n no_copy=True\r\n )\r\n\r\n except NotImplementedError:\r\n logger.ERROR('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')\r\n raise\r\n\r\n return end_line + file_line", "def check_columns(self, lines, first_line, columns):\r\n # \"Infinite\" value for a dummy last column's beginning, used to\r\n # check for text overflow:\r\n columns.append((sys.maxint, None))\r\n lastcol = len(columns) - 2\r\n # combining characters do not contribute to the column width\r\n lines = [strip_combining_chars(line) for line in lines]\r\n\r\n for i in range(len(columns) - 1):\r\n start, end = columns[i]\r\n nextstart = columns[i+1][0]\r\n offset = 0\r\n for line in lines:\r\n if i == lastcol and line[end:].strip():\r\n text = line[start:].rstrip()\r\n new_end = start + len(text)\r\n columns[i] = (start, new_end)\r\n main_start, main_end = self.columns[-1]\r\n if new_end > main_end:\r\n self.columns[-1] = (main_start, new_end)\r\n elif line[end:nextstart].strip():\r\n raise TableMarkupError('Text in column margin '\r\n 'in table line %s.' % (first_line+offset+1),\r\n offset=first_line+offset)\r\n offset += 1\r\n columns.pop()", "def split_columns(row):\n cells = split_rows(row,vertical=False)\n merged_cells = []\n for C in cells:\n\n C.sort(key=lambda x:(top(x),left(x)))\n for i in range(len(C)-1):\n merge_text_fields(C[-2-i],C[-1])\n\n if verbose and len(C)>1:\n print(\"\\n------ Unexpected merge: perhaps a bad value for vskip? ------\")\n print(C[-1]['text'])\n\n merged_cells.append(C[-1])\n\n return merged_cells", "def validate_lines(grid, expected_height, expected_width):\n # String of exceptions that will be built as/if they occur.\n reports = \"\"\n valid_chars = (\"X\", \".\")\n try: \n # List of offenses and specific locations.\n bad_chars = []\n for row in range(len(grid)):\n # Check last character of each line is a \"\\n\"\n if grid[row][-1] != \"\\n\":\n bad_chars.append(\"Line %s does not end with \\n\" % str(row + 1))\n for char in range(len(grid[row]) - 1):\n # Check all other characters are valid.\n if grid[row][char] not in valid_chars:\n bad_chars.append(grid[row][char]) \n # True if bad_chars isn't empty. \n if bad_chars:\n raise BadCharacter(bad_chars)\n except BadCharacter as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n # List of offenses and specific locations.\n bad_lines = []\n for row in range(len(grid)):\n # Ignore last element as should be \"\\n\". 
Checked previously.\n actual_width = len(grid[row]) - 1 \n if actual_width < expected_width or actual_width > expected_width: \n bad_lines.append((actual_width, expected_width, row + 1))\n # True if bad_lines isn't empty.\n if bad_lines:\n raise BadLineLength(tuple(bad_lines)) \n except BadLineLength as error:\n reports += str(error)\n \n # Store actual height \n actual_height = len(grid)\n \n try:\n if actual_height > expected_height:\n raise TooManyLines(actual_height, expected_height)\n except TooManyLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n if actual_height < expected_height:\n raise TooFewLines(actual_height, expected_height) \n except TooFewLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n # True if reports isn't empty. \n if reports:\n print \"File format is invalid. Errors found:\\n\"\n print reports\n else:\n print \"File format okay\\n\"", "def _process_Raw_Header_Rows(self):\n\n #\n # Step 1 - is to get the general race information.\n #\n if len(self._raceHeaderData_RAW) < 5:\n raise Exception(\"The header for this race is malformed:%s\" % self._raceHeaderData_RAW)\n self.date = self._parse_Header_Date(self._raceHeaderData_RAW[0])\n\n self.trackName = self._raceHeaderData_RAW[2].strip()\n\n race_class_raw, self.roundNumber, self.raceNumber = \\\n self._parse_Class_And_Race_Data(self._raceHeaderData_RAW[4])\n\n # Extract the main event and main event round info from the class data.\n # Example: race classes often contain information like \"Mod Buggy A-main\"\n self.raceClass, self.mainEvent, self.mainEventRoundNum, self.mainEventParsed = \\\n self._parse_Class_Main_Event_Info(race_class_raw)\n\n #\n # Step 2 - is to process the general race results for each racer.\n #\n individualResult = self._raceHeaderData_RAW[7:-1]\n finalRacePosition = 0\n\n '''\n We tackle this part in several distinct peices.\n\n 1. Starting with the line:\n 'Fname RacerLastName\\t\\t\\t#9 \\t\\t26\\t\\t 8:07.943\\t\\t 17.063\\t\\t 6.008\\n'\n\n 2. We break up the line based on the '#'\n 'Fname RacerLastName' and '#9 \\t\\t26\\t\\t 8:07.943\\t\\t 17.063\\t\\t 6.008\\n'\n\n 3. 
Then we perform a split on the rest of the data\n ['#9', '26', '8:07.943', '17.063', '6.008']\n\n We must do additional checking because the final three columns are not\n guaranteed to be there.\n '''\n for line in individualResult:\n carnum_start_index = line.rfind(\"#\")\n finalRacePosition += 1\n driver = line[:carnum_start_index].strip()\n\n # Cut off the racer names to simplify things.\n racedata = line[carnum_start_index:]\n lineList = racedata.split()\n\n carRaw = lineList[0]\n if (carRaw[0] != '#'):\n raise Exception(\"Incorrect format for header data, execting a '#' in the car number, line: \" + line)\n car = int(carRaw[1:])\n\n laps = int(lineList[1])\n\n # WARNING - The following fields may not be present.\n racetime = lineList[2]\n if (line.find(':') <= 0): # Checking to see if the racer even has a race time.\n racetime = ''\n\n fastlap = ''\n behind = ''\n if (len(lineList) >= 4):\n fastlap = lineList[3]\n if len(lineList) == 5:\n behind = lineList[4]\n\n self.raceHeaderData.append({\"Driver\": driver,\n \"Car#\": car,\n \"Laps\": laps,\n \"RaceTime\": racetime,\n \"Fast Lap\": fastlap,\n \"Behind\": behind,\n \"Final Position\": finalRacePosition})", "def test_parse_row(self):\n rows = produce_rows_lst()\n parsed_rows = [AS_Rank_Website_Parser()._parse_row(row) for row in rows]\n\n # The row should only have 5 elements that represents the 5 columns#\n for parsed_row in parsed_rows:\n assert len(parsed_row) == 5\n\n # Each element within the row should be a string\n for elem in parsed_row:\n assert isinstance(elem, str)\n\n # The fourth element (country) should only have 2 letters\n assert len(parsed_row[3]) == 2\n\n # Verify that the elements that should be numbers are numbers\n assert parsed_row[0].isdigit()\n assert parsed_row[1].isdigit()\n assert parsed_row[4].isdigit()", "def _generate_lines(self):\r\n for iy in range(self.nrows):\r\n cell_row = [col[iy] for col in self.worktable]\r\n # this produces a list of lists, each of equal length\r\n cell_data = [cell.get() for cell in cell_row]\r\n cell_height = min(len(lines) for lines in cell_data)\r\n for iline in range(cell_height):\r\n yield ANSIString(\"\").join(_to_ansi(celldata[iline] for celldata in cell_data))", "def get_lines_in_reading_order(trp_lines: Iterable[trp.Line]) -> List[trp.Line]:\n columns = []\n lines = []\n\n for item in trp_lines:\n column_found = False\n bbox_left = item.geometry.boundingBox.left\n bbox_right = item.geometry.boundingBox.left + item.geometry.boundingBox.width\n bbox_centre = (bbox_left + bbox_right) / 2\n for index, column in enumerate(columns):\n column_centre = (column[\"left\"] + column[\"right\"]) / 2\n if (bbox_centre > column[\"left\"] and bbox_centre < column[\"right\"]) or (\n column_centre > bbox_left and column_centre < bbox_right\n ):\n # BBox appears inside the column\n lines.append([index, item])\n column_found = True\n break\n if not column_found:\n columns.append(\n {\n \"left\": item.geometry.boundingBox.left,\n \"right\": item.geometry.boundingBox.left + item.geometry.boundingBox.width,\n }\n )\n lines.append([len(columns) - 1, item])\n lines.sort(key=lambda x: x[0])\n return list(map(lambda x: x[1], lines))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the text `lines` of a row, parse it and append to `self.table`. The row is parsed according to the current column spec (either `spanline` if provided or `self.columns`). For each column, extract text from each line, and check for text in column margins. Finally, adjust for insignificant whitespace.
def parse_row(self, lines, start, spanline=None):
    if not (lines or spanline):
        # No new row, just blank lines.
        return
    if spanline:
        columns = self.parse_columns(*spanline)
        span_offset = spanline[1]
    else:
        columns = self.columns[:]
        span_offset = start
    self.check_columns(lines, start, columns)
    row = self.init_row(columns, start)
    for i in range(len(columns)):
        start, end = columns[i]
        cellblock = lines.get_2D_block(0, start, len(lines), end)
        cellblock.disconnect()      # lines in cell can't sync with parent
        cellblock.replace(self.double_width_pad_char, '')
        row[i][3] = cellblock
    self.table.append(row)
[ "def __readGrid(self, textLines):\n\t\tcolsIndex = None\n\t\tfor line in textLines:\n\t\t\tline = line.split(\"#\",1)[0].rstrip() # We don't take in account the comments and whitespaces at the end\n\t\t\tif len(line) == 0: continue # If the line is empty, we can skip it\n\n\t\t\t\"\"\"Parse the first line\"\"\"\n\t\t\tif colsIndex == None:\n\t\t\t\tcolsIndex = [(0,len(line.split(\"_\",1)[0])-1)] # give the width of the first column of the lines\n\t\t\t\tif line[0] != \" \" : \n\t\t\t\t\traise ValueError(\"The first line should start with white spaces.\")\n\t\t\t\tfor char, nb in ((label, sum(1 for _ in group)) for label, group in gb(line)):\n\t\t\t\t\tif not char in \" _\":\n\t\t\t\t\t\traise ValueError(\"The first line should only contain white spaces and underscores.\")\n\t\t\t\t\tif char == \" \" and nb > 1 and len(colsIndex) > 1:\n\t\t\t\t\t\traise ValueError(\"The column separator between col \"+str(len(colsIndex)-1)+\" and col \"+str(len(colsIndex))+\" is too wide.\")\n\t\t\t\t\tif char == \"_\":\n\t\t\t\t\t\tcolsIndex.append(((colsIndex[-1][1]+1), (nb+colsIndex[-1][1]+1)))\n\t\t\t\tself.__l = len(colsIndex)-1\n\t\t\t\tself.__values[\"v\"] = [-1]*self.__l\n\t\t\t\tcontinue\n\n\t\t\t\"\"\"Prepare the parsing of other lines\"\"\"\n\t\t\t\"\"\"try:\n\t\t\t\tsplitted_line = [line[x:y] for x,y in colsIndex]\n\t\t\texcept Exception as e:\n\t\t\t\traise e\"\"\"\n\n\t\t\t\"\"\"Parse the last line\"\"\"\n\t\t\tif line[colsIndex[0][1]] != \"|\": \n\t\t\t\tself.__values[\"v\"] = [self.__strToVal(line[x:y],len(self.__barrier[\"v\"])) for x,y in colsIndex[1:]]\n\n\t\t\t\t\"\"\"Parse all the other lines\"\"\"\n\t\t\telse : \n\t\t\t\tbarrier = {\"v\":[], \"h\":[]}\n\t\t\t\tself.__values[\"h\"].append(self.__strToVal(line[0:colsIndex[0][1]], len(colsIndex)-1))\n\t\t\t\tfor x,y in colsIndex[1:] :\n\t\t\t\t\ts = line[x:y]\n\t\t\t\t\tif not (s[0] in \" _\") or len(list(gb(s))) > 1 :\n\t\t\t\t\t\traise ValueError(\"La grille a une erreur ligne \"+str(len(self.__values[\"h\"])))\n\n\t\t\t\t\tif s[0] == '_':\n\t\t\t\t\t\tbarrier[\"h\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"h\"].append(False)\n\n\t\t\t\t\tif line[y] == '|':\n\t\t\t\t\t\tbarrier[\"v\"].append(True)\n\t\t\t\t\telse :\n\t\t\t\t\t\tbarrier[\"v\"].append(False)\n\n\t\t\t\tself.__barrier[\"h\"].append(barrier[\"h\"])\n\t\t\t\tbarrier[\"v\"].pop()\n\t\t\t\tself.__barrier[\"v\"].append(barrier[\"v\"])\n\n\t\tself.__barrier[\"h\"].pop()\n\t\tself.__h = len(self.__barrier[\"v\"])", "def parse_lines(self, lines):\n row_tree = None\n for line in lines:\n if not line:\n continue\n m = self.row_regex.search(line)\n if m is None:\n continue\n groups = list(m.groups())\n name_raw = str(groups.pop(0))\n name = name_raw.strip()\n name = re.sub('\\s+', ' ', name)\n data = [float(v.strip()) for v in groups]\n row = RowData(\n name, **{n: d for n, d in zip(self.data_col_names, data)})\n self.rows.append(row)\n\n ind = self.subcategory_indention\n depth = int((len(name_raw)-len(name_raw.lstrip(ind)))/len(ind))\n\n if row_tree is None:\n row_tree = [row]\n elif len(row_tree) < depth:\n raise ValueError(\n 'A hirarchical level was skipped! Found element of '\n 'depth {}. 
However parent element is of depth '\n '{}.'.format(depth, len(row_tree)-1))\n elif len(row_tree) >= depth:\n row_tree = row_tree[:depth]\n try:\n parent_row = row_tree[-1]\n RowData.set_child_parent_relation(row, parent_row)\n except IndexError:\n pass\n row_tree += [row]", "def check_columns(self, lines, first_line, columns):\r\n # \"Infinite\" value for a dummy last column's beginning, used to\r\n # check for text overflow:\r\n columns.append((sys.maxint, None))\r\n lastcol = len(columns) - 2\r\n # combining characters do not contribute to the column width\r\n lines = [strip_combining_chars(line) for line in lines]\r\n\r\n for i in range(len(columns) - 1):\r\n start, end = columns[i]\r\n nextstart = columns[i+1][0]\r\n offset = 0\r\n for line in lines:\r\n if i == lastcol and line[end:].strip():\r\n text = line[start:].rstrip()\r\n new_end = start + len(text)\r\n columns[i] = (start, new_end)\r\n main_start, main_end = self.columns[-1]\r\n if new_end > main_end:\r\n self.columns[-1] = (main_start, new_end)\r\n elif line[end:nextstart].strip():\r\n raise TableMarkupError('Text in column margin '\r\n 'in table line %s.' % (first_line+offset+1),\r\n offset=first_line+offset)\r\n offset += 1\r\n columns.pop()", "def insert_lines(self, row=None, lines=1):\n if row is None:\n row = self.term_cursor[1]\n else:\n row = self.scrollregion_start\n\n if lines == 0:\n lines = 1\n\n while lines > 0:\n self.term.insert(row, self.empty_line())\n self.term.pop(self.scrollregion_end)\n lines -= 1", "def _generate_rows(self):\n margin_str = ' ' * self.column_margin\n # Loop over each data row\n for n, data_row in enumerate(self.data):\n if self.use_row_separators and n > 0:\n # Add row separator before every row except the first\n self._text_lines.append(self._row_separator)\n # Create a list where each element is a cell, represented by\n # a list of lines with its contents\n cells = [\n col.get_cell(data_row[i]) for i, col in enumerate(self.columns)\n if i < len(data_row)\n ]\n # The size of the tallest cell\n max_lines = max(len(cell) for cell in cells) if cells else 1\n # Loop over the columns to do vertical alignment\n for i, col in enumerate(self.columns):\n # Calculate how many lines are \"missing\" from each cell\n # with respect to the tallest\n delta = max_lines - (len(cells[i]) if i < len(cells) else 0)\n if delta > 0:\n if col.v_alignment == Alignment.MIDDLE:\n # Insert half as many missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * (delta // 2)\n elif col.v_alignment == Alignment.BOTTOM:\n # Insert all missing lines at the top\n cells[i][0:0] = [col.get_empty_cell()] * delta\n for m in range(max_lines):\n row = '│'\n for i, col in enumerate(self.columns):\n row += margin_str\n if i >= len(cells) or m >= len(cells[i]):\n row += col.get_empty_cell()\n else:\n row += cells[i][m]\n row += margin_str + '│'\n self._text_lines.append(row)\n self._text_lines.append(self._bottom)", "def assert_lines_in_text(text, lines,\n remove_white_spaces=True, remove_newline=True):\n filtered_lines = lines\n if remove_white_spaces:\n text = text.replace(\" \", \"\")\n filtered_lines = filtered_lines.replace(\" \", \"\")\n if remove_newline:\n text = text.replace(os.linesep,\"\")\n filtered_lines = filtered_lines.replace(os.linesep,\"\")\n assert text.find(filtered_lines) >= 0, \\\n \"Lines:\\n %s\\n are not found\" % (lines)", "def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)", "def _wrap(self, availWidth):\n\n self._lines = []\n minWidthRequired = 0\n\n if 
len(self._prewrapLines) == 0:\n return minWidthRequired\n\n spaceWidth = self._fontManager.textWidth(\" \", self._fontSize)\n\n tempLines = self._prewrapLines\n currentTempLine = 0\n #logger.debug(\"TableText::_wrap> availWidth: \" + str(availWidth) + \", tempLines: \" + str(tempLines))\n for currentTempLine, tempLine in enumerate(tempLines):\n tempLineWidth = self._fontManager.textWidth(tempLine, self._fontSize)\n #logger.debug(\"TableText::_wrap> tempLine: \" + tempLine + \", tempLineWidth: \" + str(tempLineWidth))\n\n if tempLineWidth <= availWidth:\n # easy case: the entire line fits within availWidth\n\n #logger.debug(\"TableText::_wrap> tempLineWidth <= availWidth\")\n self._lines.append(tempLine)\n minWidthRequired = tempLineWidth\n else:\n # the line needs to be wrapped in order to fit in availWidth\n # break the line into tokens, each token is a word or number or a punctuation character\n\n tempWords = re.split(\"(\\W)\", tempLine)\n totalLinesHeight = len(self._lines) * self._lineHeight\n while len(tempWords) > 0 and totalLinesHeight < self._maxCellHeight:\n #logger.debug(\"TableText::_wrap> starting new line. Words left: \" + str(tempWords))\n currentLineWords = []\n remainingWidth = availWidth\n\n fillingCurrentLine = True\n # TODO: remove any leading spaces\n\n while fillingCurrentLine:\n tempWord = tempWords.pop(0)\n\n # reportlab doesn't handle \\t character. replace with space\n if tempWord == '\\t':\n tempWord = ' '\n\n #start = time.time()\n tempWordWidth = self._fontManager.textWidth(tempWord, self._fontSize)\n #finish = time.time()\n #stringWidthTimes.append(finish-start)\n\n\n #addSpace = False\n #logger.debug(\"TableText::_wrap> word: \" + tempWord + \", wordWidth: \" + str(tempWordWidth) + \", remainingWidth: \" + str(remainingWidth))\n if len(currentLineWords) > 0:\n tempWordWidth = tempWordWidth + spaceWidth\n #addSpace = True\n\n if tempWordWidth <= remainingWidth:\n # temp word can fit in the remaining space\n #logger.debug(\"TableText::_wrap> can fit within remaining space\")\n\n #if addSpace:\n #\tcurrentLineWords.append(\" \")\n currentLineWords.append(tempWord)\n remainingWidth = remainingWidth - tempWordWidth\n elif tempWordWidth <= availWidth:\n # temp word cannot fit in the remaining space, but can fit on a new line\n #logger.debug(\"TableText::_wrap> cannot fit within remaining space, but can fit on next line\")\n\n tempWords.insert(0, tempWord)\n remainingWidth = 0\n fillingCurrentLine = False\n else:\n # temp word cannot fit in the remaining space, nor can it fit on a new line\n # hard-break a segment off the word that will fit in the remaining space\n #logger.debug(\"TableText::_wrap> cannot fit within remaining space, and cannot fit on next line\")\n\n #if addSpace:\n #\tremainingWidth = remainingWidth - spaceWidth\n firstSegment, restOfWord = self._wrapWord(tempWord, remainingWidth, wordWidth = tempWordWidth)\n #logger.debug(\"TableText::_wrap> broke word \" + tempWord + \" into: \" + firstSegment + \" and \" + restOfWord)\n tempWords.insert(0, restOfWord)\n #if addSpace:\n #\tcurrentLineWords.append(\" \")\n currentLineWords.append(firstSegment)\n fillingCurrentLine = False\n\n if len(tempWords) == 0:\n # we're done filling the current line, given that there are no more words\n fillingCurrentLine = False\n\n currentLine = \"\".join(currentLineWords)\n self._lines.append(currentLine)\n totalLinesHeight = len(self._lines) * self._lineHeight\n minWidthRequired = max(minWidthRequired, availWidth - remainingWidth)\n\n # check to see if we need to 
truncate the cell's contents\n if (len(self._lines) * self._lineHeight) >= self._maxCellHeight:\n break\n\n if (currentTempLine + 1) < len(tempLines):\n # we truncated\n percentageShown = (100.0 * float(currentTempLine) / float(len(tempLines)))\n logger.info(\"TableText::_wrap> truncated cell contents. %s%% shown.\" % percentageShown)\n # TODO: this needs to be internationalized\n self._lines.append(\"... Truncated. %s%% shown.\" % percentageShown)\n\n logger.debug(\"TableText::_wrap> minWidthRequired: \" + str(minWidthRequired) + \", self._lines: \" + str(self._lines))\n return minWidthRequired", "def remove_lines(self, row=None, lines=1):\n if row is None:\n row = self.term_cursor[1]\n else:\n row = self.scrollregion_start\n\n if lines == 0:\n lines = 1\n\n while lines > 0:\n self.term.pop(row)\n self.term.insert(self.scrollregion_end, self.empty_line())\n lines -= 1", "def parse_row(line):\n\n if type(line) is not str:\n raise TypeError('line must be a non-empty string.')\n if not line.strip():\n raise ValueError('line must be a non-empty string.')\n\n row_instructions = line[line.index(':') + 2 :]\n\n number = _find_first_num(line)\n\n side = None\n if re.search('rs|right side', line, re.IGNORECASE):\n side = 'RS'\n elif re.search('ws|wrong side', line, re.IGNORECASE):\n side = 'WS'\n\n row = Row([Annotation(row_instructions)], number, side)\n\n if re.search(IN_ROW_REPEAT_REGEX, line, re.IGNORECASE):\n return Row(parse_in_row_repeat(row_instructions), number, side)\n\n return row", "def test_multi_line(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),\n ('|', ' Column One ', '|', ' ', '|', ' ', '|'),\n ]\n assert actual == expected", "def makeTextCell(table, span, widths, heights, use_headers):\n width = getTotalSpanWidth(span, widths)\n height = getTotalSpanHeight(span, heights)\n text_row = span[0][0]\n text_column = span[0][1]\n text = table[text_row][text_column]\n\n lines = text.split(\"\\n\")\n for i in range(len(lines)):\n width_difference = width - len(lines[i])\n lines[i] = lines[i] + lineBreak(width_difference, \" \")\n\n height_difference = height - len(lines)\n empty_lines = []\n for i in range(0, height_difference):\n empty_lines.append(lineBreak(width, \" \"))\n lines.extend(empty_lines)\n\n output = [\"+\" + lineBreak(width, \"-\") + \"+\"]\n for i in range(0, height):\n output.append(\"|\" + lines[i] + \"|\")\n\n if use_headers and span[0][0] == 0:\n symbol = \"=\"\n else:\n symbol = \"-\"\n output.append(\"+\" + lineBreak(width, symbol) + \"+\")\n\n text = \"\\n\".join(output)\n row_count = getSpanRowCount(span)\n column_count = getSpanColumnCount(span)\n cell = Cell(text, text_row, text_column, row_count, column_count)\n\n return cell", "def _fill_next_rows(self, rows: _Rows, line: int) -> _Rows:\n unmerged_rows = {}\n\n for column, cell in enumerate(rows[line]):\n if isinstance(cell, TableCell) and cell.rowspan > 1:\n nb_lines = cell.rowspan - 1\n lines = [cell]\n if \"\\n\" in cell:\n lines = cell.replace(\"\\n\", \"<fg=default;bg=default>\\n</>\").split(\n \"\\n\"\n )\n if len(lines) > nb_lines:\n nb_lines = cell.count(\"\\n\")\n\n rows[line][column] = TableCell(\n lines[0], colspan=cell.colspan, style=cell.style\n )\n\n # Create a two dimensional dict (rowspan x colspan)\n placeholder = dict(\n [(k, {}) for k in range(line + 1, line + 1 + nb_lines)]\n )\n for k, v in 
unmerged_rows.items():\n if k in placeholder:\n for l, m in unmerged_rows[k].items():\n if l in placeholder[k]:\n placeholder[k][l].update(m)\n else:\n placeholder[k][l] = m\n else:\n placeholder[k] = v\n\n unmerged_rows = placeholder\n\n for unmerged_row_key, _ in unmerged_rows.items():\n value = \"\"\n if unmerged_row_key - line < len(lines):\n value = lines[unmerged_row_key - line]\n\n unmerged_rows[unmerged_row_key][column] = TableCell(\n value, colspan=cell.colspan, style=cell.style\n )\n if nb_lines == unmerged_row_key - line:\n break\n\n for unmerged_row_key, unmerged_row in unmerged_rows.items():\n # we need to know if unmerged_row will be merged or inserted into rows\n if (\n unmerged_row_key < len(rows)\n and isinstance(rows[unmerged_row_key], list)\n and (\n (\n self._get_number_of_columns(rows[unmerged_row_key])\n + self._get_number_of_columns(\n list(unmerged_rows[unmerged_row_key].values())\n )\n )\n <= self._number_of_columns\n )\n ):\n # insert cell into row at cell_key position\n for cell_key, cell in unmerged_row.items():\n rows[unmerged_row_key].insert(cell_key, cell)\n else:\n row = self._copy_row(rows, unmerged_row_key - 1)\n for column, cell in unmerged_row.items():\n if len(cell):\n row[column] = unmerged_row[column]\n\n rows.insert(unmerged_row_key, row)\n\n return rows", "def __create_lines_table(self):\r\n i = 0\r\n rows = []\r\n cols = []\r\n self.__add_item('number', self.tab1, i, 0, cols)\r\n self.__add_item('last stop', self.tab1, i, 1, cols)\r\n self.__add_item('route', self.tab1, i, 2, cols)\r\n self.__add_item('frequency', self.tab1, i, 3, cols)\r\n self.__add_item('bus capacity', self.tab1, i, 4, cols)\r\n rows.append(cols)\r\n i += 1\r\n for line in self.simulation.lines:\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(0), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[0] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[0], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n cols = []\r\n self.__add_item(line.number, self.tab1, i, 0, cols)\r\n self.__add_item(line.last_stop_name(1), self.tab1, i, 1, cols)\r\n _route = [stop.name.encode(\"utf-8\") for stop in line.routes[1] if stop.name != \"P\"]\r\n self.__add_item(_route, self.tab1, i, 2, cols)\r\n self.__add_item(line.frequencies[1], self.tab1, i, 3, cols)\r\n self.__add_item(line.bus_capacity, self.tab1, i, 4, cols)\r\n i += 1\r\n rows.append(cols)", "def test_no_padding_no_borders(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n table.inner_column_border = False\n table.outer_border = False\n table.padding_left = 0\n table.padding_right = 0\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('Row One ', 'Two', 'Three'),\n ('Column One', ' ', ' '),\n ]\n assert actual == expected", "def _rehighlight_lines(self, lines):\r\n if self.document() is None:\r\n return\r\n for line in lines:\r\n block = self.document().findBlockByNumber(line)\r\n self.rehighlightBlock(block)", "def validate_lines(grid, expected_height, expected_width):\n # String of exceptions that will be built as/if they occur.\n reports = \"\"\n valid_chars = (\"X\", \".\")\n try: \n # List of offenses and specific locations.\n bad_chars = []\n for row in range(len(grid)):\n # Check last character of each line is a \"\\n\"\n if grid[row][-1] != 
\"\\n\":\n bad_chars.append(\"Line %s does not end with \\n\" % str(row + 1))\n for char in range(len(grid[row]) - 1):\n # Check all other characters are valid.\n if grid[row][char] not in valid_chars:\n bad_chars.append(grid[row][char]) \n # True if bad_chars isn't empty. \n if bad_chars:\n raise BadCharacter(bad_chars)\n except BadCharacter as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n # List of offenses and specific locations.\n bad_lines = []\n for row in range(len(grid)):\n # Ignore last element as should be \"\\n\". Checked previously.\n actual_width = len(grid[row]) - 1 \n if actual_width < expected_width or actual_width > expected_width: \n bad_lines.append((actual_width, expected_width, row + 1))\n # True if bad_lines isn't empty.\n if bad_lines:\n raise BadLineLength(tuple(bad_lines)) \n except BadLineLength as error:\n reports += str(error)\n \n # Store actual height \n actual_height = len(grid)\n \n try:\n if actual_height > expected_height:\n raise TooManyLines(actual_height, expected_height)\n except TooManyLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n try:\n if actual_height < expected_height:\n raise TooFewLines(actual_height, expected_height) \n except TooFewLines as error:\n reports += \"\\t\" + str(error) + \"\\n\"\n \n # True if reports isn't empty. \n if reports:\n print \"File format is invalid. Errors found:\\n\"\n print reports\n else:\n print \"File format okay\\n\"", "def split_rows(sentences, column_names):\r\n new_sentences = []\r\n texts=[]\r\n root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']\r\n start = [dict(zip(column_names, root_values))]\r\n for sentence in sentences:\r\n info=[]\r\n rows = sentence.split('\\n')\r\n sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']\r\n sentence = start + sentence\r\n new_sentences.append(sentence)\r\n if \"newdoc id\" in rows[0]: # beginnings of new docs\r\n info.append(rows[1])\r\n info.append(rows[2])\r\n texts.append(info)\r\n else:\r\n info.append(rows[0])\r\n info.append(rows[1])\r\n texts.append(info)\r\n return new_sentences, texts", "def parse(self, lines):\n self.reset()\n if type(lines) is str:\n lines = lines.split(\"\\n\")\n\n line_no = 0\n for line in lines:\n line_no += 1\n\n # Block begin?\n m, block_class = self.is_block_begin(line)\n if block_class:\n new_block = block_class(line_no, m.group(1))\n self.push_block(switch=self.add_element(new_block))\n continue\n # Block end?\n m = self.is_block_end(line)\n if m:\n self.pop_block(m.group(1))\n continue\n\n m = self.RE_EXEC.search(line)\n if m:\n element = exec_t(line_no, stmt=m.group(2), indent=m.end(1))\n else:\n element = line_t(line_no, line)\n\n # Regular line\n self.add_element(element)", "def test_single_line(style):\n row = ['Row One Column One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [18, 3, 5], 1)]\n expected = [\n ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'),\n ]\n assert actual == expected" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for text in column margins and text overflow in the last column. Raise TableMarkupError if anything but whitespace is in column margins. Adjust the end value for the last column if there is text overflow.
def check_columns(self, lines, first_line, columns): # "Infinite" value for a dummy last column's beginning, used to # check for text overflow: columns.append((sys.maxsize, None)) lastcol = len(columns) - 2 # combining characters do not contribute to the column width lines = [strip_combining_chars(line) for line in lines] for i in range(len(columns) - 1): start, end = columns[i] nextstart = columns[i+1][0] offset = 0 for line in lines: if i == lastcol and line[end:].strip(): text = line[start:].rstrip() new_end = start + len(text) columns[i] = (start, new_end) main_start, main_end = self.columns[-1] if new_end > main_end: self.columns[-1] = (main_start, new_end) elif line[end:nextstart].strip(): raise TableMarkupError('Text in column margin ' 'in table line %s.' % (first_line+offset+1), offset=first_line+offset) offset += 1 columns.pop()
[ "def check_columns(self, lines, first_line, columns):\r\n # \"Infinite\" value for a dummy last column's beginning, used to\r\n # check for text overflow:\r\n columns.append((sys.maxint, None))\r\n lastcol = len(columns) - 2\r\n # combining characters do not contribute to the column width\r\n lines = [strip_combining_chars(line) for line in lines]\r\n\r\n for i in range(len(columns) - 1):\r\n start, end = columns[i]\r\n nextstart = columns[i+1][0]\r\n offset = 0\r\n for line in lines:\r\n if i == lastcol and line[end:].strip():\r\n text = line[start:].rstrip()\r\n new_end = start + len(text)\r\n columns[i] = (start, new_end)\r\n main_start, main_end = self.columns[-1]\r\n if new_end > main_end:\r\n self.columns[-1] = (main_start, new_end)\r\n elif line[end:nextstart].strip():\r\n raise TableMarkupError('Text in column margin '\r\n 'in table line %s.' % (first_line+offset+1),\r\n offset=first_line+offset)\r\n offset += 1\r\n columns.pop()", "def text_error_cols(text): \n po = ParseOptions(min_null_count=0, max_null_count=999)\n en_dir = Dictionary() # open the dictionary only once\n sent = Sentence(text, en_dir, po)\n linkages = sent.parse()\n if sent.null_count() == 0 :\n return []\n else:\n error_cols=[]\n iws=[]\n for lkg in linkages:\n words=[w for w in lkg.words()]\n #desc(lkg)\n for k,w in enumerate(words):\n if is_no_link_ward(w):\n if k in iws:\n break\n else:\n iws.append(k)\n js=text_words2col_begin_end(text,words)\n error_cols.append(js[k-1])\n return error_cols", "def calcColWidth(self):", "def RetainHorizontalSpacing(self, first_column, depth):\n previous = self.previous_token\n if not previous:\n return\n\n if previous.is_pseudo:\n previous = previous.previous_token\n if not previous:\n return\n\n cur_lineno = self.lineno\n prev_lineno = previous.lineno\n if previous.is_multiline_string:\n prev_lineno += previous.value.count('\\n')\n\n if (cur_lineno != prev_lineno or\n (previous.is_pseudo and previous.value != ')' and\n cur_lineno != previous.previous_token.lineno)):\n self.spaces_required_before = (\n self.column - first_column + depth * style.Get('INDENT_WIDTH'))\n return\n\n cur_column = self.column\n prev_column = previous.column\n prev_len = len(previous.value)\n\n if previous.is_pseudo and previous.value == ')':\n prev_column -= 1\n prev_len = 0\n\n if previous.is_multiline_string:\n prev_len = len(previous.value.split('\\n')[-1])\n if '\\n' in previous.value:\n prev_column = 0 # Last line starts in column 0.\n\n self.spaces_required_before = cur_column - (prev_column + prev_len)", "def rewrap(self) -> None:\n self.measured_widths = self.colwidth[:]\n for cell in self.cells:\n cell.wrap(width=self.cell_width(cell, self.colwidth))\n if not cell.wrapped:\n continue\n if cell.row is None or cell.col is None:\n msg = 'Cell co-ordinates have not been set'\n raise ValueError(msg)\n width = math.ceil(max(column_width(x) for x in cell.wrapped) / cell.colspan)\n for col in range(cell.col, cell.col + cell.colspan):\n self.measured_widths[col] = max(self.measured_widths[col], width)", "def test_inside_column(self):\n col1, col2, col3 = st.columns([2.5, 1.5, 0.5])\n\n with col1:\n st.text_input(\"foo\")\n\n all_deltas = self.get_all_deltas_from_queue()\n\n # 5 elements will be created: 1 horizontal block, 3 columns, 1 widget\n self.assertEqual(len(all_deltas), 5)\n text_input_proto = self.get_delta_from_queue().new_element.text_input\n\n self.assertEqual(text_input_proto.label, \"foo\")", "def test_columns_with_large_gap(self):\n\n columns = st.columns(3, gap=\"LARGE\")\n\n 
all_deltas = self.get_all_deltas_from_queue()\n\n horizontal_block = all_deltas[0]\n columns_blocks = all_deltas[1:4]\n\n # 4 elements will be created: 1 horizontal block, 3 columns, each receives \"large\" gap arg\n self.assertEqual(len(all_deltas), 4)\n self.assertEqual(horizontal_block.add_block.horizontal.gap, \"large\")\n self.assertEqual(columns_blocks[0].add_block.column.gap, \"large\")\n self.assertEqual(columns_blocks[1].add_block.column.gap, \"large\")\n self.assertEqual(columns_blocks[2].add_block.column.gap, \"large\")", "def EllipsisMiddleTruncate(text, available_space, line_length):\n ...", "def _isEndOfRow(self):\r\n\t\tinfo=self.copy()\r\n\t\tinfo.expand(textInfos.UNIT_CHARACTER)\r\n\t\treturn info._rangeObj.getText(-1)==u'\\u0007'", "def test_unsupported_columns(self):\n self.dlg.set_focus()\n table = self.dlg.Table\n self.assertRaises(NotImplementedError, table.column_count)\n self.assertRaises(NotImplementedError, table.get_column, 0)", "def _check_valid_docstring_spacing(self) -> None:\n if self.Modifier.FLOATING in self.type_mod:\n return # floating docstring sections need not be checked for this\n\n end_line = self.extent.end.line + 1\n cursor_start = self.cursor.extent.start\n if end_line != cursor_start.line:\n # there is at least 1 (probably empty) line between the comment end and whatever it\n # is describing\n diag = self.diags.symbol_spacing\n mess = 'Invalid line-spacing between docstring and the symbol it describes. The docstring must appear immediately above its target'\n eloc = self.make_source_range('', '', end_line)\n floc = SourceRange.from_locations(self.make_source_location(end_line, 1), cursor_start)\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, eloc, highlight=False, patch=Patch(floc, '')\n )\n return", "def test_console_width_is_positive():\n assert console.columns() > 0", "def typeset(self, container, text_align, line_spacing, last_descender,\n last_line=False, force=False):\n document = container.document\n\n # drop spaces (and empty spans) at the end of the line\n while len(self) > 0:\n last_span = self[-1]\n if last_span and last_span.ends_with_space:\n self.cursor -= last_span.space.width\n self.pop()\n else:\n break\n else: # abort if the line is empty\n return last_descender\n\n descender = min(glyph_span.span.descender(container)\n for glyph_span in self)\n if last_descender is None:\n advance = max(glyph_span.span.ascender(container)\n for glyph_span in self)\n else:\n advance = line_spacing.advance(self, last_descender, container)\n container.advance(advance)\n self.advance = advance\n\n container.advance(- descender)\n for glyph_span in self:\n glyph_span.span.before_placing(container)\n container.advance(descender)\n\n # horizontal displacement\n left = self.indent\n\n if self._has_tab or text_align == TextAlign.JUSTIFY and last_line:\n text_align = 'left'\n extra_space = self.width - self.cursor\n if text_align == TextAlign.JUSTIFY:\n # TODO: padding added to spaces should be prop. 
to font size\n nr_spaces = sum(glyph_span.number_of_spaces for glyph_span in self)\n if nr_spaces > 0:\n add_to_spaces = extra_space / nr_spaces\n for glyph_span in self:\n if glyph_span.number_of_spaces > 0:\n glyph_span.space.width += add_to_spaces\n elif text_align == TextAlign.CENTER:\n left += extra_space / 2.0\n elif text_align == TextAlign.RIGHT:\n left += extra_space\n\n canvas = container.canvas\n cursor = container.cursor\n current_annotation = AnnotationState(container)\n for span, glyph_and_widths in group_spans(self):\n try:\n width = canvas.show_glyphs(left, cursor, span, glyph_and_widths,\n container)\n except InlineFlowableException:\n ascender = span.ascender(document)\n if ascender > 0:\n top = cursor - ascender\n else:\n inline_height = span.virtual_container.height\n top = cursor - span.descender(document) - inline_height\n span.virtual_container.place_at(container, left, top)\n width = span.width\n current_annotation.update(span, left, width)\n left += width\n current_annotation.place_if_any()\n container.advance(- descender)\n return descender", "def realign_punctuated_text(df, text, skip_1st=0, margin=2):\n # Built-in str.split doesn't retain starting/trailing spaces correctly.\n # Probably would be fine but just keep this since it took a while to get\n # right and I don't want to break it.\n words = re.split(' ', text)\n rows = []\n start_i = 0\n for i, chunk in df.iterrows():\n chunk_words = re.split(' ', chunk.text)\n length = len(chunk_words)\n punct_words = words[start_i:start_i + length + margin]\n suff = ' '.join(chunk_words[-2:])\n scores = []\n bigrams = zip(punct_words[skip_1st:], punct_words[skip_1st + 1:])\n # Avoid list comp so we can exit early if we find a perfect match.\n for j, gram in enumerate(bigrams):\n score = fuzz.ratio(suff, ' '.join(gram).lower())\n if score == 100:\n argmax = j\n break\n scores.append(score)\n else:\n argmax = np.argmax(scores)\n if max(scores) < 80:\n warnings.warn(\n 'Max score < 80. 
Your rows may have gotten misaligned '\n f'at row {i}: {chunk.text}'\n )\n punct_len = skip_1st + argmax + 2\n rows.append(' '.join(words[start_i:start_i + punct_len]))\n start_i += punct_len\n\n new_df = pd.DataFrame(rows, columns=['text'])\n return pd.concat((new_df, df.reset_index()[['start', 'duration']].copy()),\n axis=1)", "def column_offset_validation(arguments):\n inputfile = arguments[1]\n header = inputfile.readline()\n splitter = arguments[4]\n attributesCount = len(header.split(splitter))\n operands = arguments[0].split(',')\n hasheader = arguments[3]\n\n if hasheader:\n for operand in operands:\n\n # if you are here the column offset can be a integer or string\n if operand[1:].isdecimal():\n data_error_handler(operand, attributesCount, arguments)\n else:\n # This block of code is executed for float or string\n if operand[1:] not in header:\n print(f'column reference {operand} entered is incorrect')\n free_resources(arguments)\n sys.exit(-1)\n\n else:\n # no header so setting the file pointer back to first line\n # if inputtype != None: (while going back is an option in files not for stdin)\n # inputfile.seek(0)\n for operand in operands:\n if operand[1:].isdecimal():\n data_error_handler(operand, attributesCount, arguments)\n else:\n print(\n f'column reference {operand} cannot be a string, perhaps you forgot to pass \"-h\" arg')\n free_resources(arguments)\n sys.exit(-1)\n return header", "def test_split_columns_invalid_values():\n with pytest.raises(ValueError):\n split_columns(\"example\", -1)\n\n with pytest.raises(ValueError):\n split_columns(\"example\", -200)\n\n with pytest.raises(ValueError):\n split_columns(\"example\", 0)\n\n with pytest.raises(ValueError):\n split_columns(\"example\", 200)", "def test_no_padding_no_borders(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n table.inner_column_border = False\n table.outer_border = False\n table.padding_left = 0\n table.padding_right = 0\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('Row One ', 'Two', 'Three'),\n ('Column One', ' ', ' '),\n ]\n assert actual == expected", "def _borders(self):\r\n nx, ny = self.ncols-1, self.nrows-1\r\n options = self.options\r\n for ix, col in enumerate(self.worktable):\r\n for iy, cell in enumerate(col):\r\n cell.reformat(**self._cellborders(ix,iy,nx,ny,options))", "def _check_valid_indentation(self, lineno: int, line: str, left_stripped: str) -> None:\n if linelen := len(line):\n indent = linelen - len(left_stripped)\n expected_ind = 0 if line.startswith(('.', '+', '-', '$')) else self.indent\n if indent != expected_ind:\n diag = self.diags.indentation\n loc = self.make_source_range(' ' * indent, line, lineno)\n mess = f'Invalid indentation ({indent}), all regular (non-empty, non-parameter, non-seealso) text must be indented to {self.indent} columns'\n self.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, loc, patch=Patch(loc, ' ' * expected_ind)\n )\n return", "def truncate_like_pd_max_colwidth(x: any) -> str:\n max_colwidth = pd.get_option(\"display.max_colwidth\")\n if max_colwidth is None:\n return x\n else:\n s = str(x)\n if len(s) <= max_colwidth:\n return s\n else:\n return s[:max_colwidth - 3] + '...'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extend the list values of `master` with those from `newdata`. Both parameters must be dictionaries containing list values.
def update_dict_of_lists(master, newdata): for key, values in list(newdata.items()): master.setdefault(key, []).extend(values)
[ "def update_dictargs( list_of_dicts, master_dict, issuer = 'alberta_treasury' ):\n key, default_dict = create_default_dictargs( issuer = issuer )\n if master_dict.get( key, None ) is None:\n master_dict[ key ] = list()\n for append_dict in list_of_dicts:\n d = dict( default_dict.items() + append_dict.items() )\n master_dict[ key ].append( d )\n return master_dict", "def update_data(self, new_dict):\n\n self.data.update(new_dict)", "def updateWith(self,new=None):\n assert isinstance(new, Chemplate)\n for id in new.data:\n self.data[id] = new.data[id]", "def extend(self, list):", "def _merge_dict(self,\n base_items,\n new_items,\n list_extend=True,\n yml_multilines=False):\n if isinstance(new_items, dict):\n for key, value in new_items.items():\n if isinstance(value, dict):\n base_items[key] = self._merge_dict(\n base_items=base_items.get(key, {}),\n new_items=value,\n list_extend=list_extend\n )\n elif (not isinstance(value, int) and (\n ',' in value or (\n '\\n' in value and not yml_multilines))):\n base_items[key] = re.split(',|\\n', value)\n base_items[key] = [\n i.strip() for i in base_items[key] if i\n ]\n elif isinstance(value, list):\n if isinstance(base_items.get(key), list) and list_extend:\n base_items[key].extend(value)\n else:\n base_items[key] = value\n elif isinstance(value, (tuple, set)):\n le = list_extend # assigned for pep8\n if isinstance(base_items.get(key), tuple) and le:\n base_items[key] += tuple(value)\n elif isinstance(base_items.get(key), list) and le:\n base_items[key].extend(list(value))\n else:\n base_items[key] = value\n else:\n base_items[key] = new_items[key]\n elif isinstance(new_items, list):\n if list_extend:\n base_items.extend(new_items)\n else:\n base_items = new_items\n return base_items", "def merge(self, another_list: object) -> None:\n # for loop to iterate through data to add/append Dynamic Array\n for i in range(another_list.size):\n self.append(another_list.data[i])\n return", "def extend(self, datasets: Iterable[_TypeMultiBlockLeaf]) -> None:\n # Code based on collections.abc\n if isinstance(datasets, MultiBlock):\n for key, data in zip(datasets.keys(), datasets):\n self.append(data, key)\n else:\n for v in datasets:\n self.append(v)", "def master_info(self, master_info):\n if master_info is None:\n raise ValueError(\"Invalid value for `master_info`, must not be `None`\")\n\n self._master_info = master_info", "def update(self, new_body_values):\n self.data = merge_dicts(self.data, new_body_values)\n return self", "def deep_merge_lists(original, incoming):\n common_length = min(len(original), len(incoming))\n for idx in range(common_length):\n if isinstance(original[idx], dict) and isinstance(incoming[idx], dict):\n deep_merge_dicts(original[idx], incoming[idx])\n\n elif isinstance(original[idx], list) and isinstance(incoming[idx], list):\n deep_merge_lists(original[idx], incoming[idx])\n\n else:\n original[idx] = incoming[idx]\n\n for idx in range(common_length, len(incoming)):\n original.append(incoming[idx])", "def splitMasterData(mdata):\n # Create empty dictionaries with keys\n print(\"Splitting master data into subsets...\", end = ' ')\n learndata = {}\n testdata = {}\n for i in mdata.keys():\n learndata[i] = []\n testdata[i] = []\n\n for i in range(len(mdata['subset'])):\n if mdata['subset'][i] == 'learn':\n for x in mdata:\n learndata[x].append(mdata[x][i])\n elif mdata['subset'][i] == 'test':\n for x in mdata:\n testdata[x].append(mdata[x][i])\n elif mdata['subset'][i] == 'exclude':\n pass\n else:\n print(\"Invalid subset read(valid= 
'learn', 'test', or 'exclude')\")\n print(mdata['subset'][i])\n sys.exit()\n print(\"Done!\")\n return testdata, learndata", "def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:\n if isinstance(config, str):\n import json\n config = json.loads(config)\n properties = self.all_properties()\n config['fields'] = config.get('fields', dict())\n fields = config['fields']\n\n d_color = defaults.get('color', 'white')\n d_icon = defaults.get('icon', 'icons:default')\n\n if delete_orphan_fields:\n exist = {p.name() for p in properties}\n unexist = set(fields.keys()) - exist\n for name in unexist:\n del fields[name]\n\n for p in properties:\n field = fields.get(p.name(), {'show_in_search': False,\n 'combine_fields': False,\n 'number_of_rules': 0,\n 'glossaries': [],\n 'use_in_network_search': False,\n 'case_sensitive': False,\n 'show_as_link': 'text',\n 'blacklists': [],\n 'show_in_result': 'no',\n 'rule_extractor_enabled': False,\n 'search_importance': 1,\n 'group_name': '',\n 'show_in_facets': False,\n 'predefined_extractor': 'none',\n 'rule_extraction_target': ''})\n config['fields'][p.name()] = field\n field['screen_label'] = ' '.join(p.label())\n field['description'] = '\\n'.join(p.definition())\n field['name'] = p.name()\n\n # color\n if 'color' not in field:\n color = self.__merge_close_ancestor_color(p, fields, attr='color')\n field['color'] = color if color else d_color\n # icon\n if 'icon' not in field:\n icon = self.__merge_close_ancestor_color(p, fields, attr='icon')\n field['icon'] = icon if icon else d_icon\n # type\n if isinstance(p, OntologyObjectProperty):\n field['type'] = 'kg_id'\n else:\n try:\n field['type'] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))\n except StopIteration:\n field['type'] = None\n return config", "def mastered_instruments(self, mastered_instruments):\n\n self._mastered_instruments = mastered_instruments", "def test_override_custom(self):\n\n default = {\n 'list': ['two']\n }\n\n override1 = {\n 'list': append('three', 'four')\n }\n\n override2 = {\n 'list': prepend('one')\n }\n\n merged = merge(default, override1, override2)\n\n self.assertEquals(merged['list'], ['one', 'two', 'three', 'four'])", "def new_data(self):\n\t\tif len(self.data) == 10:\n\t\t\tfor i in range(5):\n\t\t\t\ttt = self.data[5+i]\n\t\t\t\tself.data_new.append(tt)", "def override_master_items(master_name, spread=False):\n script = 'try\\n'\n page_nums = [2, 3] if spread else [1]\n for num in page_nums:\n script += (\n f'override (every item of master page items of page {num}'\n 'whose item layer\\'s name is \"Work\") destination page '\n f'page {num}\\n')\n script += 'end try'\n return wrap_and_run(script)", "def merge(self, other) -> None:\n if other.new:\n raise ValueError(\"This patch should not have a .new set.\")\n if not other.old:\n raise ValueError(\"No data in .old\")\n self.old = other.old + self.old\n self.old_hash = get_sha256(self.old)", "def update_symbol_master(self):\n new_symbol_master = self.pull_symbol_master()\n ts = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n \n # first check if symbolmaster exists\n if not os.path.exists(self.symbol_master_filepath):\n \n # if it currently does not exist, create one and update\n new_symbol_master['updateTimeLocal'] = ts\n self.final_symbol_master = new_symbol_master\n self.final_symbol_master.to_feather(self.symbol_master_filepath)\n \n else: \n # pull existing symbol master\n current_symbol_master = pd.read_feather(self.symbol_master_filepath)\n\n # find difference between 
old and new\n current_body = current_symbol_master[self.symbol_master_cols]\n new_body = new_symbol_master[self.symbol_master_cols]\n check_rows = new_body.merge(current_body, how='outer', indicator=True)\n new_rows = check_rows[check_rows['_merge'] == 'left_only'].copy()\n new_rows.drop('_merge', axis=1, inplace=True)\n\n # update new rows\n if len(new_rows) > 0:\n new_rows['entryTimeLocal'] = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n existing_symbol_master = current_symbol_master[self.symbol_master_cols + ['entryTimeLocal']]\n final_symbol_master = pd.concat([existing_symbol_master, new_rows], axis=0)\n final_symbol_master['updateTimeLocal'] = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('Number of new symbols appended: {}'.format(len(new_rows)))\n else:\n final_symbol_master = current_symbol_master[self.symbol_master_cols + ['entryTimeLocal']].copy()\n final_symbol_master['updateTimeLocal'] = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print('No new symbols appended')\n\n # save final symbol master as feather file\n self.final_symbol_master = final_symbol_master\n self.final_symbol_master.reset_index().to_feather(self.symbol_master_filepath)\n \n # reset index\n self.final_symbol_master.reset_index(drop=True, inplace=True)\n return self.final_symbol_master", "def updateLists(self):\r\n\r\n self.parentOf = self.relationType.sourceDataTypeNames\r\n self.childOf = self.relationType.targetDataTypeNames", "def nfvi_kube_rootca_host_update_list(self, new_list):\n self._nfvi_kube_rootca_host_update_list = new_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the state machine on `input_lines`. Return results (a list). Reset `self.line_offset` and `self.current_state`. Run the beginning-of-file transition. Input one line at a time and check for a matching transition. If a match is found, call the transition method and possibly change the state. Store the context returned by the transition method to be passed on to the next transition matched. Accumulate the results returned by the transition methods in a list. Run the end-of-file transition. Finally, return the accumulated results.
def run(self, input_lines, input_offset=0, context=None, input_source=None, initial_state=None): self.runtime_init() if isinstance(input_lines, StringList): self.input_lines = input_lines else: self.input_lines = StringList(input_lines, source=input_source) self.input_offset = input_offset self.line_offset = -1 self.current_state = initial_state or self.initial_state if self.debug: print(( '\nStateMachine.run: input_lines (line_offset=%s):\n| %s' % (self.line_offset, '\n| '.join(self.input_lines))), file=self._stderr) transitions = None results = [] state = self.get_state() try: if self.debug: print('\nStateMachine.run: bof transition', file=self._stderr) context, result = state.bof(context) results.extend(result) while True: try: try: self.next_line() if self.debug: source, offset = self.input_lines.info( self.line_offset) print(( '\nStateMachine.run: line (source=%r, ' 'offset=%r):\n| %s' % (source, offset, self.line)), file=self._stderr) context, next_state, result = self.check_line( context, state, transitions) except EOFError: if self.debug: print(( '\nStateMachine.run: %s.eof transition' % state.__class__.__name__), file=self._stderr) result = state.eof(context) results.extend(result) break else: results.extend(result) except TransitionCorrection as exception: self.previous_line() # back up for another try transitions = (exception.args[0],) if self.debug: print(( '\nStateMachine.run: TransitionCorrection to ' 'state "%s", transition %s.' % (state.__class__.__name__, transitions[0])), file=self._stderr) continue except StateCorrection as exception: self.previous_line() # back up for another try next_state = exception.args[0] if len(exception.args) == 1: transitions = None else: transitions = (exception.args[1],) if self.debug: print(( '\nStateMachine.run: StateCorrection to state ' '"%s", transition %s.' % (next_state, transitions[0])), file=self._stderr) else: transitions = None state = self.get_state(next_state) except: if self.debug: self.error() raise self.observers = [] return results
[ "def run(self, input_lines, input_offset=0, context=None,\r\n input_source=None, initial_state=None):\r\n self.runtime_init()\r\n if isinstance(input_lines, StringList):\r\n self.input_lines = input_lines\r\n else:\r\n self.input_lines = StringList(input_lines, source=input_source)\r\n self.input_offset = input_offset\r\n self.line_offset = -1\r\n self.current_state = initial_state or self.initial_state\r\n if self.debug:\r\n print >>self._stderr, (\r\n u'\\nStateMachine.run: input_lines (line_offset=%s):\\n| %s'\r\n % (self.line_offset, u'\\n| '.join(self.input_lines)))\r\n transitions = None\r\n results = []\r\n state = self.get_state()\r\n try:\r\n if self.debug:\r\n print >>self._stderr, '\\nStateMachine.run: bof transition'\r\n context, result = state.bof(context)\r\n results.extend(result)\r\n while True:\r\n try:\r\n try:\r\n self.next_line()\r\n if self.debug:\r\n source, offset = self.input_lines.info(\r\n self.line_offset)\r\n print >>self._stderr, (\r\n u'\\nStateMachine.run: line (source=%r, '\r\n u'offset=%r):\\n| %s'\r\n % (source, offset, self.line))\r\n context, next_state, result = self.check_line(\r\n context, state, transitions)\r\n except EOFError:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: %s.eof transition'\r\n % state.__class__.__name__)\r\n result = state.eof(context)\r\n results.extend(result)\r\n break\r\n else:\r\n results.extend(result)\r\n except TransitionCorrection, exception:\r\n self.previous_line() # back up for another try\r\n transitions = (exception.args[0],)\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: TransitionCorrection to '\r\n 'state \"%s\", transition %s.'\r\n % (state.__class__.__name__, transitions[0]))\r\n continue\r\n except StateCorrection, exception:\r\n self.previous_line() # back up for another try\r\n next_state = exception.args[0]\r\n if len(exception.args) == 1:\r\n transitions = None\r\n else:\r\n transitions = (exception.args[1],)\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: StateCorrection to state '\r\n '\"%s\", transition %s.'\r\n % (next_state, transitions[0]))\r\n else:\r\n transitions = None\r\n state = self.get_state(next_state)\r\n except:\r\n if self.debug:\r\n self.error()\r\n raise\r\n self.observers = []\r\n return results", "def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n % (state.__class__.__name__, transitions)), file=self._stderr)\r\n for name in transitions:\r\n pattern, method, next_state = state.transitions[name]\r\n match = pattern.match(self.line)\r\n if match:\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: Matched transition '\r\n '\"%s\" in state \"%s\".'\r\n % (name, state.__class__.__name__)), file=self._stderr)\r\n return method(match, context, next_state)\r\n else:\r\n if self.debug:\r\n print((\r\n '\\nStateMachine.check_line: No match in state \"%s\".'\r\n % state.__class__.__name__), file=self._stderr)\r\n return state.no_match(context, transitions)", "def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n % (state.__class__.__name__, transitions))\r\n for name in transitions:\r\n pattern, 
method, next_state = state.transitions[name]\r\n match = pattern.match(self.line)\r\n if match:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: Matched transition '\r\n '\"%s\" in state \"%s\".'\r\n % (name, state.__class__.__name__))\r\n return method(match, context, next_state)\r\n else:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: No match in state \"%s\".'\r\n % state.__class__.__name__)\r\n return state.no_match(context, transitions)", "def process(self, lines):\n for line in lines:\n self._process_line(line)", "def build_from(lines:[str]) -> [object]:\n lines = iter(lines)\n current_line = None\n while True:\n try:\n line = next(lines).strip()\n except StopIteration:\n break\n if not line: break\n if REG_CHARACTER.match(line): # new line\n if current_line:\n yield current_line\n try:\n character, content, refs = parse_line(line)\n except TypeError: # parse_line returned None ?!\n print(f\"ERROR: parse_line didn't parse '{line}'\")\n current_line = Line(character.strip(), content.strip(), refs)\n else: # continuation of previous line\n # print('CURRENT LINE:', current_line)\n # print(' :', line)\n current_line.content += '\\n' + line\n if current_line:\n yield current_line", "def lex(self, line):\n\n # only add line if we are in a continuation or line is not empty\n if self.continuation is True or line.strip() != '':\n self.line += line\n\n self.continuation = False\n # keep running states until out of data or we need a continuation\n while self.continuation is False and len(self.line) > 0:\n for token in self.state():\n if token.ident == Lexer.error.ident:\n yield token\n # reset state on error\n self._reset()\n return\n yield token", "def parse_initial_state_transitions(lines: List[Line]) -> Tuple[Dict[str, Line], List[Line]]:\n remaining_lines = []\n initial_state_names = {}\n\n for line in lines:\n m = re.fullmatch(r'^\\[\\*\\]\\s+-{1,2}>\\s+(\\w+)\\s*(.*)', line.text)\n if not m:\n remaining_lines.append(line)\n continue\n\n name, trailing_text = m.groups()\n assert name not in initial_state_names, f'Duplicate initial transition for state {name} in {line}'\n assert not trailing_text, f'Additional text after initial transition in {line}: {line.orig_text}'\n initial_state_names[name] = line\n\n return initial_state_names, remaining_lines", "def parse_lines(self, lines):\n raise NotImplementedError(self.__class__)", "def parse(self, lines):\n self.reset()\n if type(lines) is str:\n lines = lines.split(\"\\n\")\n\n line_no = 0\n for line in lines:\n line_no += 1\n\n # Block begin?\n m, block_class = self.is_block_begin(line)\n if block_class:\n new_block = block_class(line_no, m.group(1))\n self.push_block(switch=self.add_element(new_block))\n continue\n # Block end?\n m = self.is_block_end(line)\n if m:\n self.pop_block(m.group(1))\n continue\n\n m = self.RE_EXEC.search(line)\n if m:\n element = exec_t(line_no, stmt=m.group(2), indent=m.end(1))\n else:\n element = line_t(line_no, line)\n\n # Regular line\n self.add_element(element)", "def _eagerly_parse_lines(self, lines, skeleton_regex, event_parsers, events, time=None):\n\n # Recompile all regex so that they work on bytes rather than strings.\n # This simplifies the rest of the code while allowing the raw output\n # from a process to be fed\n def encode(string):\n return string.encode('ascii')\n\n events = list(map(encode, events))\n event_parsers = {\n encode(event): parser\n for event, parser in event_parsers.items()\n }\n\n # Only add an extra iterator and tuple 
unpacking if that is strictly\n # necessary, as it comes with a performance cost\n time_is_provided = time is not None\n skel_search = skeleton_regex.search\n if time_is_provided:\n lines = zip(time, lines)\n drop_filter = lambda line: not skel_search(line[1])\n else:\n drop_filter = lambda line: not skel_search(line)\n\n # First, get rid of all the lines coming before the trace\n lines = itertools.dropwhile(drop_filter, lines)\n\n # Appending to lists is amortized O(1). Inside the list, we store\n # tuples since they are:\n # 1) the most compact Python representation of a product type\n # 2) output directly by regex.search()\n skeleton_data = []\n events_data = {\n **{event: (None, None) for event in events},\n **{\n event: (parser.bytes_regex.search, [])\n for event, parser in event_parsers.items()\n },\n }\n available_events = set()\n\n begin_time = None\n end_time = None\n time_type = getattr(np, self.HEADER_FIELDS['__timestamp'])\n\n # THE FOLLOWING LOOP IS A THE MOST PERFORMANCE-SENSITIVE PART OF THAT\n # CLASS, APPLY EXTREME CARE AND BENCHMARK WHEN MODIFYING\n # Best practices:\n # - resolve all dotted names ahead of time\n # - minimize the amount of local variables. Prefer anonymous\n # expressions\n # - Catch exceptions for exceptional cases rather than explicit check\n\n # Pre-lookup methods out of the loop to speed it up\n append = list.append\n group = self._RE_MATCH_CLS.group\n groups = self._RE_MATCH_CLS.groups\n nextafter = np.nextafter\n inf = math.inf\n line_time = 0\n parse_time = '__timestamp' in skeleton_regex.groupindex.keys()\n\n for line in lines:\n prev_time = line_time\n if time_is_provided:\n line_time, line = line\n\n match = skel_search(line)\n # Stop at the first non-matching line\n try:\n event = group(match, '__event')\n line_time = time_type(group(match, '__timestamp'))\n # Assume only \"time\" is not in the regex. Keep that out of the hot\n # path since it's only needed in rare cases (like nesting parsers)\n except IndexError:\n # If we are supposed to parse time, let's re-raise the\n # exception\n if parse_time:\n raise\n else:\n # Otherwise, make sure \"event\" is defined so that we only\n # go a match failure on \"time\"\n event # pylint: disable=pointless-statement\n # The line did not match the skeleton regex, so skip it\n except TypeError:\n if b'EVENTS DROPPED' in line:\n raise DroppedTraceEventError('The trace buffer got overridden by new data, increase the buffer size to ensure all events are recorded')\n # Unknown line, could be coming e.g. from stderr\n else:\n continue\n\n # Do a global deduplication of timestamps, across all\n # events regardless of the one we will parse. 
This ensures\n # stable results and joinable dataframes from multiple\n # parser instance.\n if line_time <= prev_time:\n line_time = nextafter(prev_time, inf)\n\n if begin_time is None:\n begin_time = line_time\n\n # If we can parse it right away, let's do it now\n try:\n search, data = events_data[event]\n append(\n data,\n # Add the fixedup time\n groups(search(line)) + (line_time,)\n )\n # If we don't have a parser for it yet (search == None),\n # just store the line so we can infer its parser later\n except TypeError:\n # Add the fixedup time and the full line for later\n # parsing as well\n append(\n skeleton_data,\n groups(match) + (line_time, line)\n )\n # We are not interested in that event, but we still remember the\n # pareseable events\n except KeyError:\n available_events.add(event)\n\n # This should have been set on the first line.\n # Note: we don't raise the exception if no events were asked for, to\n # allow creating dummy parsers without any line\n if begin_time is None and events:\n raise ValueError('No lines containing events have been found')\n\n end_time = line_time\n available_events.update(\n event\n for event, (search, data) in events_data.items()\n if data\n )\n\n events_df = {}\n for event, parser in event_parsers.items():\n try:\n # Remove the tuple data from the dict as we go, to free memory\n # before proceeding to the next event to smooth the peak memory\n # consumption\n _, data = events_data.pop(event)\n except KeyError:\n pass\n else:\n decoded_event = event.decode('ascii')\n df = self._make_df_from_data(parser.regex, data, ['__timestamp'])\n # Post-process immediately to shorten the memory consumption\n # peak\n df = self._postprocess_df(decoded_event, parser, df)\n events_df[decoded_event] = df\n\n # Compute the skeleton dataframe for the events that have not been\n # parsed already. It contains the event name, the time, and potentially\n # the fields if they are needed\n skeleton_df = self._make_df_from_data(skeleton_regex, skeleton_data, ['__timestamp', 'line'])\n # Drop unnecessary columns that might have been parsed by the regex\n to_keep = {'__event', '__fields', 'line'}\n skeleton_df = skeleton_df[sorted(to_keep & set(skeleton_df.columns))]\n # Make the event column more compact\n skeleton_df['__event'] = skeleton_df['__event'].astype('category', copy=False)\n # This is very fast on a category dtype\n available_events.update(skeleton_df['__event'].unique())\n\n available_events = {event.decode('ascii') for event in available_events}\n return (events_df, skeleton_df, (begin_time, end_time), available_events)", "def run(self, s):\n state = self.init_state\n for c in s:\n state = self.transition(state, c)\n return state", "def process_lines(self, lines, file):\n return lines", "def process_ops_input(self):\n input_data = self.text()\n if (self.local_state == State.GOTO_LINE):\n self.goto_line.emit(int(input_data))\n else:\n self.search.emit(input_data)", "def input(self, i):\n self.i_count += 1\n rlist = self.rules.get(self.state, [])\n for (test, dst, action, tag) in rlist + self.rules.get(None, []): # Rules starting from None are added to all states\n t_info = TransitionInfo(self.state, dst, self.i_count, None)\n result = test(i, t_info) if callable(test) else test == i\n t_info = t_info._replace(result=result)\n if result:\n if dst is not None: # Transitions ending in None stay in the same state\n self.state = dst\n # Run the action after the state change so it could override the end state (e.g. 
pop state from a stack)\n out = action(i, t_info) if callable(action) else action\n # Be sure to trace the actual end state after `action` is done\n self.tracer(i, TraceInfo(t_info, test, action, tag, out, self.state))\n return out\n self.tracer(i, TraceInfo(t_info, test, action, tag, None, self.state))\n\n return self.unrecognized(i, self.state, self.i_count)", "def update_line_search(self):\n # Collect information on a forward evaluation that just took place\n alpha_try = self.load_vector(\"alpha\") # step length\n f_try = self.load_vector(\"f_try\") # misfit for the trial model\n\n # Update the line search with a new step length and misfit value\n self._line_search.step_count += 1\n self._line_search.update_search_history(step_len=alpha_try,\n func_val=f_try)\n\n # Calculate a new step length based on the current step length and its\n # corresponding misfit.\n alpha, status = self._line_search.calculate_step_length()\n\n # Note: if status is 'PASS' then `alpha` represents the step length of\n # the lowest misfit in the line search and we reconstruct `m_try` w/ it\n if status.upper() in [\"PASS\", \"TRY\"]:\n # Create a new trial model based on search direction, step length\n # and the initial model vector\n _m = self.load_vector(\"m_new\")\n _p = self.load_vector(\"p_new\")\n\n # Sets the latest trial model using the current `alpha` value\n m_try = _m.copy()\n m_try.update(vector=_m.vector + alpha * _p.vector)\n logger.info(\"line search model 'm_try' parameters: \")\n m_try.check()\n elif status.upper() == \"FAIL\":\n # Failed line search skips over costly vector manipulations\n m_try = None\n\n return m_try, alpha, status", "def step(self):\n if self.__global_state != DFAGlobalState.START:\n raise RuntimeError('DFA is not started!')\n\n if len(self.__input_list) > 0:\n ch = self.__input_list[0]\n transit_to = self.__find_current_state_transition(ch)\n if transit_to:\n self.__logging_list.add_event(DfaLoggingEvent(self.__current_state, ch, transit_to))\n self.__current_state = transit_to\n self.__input_list = self.__input_list[1:]\n else:\n self.__logging_list.set_error(f'no transition for symbol \"{ch}\" in state \"{self.__current_state}\"')\n self.halt()\n return\n else:\n if self.__current_state not in self.__dfa_dict['end_states']:\n self.__logging_list.set_error(f'input string ended at non end state \"{self.__current_state}\"')\n self.halt()", "def run(self):\n print (\"Worker is now running at step {} with step_size {} starting \"\n \"at time {}\".format(self.step, self.step_size, self.start_time))\n # read in the entries for this step\n processed = 0\n for line in self.inputf.xreadlines():\n entry = self.process_line(line)\n\n processed += 1\n if (processed % 1) == 0:\n print \"Processed {} entries\".format(processed)\n\n # if we are moving beyond this timestep, then wait for\n # more data from the master\n if entry['step'] > self.step:\n self.upload_data()\n time.sleep(UPLOAD_WAIT)\n self.get_master_updates()\n\n # now update the skyline using this point\n self.update_skyline(entry)\n self.inputf.close()\n self.upload_data()\n req = requests.get(self.master_url + \"/worker_done\")\n req.raise_for_status()", "def expectedRuns(lineup):\n transitionsMatrices = list(map(lambda Batter: Batter.transitionMatrixSimple(), lineup))\n return simulateMarkovChain(transitionsMatrices)[:, 216]", "def processLines(self, lines):\n\n for line in lines:\n if len(line) == 0:\n continue\n\n if line[-1] == \"\\r\":\n line = line[:-1]\n\n # Automatically make P10 protocols have their lines parsed\n # 
differently\n lineobj = IRCLine(line, self.protocol.p10)\n\n #debug output\n if self.config[\"etc\"][\"debug\"]:\n self.log(line, \"<<<\")\n\n if lineobj.verb == \"ERROR\":\n #If ERROR is sent, it's already fatal.\n raise IOError\n\n #Handle server commands\n try:\n for impl in self.s2scommands[lineobj.verb]:\n try:\n impl(cod, lineobj)\n except KeyError as e:\n continue\n except Exception as e:\n if not self.config[\"etc\"][\"production\"]:\n self.servicesLog(\"%s %s %s\" %(type(e), e.message, lineobj))\n traceback.print_exc(file=sys.stdout)\n continue\n except KeyError:\n pass", "def _parse_line(\n self, line: str, handler_lookup: Dict[str, Callable[[str, Path], str]],\n path_file: Optional[Path] = None,\n ) -> List[str]:\n lines: List[str] = []\n if '{cte}' in line and self.state == self.state_auto: # end\n self.end()\n elif '{cts}' in line: # start\n self.start_auto()\n matches = [text_match for text_match in handler_lookup if text_match in line]\n if len(matches) == 1:\n lines.extend(handler_lookup[matches[0]](line, path_file))\n else:\n logger.error('Could not parse: {line}', line=line)\n lines.append(line)\n self.end()\n elif self.state == self.state_user:\n lines.append(line)\n # else: discard the lines in the auto-section\n return lines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return current state object; set it first if `next_state` given.
def get_state(self, next_state=None): if next_state: if self.debug and next_state != self.current_state: print(( '\nStateMachine.get_state: Changing state from ' '"%s" to "%s" (input line %s).' % (self.current_state, next_state, self.abs_line_number())), file=self._stderr) self.current_state = next_state try: return self.states[self.current_state] except KeyError: raise UnknownStateError(self.current_state)
[ "def get_state(self, next_state=None):\r\n if next_state:\r\n if self.debug and next_state != self.current_state:\r\n print >>self._stderr, (\r\n '\\nStateMachine.get_state: Changing state from '\r\n '\"%s\" to \"%s\" (input line %s).'\r\n % (self.current_state, next_state,\r\n self.abs_line_number()))\r\n self.current_state = next_state\r\n try:\r\n return self.states[self.current_state]\r\n except KeyError:\r\n raise UnknownStateError(self.current_state)", "def go_to_state(self, next_state):\n for t in self.transitions:\n if t.next_state == None:\n t.next_state = next_state\n return self.root", "def next_state(self):\r\n s = max(self.states)\r\n self.states.remove(s)\r\n return s[1]", "def get_next_state(self, index_next_state):\n raise NotImplementedError()", "def next_state(self, state, move):\n\n pass", "def estimate_next_state(self):\n return self.__transition_function(self.__state)", "def next_available_state(self) -> 'State':\n i = len(self.states) - 1\n while i >= 0:\n if self.states[i].is_ready():\n num_incomplete = self.states[i].num_incomplete_deps()\n if num_incomplete == 0:\n # This is perfect; no need to look for the best match.\n return self.states[i]\n i -= 1\n return None", "def get_state(self, state_name: str):\n if state_name == self.start.name:\n return self.start\n for state in self.state_list:\n if state.name == state_name:\n return state\n return None", "def get_starting_state(self):\n\t\treturn self._current_state # state 0", "def calculate_next_state(self):\n self.current_step = self.current_step + 1\n self.current_state = self.game.next_state(current_state=self.current_state, actions=self.next_action)", "def get_next(current):\n for index,value in enumerate(STATES):\n if value == current:\n if index == len(STATES)-1:\n return STATES[0]\n else:\n return STATES[index+1]", "def gen_next_state(self, direction):\r\n # Find the current zero-location (blank space).\r\n zero_row = self.zero_location[0]\r\n zero_col = self.zero_location[1]\r\n\r\n # Store the zero location values for our swap tile calculations.\r\n swap_row = zero_row\r\n swap_col = zero_col\r\n\r\n # Find the value in the appropriate direction.\r\n if direction == 'up':\r\n swap_row -= 1\r\n if direction == 'down':\r\n swap_row += 1\r\n if direction == 'left':\r\n swap_col -= 1\r\n if direction == 'right':\r\n swap_col += 1\r\n\r\n # Move the zero-location in the direction specified,\r\n # swapping with the number in the location it moves to.\r\n new_puzzle = np.copy(self.puzzle)\r\n new_puzzle[zero_row, zero_col], new_puzzle[swap_row, swap_col] = (\r\n new_puzzle[swap_row, swap_col], new_puzzle[zero_row, zero_col]\r\n )\r\n\r\n # Create the new state.\r\n path_cost = self.g_cost + 1\r\n predecessor_state = self\r\n next_state = PuzzleState(new_puzzle, path_cost, predecessor_state)\r\n\r\n # Set the predecessor's direction being moved.\r\n next_state.action_from_predecessor = direction\r\n\r\n return next_state", "def move_next(self, prev_state):\n new_state = SceneState.objects.create(scene=prev_state.scene,\n previous_state=prev_state, target_singular=prev_state.target_singular)\n new_state.rectangles = prev_state.rectangles.all()\n new_state.selected_rectangles = prev_state.selected_rectangles.all()\n return new_state", "def state(self, as_tuple = False):\n if as_tuple:\n return self.current_state\n else:\n return self.legal_states.index(self.current_state)", "def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = 
self.state_manager.entry_state(self.r2api, **self.options)\n return state", "def find_next_state(self, state, action):\n reward = -1 # set all rewards default is -1\n if state == self.states[0] or state == self.states[2]:\n if action == 1: # take right action\n next_state = state + 1\n else: # take left action\n next_state = max(0, state - 1)\n else:\n if action == 1:\n next_state = state - 1\n else:\n next_state = state + 1\n\n if next_state == self.end_state: # reach the terminal state\n reward = 0\n\n return next_state, reward", "def get_state(self):\n if self.state:\n return self.state\n\n from timon.state import TMonState\n self.state = state = TMonState(self.cfg['statefile'], config=self)\n return state", "def from_state(self):\n return self._from_state", "def _state(self, thread_id):\n while len(self._states) <= thread_id:\n self._states.append(State(self._l2c, self.config.state_config(thread_id)))\n return self._states[thread_id]", "def next_state(self, state: State, jointaction: JointAction) -> State:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load `self.line` with the `n`'th next line and return it.
def next_line(self, n=1):
    try:
        try:
            self.line_offset += n
            self.line = self.input_lines[self.line_offset]
        except IndexError:
            self.line = None
            raise EOFError
        return self.line
    finally:
        self.notify_observers()
[ "def read_line(file_path, n):\n return linecache.getline(file_path, n)", "def NthLineOfFile( fname, n = 0 ):\n with open( fname ) as f:\n while n > 0:\n f.readline()\n n -= 1\n return f.readline().strip()", "def _next_line(self):\n self.current_line += 1\n return next(self.fh).rstrip(\"\\n\")", "def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line", "def nth_item(line, n: int = 0):\n return line.split()[n]", "def next_line(self, oldLine):\n nextLine = ''\n #nextLine += self.rule[neighbor.index('0' + oldLine[:2])]\n for i in range(len(oldLine) - 2):\n nextLine += self.rule[neighbor.index(oldLine[i:i+3])]\n if len(self.lines) == 1: # if we work on the second line\n nextLine = self.rule[neighbor.index(oldLine[-2:] + '0')] +\\\n nextLine +\\\n self.rule[neighbor.index(oldLine[-2:] + '0')]\n else:\n nextLine = oldLine[0] + nextLine + oldLine[-1]\n #nextLine += self.rule[neighbor.index(oldLine[-2:] + '0')]\n return nextLine", "def gotoLine(self, n):\n self.fileIndex = n", "def NextLine(self):\n self.lineNum = self.lineNum + 1\n self.linePos = 0", "def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line", "def next(self, n = 1):\n return NonStandardInteger(self.non_st_part, self.st_part + n, self.non_st_ring)", "def readline(self) -> Optional[str]:\n # N-Triples lines end in either CRLF, CR, or LF\n # Therefore, we can't just use f.readline()\n if not self.buffer:\n # type error: Item \"None\" of \"Union[TextIO, StreamReader, None]\" has no attribute \"read\"\n buffer = self.file.read(bufsiz) # type: ignore[union-attr]\n if not buffer:\n return None\n self.buffer = buffer\n\n while True:\n m = r_line.match(self.buffer)\n if m: # the more likely prospect\n self.buffer = self.buffer[m.end() :]\n return m.group(1)\n else:\n # type error: Item \"None\" of \"Union[TextIO, StreamReader, None]\" has no attribute \"read\"\n buffer = self.file.read(bufsiz) # type: ignore[union-attr]\n if not buffer and not self.buffer.isspace():\n # Last line does not need to be terminated with a newline\n buffer += \"\\n\"\n elif not buffer:\n return None\n self.buffer += buffer", "def getNext(self):\n line = self._file.readline()\n if line:\n return tuple(line.strip('\\n').split('\\t'))\n else: \n return None", "def next_line(self):\n self.line = next(self.lines) # Will raise StopIteration when there are no more lines\n self.values = self.line.split()", "def line(n, rule):\n return lambda l, i: (\n i['lineno'] == (n if n >= 0 else i['nlines'] + n) and rule(l, i)\n )", "def next_line(rule):\n return shift_line(1, rule)", "def next(self):\n # apply implicit line ending conversion\n line = self.readline()\n if line:\n return line\n else:\n raise StopIteration", "def getline(self, bno):\r\n return self.breakpt[bno]['line']", "def nth(n, generator):\n return next(itertools.islice(generator, n-1, n))", "def __getitem__(self, n):\n if not (0 <= n < self.nrows):\n raise ValueError('0 >= row > %d, but %d given.'\n % (self.nrows, n))\n return self._sheet.row(n)", "def goto_recnum(self, n):\n if n == -1:\n self.fp.seek(0, 2)\n else:\n self.fp.seek(n * self.blocksize, 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return 1 if the next line is blank or nonexistent.
def is_next_line_blank(self):
    try:
        return not self.input_lines[self.line_offset + 1].strip()
    except IndexError:
        return 1
[ "def non_blank_lines(thing):\n \n count = 0\n for line in thing:\n if line.strip():\n count += 1\n return count", "def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line", "def _not_empty_line(line):\n return len(line) > 0", "def first_line_number(self):\n self._assert_buffer_not_empty()\n return 1", "def _is_empty_line(self, line):\r\n return re.match('\\s*$', line) is not None", "def count_last_empty_lines(s):\n cnt = 0\n lines = s.splitlines()\n lines.reverse()\n for l in lines:\n if re.match(\"^\\s*$\", l):\n cnt += 1\n else:\n return cnt\n return cnt", "def equal_num(line):\n\treturn not (line.count('1') > len(line) // 2 or line.count('0') > len(line) // 2)", "def have_trailing_newline(line):\n\treturn line[-1] == '\\n' or line[-1] == '\\r' or line[-2:] == '\\r\\n'", "def _next_line(self):\n self.current_line += 1\n return next(self.fh).rstrip(\"\\n\")", "def count_never_executed(self):\n lineno = self.firstlineno\n counter = 0\n for line in self.source:\n if self.sourcelines.get(lineno) == 0:\n if not self.blank_rx.match(line):\n counter += 1\n lineno += 1\n return counter", "def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line", "def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)", "def _should_skip(self, line):\r\n return self._is_empty_line(line) or\\\r\n self._is_comment_line(line) or\\\r\n self._is_group_header_line(line) or\\\r\n self.delimiter not in line", "def first_non_whitespace_index (line): \n return len (line) - len (line.lstrip ())", "def has_next(self):\n\n return self.index < len(self.string)", "def get_n_lines(input_path):\r\n count = 0\r\n with open(input_path, \"r\") as input_file:\r\n for line in input_file:\r\n if line.strip() == \"\":\r\n print(\"WARN: Found empty line while counting lines, will not count.\")\r\n continue\r\n count += 1\r\n return count", "def _beginningOfContent(line: str) -> int:\n m = _INDENT_RE.match(line)\n if m and m.group(1) is not None:\n return m.start(1)\n else:\n return 0", "def should_count_spines(line):\n return line != \"\" and line != config.MEASURE_SYMBOL", "def mylen(self):\n self.ilen=len(self.line)\n \n itab = 0 # flag if tab before 1st non-whitespace character\n self.ifnb = 0\n isig = 0\n while self.ifnb < self.ilen and isig == 0:\n if string.find(string.whitespace,self.line[self.ifnb]) <> -1:\n if self.line[self.ifnb] == '\\t':\n itab = 1\n self.ifnb += 1\n else:\n isig=1\n \n return itab", "def _getStartExcludingNewlines(self, line_list):\n\n for count, item in enumerate(line_list):\n item = item.strip()\n if item != \"\":\n return count\n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load `self.line` with the `n`'th previous line and return it.
def previous_line(self, n=1):
    self.line_offset -= n
    if self.line_offset < 0:
        self.line = None
    else:
        self.line = self.input_lines[self.line_offset]
    self.notify_observers()
    return self.line
[ "def previous(self, n = 1):\n return NonStandardInteger(self.non_st_part, self.st_part - n, self.non_st_ring)", "def prev_line(rule):\n return shift_line(-1, rule)", "def read_line(file_path, n):\n return linecache.getline(file_path, n)", "def NthLineOfFile( fname, n = 0 ):\n with open( fname ) as f:\n while n > 0:\n f.readline()\n n -= 1\n return f.readline().strip()", "def undo(self, n=1):\n if not self.history:\n return None\n\n if len(self.history) < n:\n n = len(self.history)\n\n entries = list(self.rl_history.entries)\n\n self.history.entries = self.history[:-n]\n\n self.reevaluate()\n\n self.rl_history.entries = entries", "def gotoLine(self, n):\n self.fileIndex = n", "def next_line(self):\n line = self.lines[self.cur_line]\n self.cur_line += 1\n\n if self.cur_line >= len(self.lines):\n self.eop = True\n\n return line", "def shift_line(n, rule, skip_comments=True):\n def wrap(line, info):\n old_index = info['line_index']\n new_index = old_index + n\n\n if 0 <= new_index < info['nlines']:\n new_lineno, new_line = info['lines'][new_index]\n info['line_index'] = new_index\n old_lineno, info['lineno'] = info['lineno'], new_lineno\n res = rule(new_line, info)\n info['lineno'], info['line_index'] = old_lineno, old_index\n return res\n return False\n\n return wrap", "def get_line_number(self):\n return self.line_number", "def _next_line(self):\n self.current_line += 1\n return next(self.fh).rstrip(\"\\n\")", "def nth_item(line, n: int = 0):\n return line.split()[n]", "def up(self, n_lines=1):\n self.down(-n_lines)", "def _take_previous_line_pos(self, pos):\r\n\t\t(row, col) = self.view.rowcol(pos.begin())\r\n\t\tpoint = self.view.text_point(row - 1, col)\r\n\t\treturn sublime.Region(point, point)", "def tail(filepath, n):\n tail = []\n with open(filepath, 'rt') as fin:\n for line in fin:\n tail.append(line.strip())\n\n return tail[len(tail) - n:]\n #return tail[-n:] ---this was the orignal code", "def getline(self, bno):\r\n return self.breakpt[bno]['line']", "def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line", "def get_timestep(self, n):\n if n < 0:\n for h in self.real_handles:\n h.seek(0, SEEK_END)\n for n in range(-n):\n self.skip_back_timestep()\n elif n == 0:\n raise ValueError(\"step number must be positive or negative, not zero\")\n else:\n # should we seek(0) first?\n for n in range(n-1):\n self.skip_next_timestep()\n\n return self.get_next_timestep()", "def tail(file, n):\n with open(file) as in_fh:\n lines = in_fh.readlines()\n print(\"\".join(lines[-n:]))", "def next_line(self, oldLine):\n nextLine = ''\n #nextLine += self.rule[neighbor.index('0' + oldLine[:2])]\n for i in range(len(oldLine) - 2):\n nextLine += self.rule[neighbor.index(oldLine[i:i+3])]\n if len(self.lines) == 1: # if we work on the second line\n nextLine = self.rule[neighbor.index(oldLine[-2:] + '0')] +\\\n nextLine +\\\n self.rule[neighbor.index(oldLine[-2:] + '0')]\n else:\n nextLine = oldLine[0] + nextLine + oldLine[-1]\n #nextLine += self.rule[neighbor.index(oldLine[-2:] + '0')]\n return nextLine", "def prev_num(self):\n self.current_page - 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
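`next_line`, `is_next_line_blank`, and `previous_line` above all move one shared `line_offset` cursor over `self.input_lines` and keep `self.line` in sync with it. The following is a minimal, self-contained sketch of that cursor pattern, assuming a plain Python list of strings in place of docutils' `StringList` and omitting the observer notifications; the `LineCursor` name and the sample input are invented for illustration.

class LineCursor:
    """Illustrative stand-in for the StateMachine line cursor (not docutils code)."""

    def __init__(self, input_lines):
        self.input_lines = list(input_lines)
        self.line_offset = -1  # start "before" the first line
        self.line = None

    def next_line(self, n=1):
        # Advance the cursor n lines; raise EOFError past the end.
        try:
            self.line_offset += n
            self.line = self.input_lines[self.line_offset]
        except IndexError:
            self.line = None
            raise EOFError
        return self.line

    def is_next_line_blank(self):
        # True (1) if the following line is blank or does not exist.
        try:
            return not self.input_lines[self.line_offset + 1].strip()
        except IndexError:
            return 1

    def previous_line(self, n=1):
        # Back up n lines; the cursor parks on None before the buffer start.
        self.line_offset -= n
        if self.line_offset < 0:
            self.line = None
        else:
            self.line = self.input_lines[self.line_offset]
        return self.line


cursor = LineCursor(["first line", "", "third line"])
assert cursor.next_line() == "first line"
assert cursor.is_next_line_blank()           # the second line is blank
assert cursor.next_line(2) == "third line"
assert cursor.previous_line(2) == "first line"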
Jump to absolute line offset `line_offset`, load and return it.
def goto_line(self, line_offset):
    try:
        try:
            self.line_offset = line_offset - self.input_offset
            self.line = self.input_lines[self.line_offset]
        except IndexError:
            self.line = None
            raise EOFError
        return self.line
    finally:
        self.notify_observers()
[ "def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n # This is an error, meaning that there aren't even offset_line+1 lines in self.path.\n if f.readline() == '':\n raise ValueError('offset: {} is higher than the total number of lines in file {}'.format(\n offset_line, self.path))\n\n total_lines += 1\n\n # Retrieve the console_output just between offset_line and offset_line + max_lines\n for i in range(offset_line, offset_line + max_lines):\n line = f.readline()\n\n # We have reached the end of the file, or a line that has not finished being written to.\n if line == '' or not line.endswith(\"\\n\"):\n break\n\n console_output.append(line)\n output_lines += 1\n total_lines += 1\n\n # If there are more lines, then keep on counting in order to populate total_lines properly\n while f.readline():\n total_lines += 1\n\n return ConsoleOutputSegment(offset_line, output_lines, total_lines, ''.join(console_output))", "def current_from_import_import(cursor_offset, line):\n baseline = current_from_import_import_re_1.search(line)\n if baseline is None:\n return None\n match1 = current_from_import_import_re_2.search(line[baseline.end():])\n if match1 is None:\n return None\n matches = current_from_import_import_re_3.finditer(line[baseline.end():])\n for m in chain((match1, ), matches):\n start = baseline.end() + m.start(1)\n end = baseline.end() + m.end(1)\n if start < cursor_offset and end >= cursor_offset:\n return start, end, m.group(1)\n return None", "def load_offset(offset_file):\n offset_file = realpath(offset_file)\n return np.loadtxt(offset_file)", "def get_location_by_offset(filename, offset):\n with open(filename, encoding='utf-8', errors='ignore') as f:\n for row, line in enumerate(f, 1):\n length = len(line)\n if length < offset:\n offset -= length\n else:\n return row, offset + 1", "def current_from_import_from(cursor_offset, line):\n #TODO allow for as's\n tokens = line.split()\n if not ('from' in tokens or 'import' in tokens):\n return None\n matches = current_from_import_from_re.finditer(line)\n for m in matches:\n if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or\n (m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):\n return m.start(1), m.end(1), m.group(1)\n return None", "def jump_to_line(self, lineno):\r\n self._main.editor_jump_to_line(lineno=lineno)", "def getAddressFromFileOffset(self,offset):\n return HopperLowLevel.getAddressFromFileOffset(self.__internal_document_addr__, offset)", "def offset_from_line(line, firstlineno, lnotab):\n # TODO: Handle negetive offsets!\n n = len(lnotab)\n assert n & 1 == 0\n\n l = firstlineno\n tab = lnotab\n offset = 0\n index = 0\n while tab:\n index += 1\n b, d, *tab = tab\n l += d\n offset += b\n if l >= line:\n return offset, index\n raise IndexError(\"Line out of bound\")", "def getLoc(self, file, line, join=False):\n with open(file if not join else os.path.join(PATH, file), \"r\") as f:\n i = 0\n while i < line - 1:\n f.readline()\n i += 1\n return f.readline()", "def goToLine(self, lineno):\n # Go to start and move pointer to given line no\n self.goToStart()\n line_count = 1\n eof = False\n pos = 0\n while not eof and line_count != lineno:\n line = self.file_obj.readline()\n if not line:\n eof = True\n continue\n pos = self.file_obj.tell()\n line_count += 1\n\n self.line_no = line_count\n self.offset = pos", 
"def load_by_offset(self, offset, size):\n raise NotImplementedError()", "def _read_entity_from_offset(self, offset):\n self.entities_mm.seek(offset)\n l = self.entities_mm.readline()\n return self._string_to_entity(l)", "def jump_to_line(self, lineno=None):\r\n if lineno is not None:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(lineno)\r\n return\r\n\r\n maximum = self.blockCount()\r\n line = QInputDialog.getInt(self, self.tr(\"Jump to Line\"),\r\n self.tr(\"Line:\"), 1, 1, maximum, 1)\r\n if line[1]:\r\n self.emit(SIGNAL(\"addBackItemNavigation()\"))\r\n self.go_to_line(line[0] - 1)", "def _lower_bound(self, query: str, offset_l: int, offset_h: int) -> int:\n logging.debug('lower bound 2 %s %s %s', query, offset_l, offset_h)\n if offset_l >= offset_h:\n return self._seek_back_to_line_start(offset_l)\n\n mid = (offset_l + offset_h) // 2\n\n line_start = self._seek_back_to_line_start(mid)\n #current_id = self._id_from_line(line_start)\n current_line = self._get_line(line_start)\n next_line_start = self._seek_to_next_line(mid)\n\n #if current_id >= query:\n if current_line >= query:\n return self._lower_bound(query=query, offset_l=offset_l, offset_h=line_start - 1)\n return self._lower_bound(query=query, offset_l=next_line_start, offset_h=offset_h)", "def _resolve_lineno(self, lineno):\n if lineno is None:\n return self.line_number()\n return lineno", "def get_section_by_offset(self, offset):\n\n for section in self.sections:\n if section.contains_offset(offset):\n return section\n\n return None", "def fine_tuning(raw_line, offset_step=0.05):\n def _offset(symbols):\n if not symbols:\n return 0\n sign = int('{}1'.format(symbols[0]))\n return len(symbols) * offset_step * sign\n\n result = re.match(OFFSET_PATTERN, raw_line).groupdict()\n line = result.pop('line').strip()\n return {line: {k: _offset(v) for k, v in result.items()}}", "def __init__(self, pos_team=None, yardline=None, offset=None):\r\n if isinstance(offset, int):\r\n self.offset = offset\r\n return\r\n if yardline == '50':\r\n self.offset = 0\r\n return\r\n\r\n territory, yd_str = yardline.split()\r\n yd = int(yd_str)\r\n if territory == pos_team:\r\n self.offset = -(50 - yd)\r\n else:\r\n self.offset = 50 - yd", "def editor_go_to_line(self, line):\r\n editorWidget = self.get_current_editor()\r\n if editorWidget:\r\n editorWidget.jump_to_line(line)", "def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return source of line at absolute line offset `line_offset`.
def get_source(self, line_offset):
    return self.input_lines.source(line_offset - self.input_offset)
[ "def source_line(self) -> str:\n if not self.__source_line:\n self.__source_line = util.get_line(self.file_path, self.line)\n\n return self.__source_line", "def raise_source_exception(\n source: str,\n rel_path: Path,\n source_lineno: int,\n file_lineno: int,\n source_offset: int | None = None,\n exception: Exception | None = None,\n) -> None:\n message = exception.msg if exception else \"\"\n source_lines = [\n (\"....\" if n != source_lineno - 1 else \" >\") + line\n for n, line in enumerate(source.splitlines())\n ]\n if source_offset:\n source_lines.insert(source_lineno, f\"{' '*(source_offset+3)}^ {message}\")\n annotated_source = \"\\n\".join(source_lines)\n exception = type(exception) if exception else SyntaxError\n msg = f\"{rel_path}:{file_lineno}: {message}\\n{annotated_source}\"\n raise exception(\n msg,\n ) from None", "def _parse_from_offset(self, max_lines, offset_line):\n total_lines = 0\n output_lines = 0\n console_output = []\n\n with open(self.path, 'r', encoding='utf-8', errors='replace') as f:\n # Iterate up to the index offset_line\n for i in range(0, offset_line):\n # This is an error, meaning that there aren't even offset_line+1 lines in self.path.\n if f.readline() == '':\n raise ValueError('offset: {} is higher than the total number of lines in file {}'.format(\n offset_line, self.path))\n\n total_lines += 1\n\n # Retrieve the console_output just between offset_line and offset_line + max_lines\n for i in range(offset_line, offset_line + max_lines):\n line = f.readline()\n\n # We have reached the end of the file, or a line that has not finished being written to.\n if line == '' or not line.endswith(\"\\n\"):\n break\n\n console_output.append(line)\n output_lines += 1\n total_lines += 1\n\n # If there are more lines, then keep on counting in order to populate total_lines properly\n while f.readline():\n total_lines += 1\n\n return ConsoleOutputSegment(offset_line, output_lines, total_lines, ''.join(console_output))", "def GetSymbolSourceLine(symbol):\n return SourceSymbolSourceLine.get(symbol, 0)", "def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')", "def analyze_last_line(line, offset=None):\n tokens = utils.tokenize_source(line) # tokens do not include spaces nor comments\n\n if not tokens:\n return\n\n for analyzer in LINE_ANALYZERS:\n cause = analyzer(tokens, offset=offset)\n if cause:\n return cause\n return", "def current_from_import_from(cursor_offset, line):\n #TODO allow for as's\n tokens = line.split()\n if not ('from' in tokens or 'import' in tokens):\n return None\n matches = current_from_import_from_re.finditer(line)\n for m in matches:\n if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or\n (m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):\n return m.start(1), m.end(1), m.group(1)\n return None", "def make_source_range(self, token: str, string: str, lineno: int, offset: int = 0) -> SourceRange:\n col_begin = string.index(token, offset) + 1\n col_end = col_begin + len(token)\n return SourceRange.from_positions(self.cursor.translation_unit, lineno, col_begin, lineno, col_end)", "def current_from_import_import(cursor_offset, line):\n baseline = current_from_import_import_re_1.search(line)\n if baseline is None:\n return None\n match1 = current_from_import_import_re_2.search(line[baseline.end():])\n if match1 is None:\n return None\n matches = current_from_import_import_re_3.finditer(line[baseline.end():])\n for m in chain((match1, ), matches):\n start = baseline.end() + m.start(1)\n 
end = baseline.end() + m.end(1)\n if start < cursor_offset and end >= cursor_offset:\n return start, end, m.group(1)\n return None", "def getAddressFromFileOffset(self,offset):\n return HopperLowLevel.getAddressFromFileOffset(self.__internal_document_addr__, offset)", "def get_location_by_offset(filename, offset):\n with open(filename, encoding='utf-8', errors='ignore') as f:\n for row, line in enumerate(f, 1):\n length = len(line)\n if length < offset:\n offset -= length\n else:\n return row, offset + 1", "def get_string(self, offset):\r\n table_offset = self['sh_offset']\r\n s = parse_cstring_from_stream(self.stream, table_offset + offset)\r\n return s", "def _get_line(self, regex):\n return self._match(regex).group(1)", "def get_string_from_table(self, offset):\r\n return parse_cstring_from_stream(self.debug_str_sec.stream, offset)", "def handle_source(self, line):\n self._source_lines_buffered.append(line)\n ## Ask client if line is complete; get indent for next line:\n if self.use_kernel_is_complete:\n msg_id = self.client.is_complete(\"\\n\".join(self._source_lines_buffered))\n return self.handle_is_complete_reply(msg_id, timeout=self.kernel_is_complete_timeout)\n else:\n more = (line != \"\")\n return more, \"\"", "def _highlit_line(content, offsets, markup, markdown, encoding):\n def chunks():\n try:\n # Start on the line the highlights are on:\n chars_before = content.rindex('\\n', 0, offsets[0][0]) + 1\n except ValueError:\n chars_before = None\n for start, end in offsets:\n yield cgi.escape(content[chars_before:start].decode(encoding,\n 'replace'))\n yield markup\n yield cgi.escape(content[start:end].decode(encoding, 'replace'))\n yield markdown\n chars_before = end\n # Make sure to get the rest of the line after the last highlight:\n try:\n next_newline = content.index('\\n', chars_before)\n except ValueError: # eof\n next_newline = None\n yield cgi.escape(content[chars_before:next_newline].decode(encoding,\n 'replace'))\n return ''.join(chunks()).lstrip()", "def getLine(self, line_id: int) -> Line:\n return self.pool[line_id]", "def _read_entity_from_offset(self, offset):\n self.entities_mm.seek(offset)\n l = self.entities_mm.readline()\n return self._string_to_entity(l)", "def get_corresponding_lineno(self, lineno):\r\n for template_line, code_line in reversed(self.debug_info):\r\n if code_line <= lineno:\r\n return template_line\r\n return 1", "def getLine(self) -> \"SbLine const &\":\n return _coin.SbLineProjector_getLine(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return (source, line) tuple for current or given line number.

Looks up the source and line number in the `self.input_lines` StringList instance to account for included source files.

If the optional argument `lineno` is given, convert it from an absolute line number to the corresponding (source, line) pair.
def get_source_and_line(self, lineno=None):
    if lineno is None:
        offset = self.line_offset
    else:
        offset = lineno - self.input_offset - 1
    try:
        src, srcoffset = self.input_lines.info(offset)
        srcline = srcoffset + 1
    except (TypeError):
        # line is None if index is "Just past the end"
        src, srcline = self.get_source_and_line(offset + self.input_offset)
        return src, srcline + 1
    except (IndexError):  # `offset` is off the list
        src, srcline = None, None
        # raise AssertionError('cannot find line %d in %s lines' %
        #                      (offset, len(self.input_lines)))
        #                      # list(self.input_lines.lines())))
    # assert offset == srcoffset, str(self.input_lines)
    # print "get_source_and_line(%s):" % lineno,
    # print offset + 1, '->', src, srcline
    # print self.input_lines
    return (src, srcline)
[ "def get_corresponding_lineno(self, lineno):\r\n for template_line, code_line in reversed(self.debug_info):\r\n if code_line <= lineno:\r\n return template_line\r\n return 1", "def _resolve_lineno(self, lineno):\n if lineno is None:\n return self.line_number()\n return lineno", "def source_line(self) -> str:\n if not self.__source_line:\n self.__source_line = util.get_line(self.file_path, self.line)\n\n return self.__source_line", "def line_number(self, line):\n ret_val = self._line_number(line)\n return ret_val", "def current_from_import_import(cursor_offset, line):\n baseline = current_from_import_import_re_1.search(line)\n if baseline is None:\n return None\n match1 = current_from_import_import_re_2.search(line[baseline.end():])\n if match1 is None:\n return None\n matches = current_from_import_import_re_3.finditer(line[baseline.end():])\n for m in chain((match1, ), matches):\n start = baseline.end() + m.start(1)\n end = baseline.end() + m.end(1)\n if start < cursor_offset and end >= cursor_offset:\n return start, end, m.group(1)\n return None", "def indexByLineNumber(self,n):\n for idx in range(len(self.__data)):\n if self.__data[idx].lineno() == n:\n return idx\n raise IndexError,\"No line number %d\" % n", "def _get_line_number(file_lines, pattern):\n return next(i for i, line in enumerate(file_lines) if pattern in line) + 1", "def scan_source_line_comments(\n self,\n fp: TextIO,\n line_numbers: Iterable[int]\n ) -> Tuple[List[Tuple[int, SourceCodeComments]], List[str]]:\n comments: List[Tuple[int, SourceCodeComments]] = []\n misspelled_comments: List[str] = []\n if not contains_codechecker_comment(fp):\n return comments, misspelled_comments\n\n line_numbers = sorted(line_numbers)\n for num in line_numbers:\n try:\n comments.append((num, self.get_source_line_comments(fp, num)))\n except SpellException as ex:\n misspelled_comments.append(str(ex))\n return comments, misspelled_comments", "def readline(self, lineno=None):\n if lineno:\n self.goToLine(lineno)\n line = self.file_obj.readline()\n if line: # If not EOF\n self.line_no += 1\n return line", "def _FindFileLine(outbuffer, line, fname, regex):\n match = regex.findall(outbuffer.GetLine(line))\n ifile = None\n if len(match):\n ifile = match[0][0]\n try:\n line = max(int(match[0][1]) - 1, 0)\n except (IndexError, TypeError):\n line = 0\n\n # If not an absolute path then the error is relative to the\n # script that produced this error message.\n if ifile is not None and not os.path.isabs(ifile):\n dname = os.path.split(fname)[0]\n ifile = os.path.join(dname, ifile)\n\n return (ifile, line)", "def line_search(self, regex, lineno=None):\n return regex.search(self.line_text(lineno))", "def get_line_number(self):\n return self.line_number", "def eval_line(self, number: int) -> object:\n\n if self.ast:\n interpreter = Interpreter(self.filename)\n\n cur_line = self.buf[number-1].strip()\n if (not cur_line) or any(cur_line.startswith(token)\n for token in self.ignored_tokens):\n raise ValueError()\n\n # constuction area:\n # =================\n pre_nodes = []\n\n def find_cur_node(node):\n if hasattr(node, 'lineno') and node.lineno == number:\n return node\n\n if hasattr(node, 'body'):\n for subn in node.body:\n if subn.lineno > number:\n break\n pre_nodes.append(subn)\n nextn = subn\n else:\n return None\n\n pre_nodes.pop()\n return find_cur_node(nextn)\n\n node = find_cur_node(self.ast)\n\n compiled = interpreter.compile(ast.Module(pre_nodes), 'exec')\n interpreter.exec_code(compiled)\n\n if isinstance(node, ast.If):\n source = 
node.test\n else:\n source = node.value\n\n compiled = interpreter.compile(ast.Expression(source), 'eval')\n value, error = interpreter.eval_code(compiled)\n # =================\n\n return (self._format_exc(error) if error\n else self._format_value(value))\n else:\n return self.msg", "def software_source_line_number(self):\n return self._software_source_line_number", "def current_from_import_from(cursor_offset, line):\n #TODO allow for as's\n tokens = line.split()\n if not ('from' in tokens or 'import' in tokens):\n return None\n matches = current_from_import_from_re.finditer(line)\n for m in matches:\n if ((m.start(1) < cursor_offset and m.end(1) >= cursor_offset) or\n (m.start(2) < cursor_offset and m.end(2) >= cursor_offset)):\n return m.start(1), m.end(1), m.group(1)\n return None", "def _compute_lineno(cls, table, code):\n for offset, lineno in dis.findlinestarts(code):\n adj_offset = offset + _FIXED_OFFSET\n if adj_offset in table:\n table[adj_offset].lineno = lineno\n # Assign unfilled lineno\n # Start with first bytecode's lineno\n known = code.co_firstlineno\n for inst in table.values():\n if inst.lineno >= 0:\n known = inst.lineno\n else:\n inst.lineno = known\n return table", "def get_lines(name, source = None):\n\n cmd = ['./llvm-to-source', name]\n if source:\n #cmd.append('-lines-only')\n cmd.append(source)\n p = Popen(cmd, cwd = srcdir, stdout=PIPE, stderr=PIPE)\n out, errs = p.communicate()\n if p.poll() != 0:\n sys.stderr.write(errs)\n sys.exit(1)\n\n assert not out is None\n return frozenset(map(int, out.split()))", "def getSnippetIdentifier(self, file, line):\n for i in self.fileInfo[file]:\n if i == \"path\":\n continue\n if line in range(self.fileInfo[file][i][\"start\"], self.fileInfo[file][i][\"stop\"] + 1):\n return i", "def parse_position(errmsg, arg):\n colon = arg.rfind(':') \n if colon >= 0:\n filename = arg[:colon].rstrip()\n m, f = lookupmodule(filename)\n if not f:\n errmsg(\"'%s' not found using sys.path\" % filename)\n return (None, None, None)\n else:\n filename = file_pyc2py(f)\n arg = arg[colon+1:].lstrip()\n pass\n try:\n lineno = int(arg)\n except TypeError:\n errmsg(\"Bad line number: %s\", str(arg))\n return (None, filename, None)\n return (None, filename, lineno)\n return (None, None, None)", "def lineno(self) -> int:\n return self.node.lineno" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
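`goto_line`, `get_source`, and `get_source_and_line` all translate between absolute positions in the original source and indexes into the possibly sliced `input_lines` buffer by way of `input_offset`; note that `goto_line` works with 0-based absolute line offsets while `get_source_and_line` accepts a 1-based line number, which is where the extra `- 1` comes from. A small worked example of that arithmetic, with the offset and buffer contents assumed purely for illustration:

# Suppose the buffer was sliced out of a larger document starting at
# absolute line offset 10, i.e. it holds absolute line numbers 11..13.
input_offset = 10
input_lines = ["line eleven", "line twelve", "line thirteen"]

# get_source_and_line(lineno): 1-based line number -> buffer index.
lineno = 13
offset = lineno - input_offset - 1
assert input_lines[offset] == "line thirteen"

# goto_line(line_offset): 0-based absolute offset -> buffer index.
line_offset = 12 - input_offset
assert input_lines[line_offset] == "line thirteen"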
Examine one line of input for a transition match & execute its method.
def check_line(self, context, state, transitions=None):
    if transitions is None:
        transitions = state.transition_order
    state_correction = None
    if self.debug:
        print(('\nStateMachine.check_line: state="%s", transitions=%r.'
               % (state.__class__.__name__, transitions)), file=self._stderr)
    for name in transitions:
        pattern, method, next_state = state.transitions[name]
        match = pattern.match(self.line)
        if match:
            if self.debug:
                print(('\nStateMachine.check_line: Matched transition '
                       '"%s" in state "%s".'
                       % (name, state.__class__.__name__)), file=self._stderr)
            return method(match, context, next_state)
    else:
        if self.debug:
            print(('\nStateMachine.check_line: No match in state "%s".'
                   % state.__class__.__name__), file=self._stderr)
        return state.no_match(context, transitions)
[ "def check_line(self, context, state, transitions=None):\r\n if transitions is None:\r\n transitions = state.transition_order\r\n state_correction = None\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: state=\"%s\", transitions=%r.'\r\n % (state.__class__.__name__, transitions))\r\n for name in transitions:\r\n pattern, method, next_state = state.transitions[name]\r\n match = pattern.match(self.line)\r\n if match:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: Matched transition '\r\n '\"%s\" in state \"%s\".'\r\n % (name, state.__class__.__name__))\r\n return method(match, context, next_state)\r\n else:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.check_line: No match in state \"%s\".'\r\n % state.__class__.__name__)\r\n return state.no_match(context, transitions)", "def parse_transition_line(line: Line, trans_txt: str, from_state: State, to_state: State) -> Transition:\n m = re.fullmatch(r'^(\\w+)\\s*(\\[\\s*(.*?)\\s*\\]\\s*)?(/(.*))?', trans_txt.replace('\\\\n', ''))\n assert m, f'Invalid transition format in {line}: {line.orig_text}'\n\n event_name, _, guard_code, _, actions_txt = m.groups()\n actions_code = [] if not actions_txt else [x.strip() for x in actions_txt.split('/') if x.strip()]\n\n event = Event(event_name)\n guard = None if not guard_code else Guard(guard_code)\n actions = [Action(x) for x in actions_code]\n transition = Transition(event, guard, from_state, to_state, actions)\n\n return transition", "def run(self, input_lines, input_offset=0, context=None,\r\n input_source=None, initial_state=None):\r\n self.runtime_init()\r\n if isinstance(input_lines, StringList):\r\n self.input_lines = input_lines\r\n else:\r\n self.input_lines = StringList(input_lines, source=input_source)\r\n self.input_offset = input_offset\r\n self.line_offset = -1\r\n self.current_state = initial_state or self.initial_state\r\n if self.debug:\r\n print >>self._stderr, (\r\n u'\\nStateMachine.run: input_lines (line_offset=%s):\\n| %s'\r\n % (self.line_offset, u'\\n| '.join(self.input_lines)))\r\n transitions = None\r\n results = []\r\n state = self.get_state()\r\n try:\r\n if self.debug:\r\n print >>self._stderr, '\\nStateMachine.run: bof transition'\r\n context, result = state.bof(context)\r\n results.extend(result)\r\n while True:\r\n try:\r\n try:\r\n self.next_line()\r\n if self.debug:\r\n source, offset = self.input_lines.info(\r\n self.line_offset)\r\n print >>self._stderr, (\r\n u'\\nStateMachine.run: line (source=%r, '\r\n u'offset=%r):\\n| %s'\r\n % (source, offset, self.line))\r\n context, next_state, result = self.check_line(\r\n context, state, transitions)\r\n except EOFError:\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: %s.eof transition'\r\n % state.__class__.__name__)\r\n result = state.eof(context)\r\n results.extend(result)\r\n break\r\n else:\r\n results.extend(result)\r\n except TransitionCorrection, exception:\r\n self.previous_line() # back up for another try\r\n transitions = (exception.args[0],)\r\n if self.debug:\r\n print >>self._stderr, (\r\n '\\nStateMachine.run: TransitionCorrection to '\r\n 'state \"%s\", transition %s.'\r\n % (state.__class__.__name__, transitions[0]))\r\n continue\r\n except StateCorrection, exception:\r\n self.previous_line() # back up for another try\r\n next_state = exception.args[0]\r\n if len(exception.args) == 1:\r\n transitions = None\r\n else:\r\n transitions = (exception.args[1],)\r\n if self.debug:\r\n print >>self._stderr, 
(\r\n '\\nStateMachine.run: StateCorrection to state '\r\n '\"%s\", transition %s.'\r\n % (next_state, transitions[0]))\r\n else:\r\n transitions = None\r\n state = self.get_state(next_state)\r\n except:\r\n if self.debug:\r\n self.error()\r\n raise\r\n self.observers = []\r\n return results", "def parse(self, player, message):\n #test if the message match a command available for the player state\n matched = self.cmd_regex[player.get_state()].match(message)\n if matched:\n # execute the relative function\n cmd = matched.group(\"command\")\n arg = matched.group(\"arguments\") or ''\n getattr(self, Cmd.commands[cmd].fn)(player, arg)\n else:\n #self.game.log(\n # \"Unknown command <{}> for state {}.\"\n # .format(message, player.get_state()))\n info(player, \"<code>Arglebargle&nbsp;!?</code>\")", "def transition(self):\n statedef = self.transitions[self.state]\n for path in statedef:\n pat, dest = path[:2]\n retval = None\n if type(pat).__name__ == \"str\" and pat == self.char or \\\n type(pat).__name__ == \"SRE_Pattern\" and pat.match(self.char): # Regexp objects match like regexps\n for action in path[2:]:\n retval = action(self) # Keep the return value to return from ourselves\n self.state = dest\n return retval\n raise Exception(\"No matching path for char %s from state %d.\" % (self.char, self.state))", "def consume(self, inp):\n if self.state.is_end:\n raise ValueError(\"state %s is terminal\" % self.state.name)\n # Follow the first matched rule of current state.\n for predicate, target, action in self.state.rules:\n if predicate(inp, self.stack):\n if action is not None:\n action(inp, self.stack)\n self.state = target\n break\n else: # No match found, follow default.\n if self.state.default_action is not None:\n self.state.default_action(inp, self.stack)\n self.state = self.state.default_target", "def process_line(self, line):\n if not line:\n return\n msg = self.line_to_message(line)\n self.handle_message(msg)", "def parse(line):\n if line.startswith('turn on'):\n action = 'on'\n elif line.startswith('turn off'):\n action = 'off'\n elif line.startswith('toggle'):\n action = 'toggle'\n else:\n raise Exception('Unexpected input: \"{}\"'.format(line))\n start, end = map(parse_pair, re.findall(r'\\d+,\\d+', line))\n return action, start, end", "def test_match_sentence_with_steps(given_sentence, given_steps, expected_argument_match_type, expected_func_match):\n # given & when\n match = matcher.match_step(given_sentence, given_steps)\n\n # then\n assert isinstance(match.argument_match, expected_argument_match_type)\n assert match.func == expected_func_match", "def process_ops_input(self):\n input_data = self.text()\n if (self.local_state == State.GOTO_LINE):\n self.goto_line.emit(int(input_data))\n else:\n self.search.emit(input_data)", "def input(self, i):\n self.i_count += 1\n rlist = self.rules.get(self.state, [])\n for (test, dst, action, tag) in rlist + self.rules.get(None, []): # Rules starting from None are added to all states\n t_info = TransitionInfo(self.state, dst, self.i_count, None)\n result = test(i, t_info) if callable(test) else test == i\n t_info = t_info._replace(result=result)\n if result:\n if dst is not None: # Transitions ending in None stay in the same state\n self.state = dst\n # Run the action after the state change so it could override the end state (e.g. 
pop state from a stack)\n out = action(i, t_info) if callable(action) else action\n # Be sure to trace the actual end state after `action` is done\n self.tracer(i, TraceInfo(t_info, test, action, tag, out, self.state))\n return out\n self.tracer(i, TraceInfo(t_info, test, action, tag, None, self.state))\n\n return self.unrecognized(i, self.state, self.i_count)", "def process_line(self, line):\n args = line.split(' ')\n command = args[0]\n try:\n handler = getattr(self, f'c_{command}')\n except AttributeError:\n log.warning(f'command {command!r} not found')\n\n try:\n handler(args)\n except ShutdownClient as err:\n self.shutdown(err.args[0])\n except Exception:\n log.exception('error executing command')", "def testLineParsingNormal(self):\n\n a = LedSwitcher(\"../test/testinputs/input_assign3.txt\")\n a.parseFile()\n self.assertTrue(a.parseEachLine(\"turn on 619,181 through 736,944\") == [True, 619, 181, 736, 944])", "def parse(cls, input):", "def __get_transition(self, i: int) -> int:\n line = self.contents[i]\n pieces = [x for x in line.split() if (x.find(':') == -1)]\n action = self.actions.index(pieces[0])\n\n if len(pieces) == 4:\n # case 1: T: <action> : <start-state> : <next-state> %f\n start_state = self.states.index(pieces[1])\n next_state = self.states.index(pieces[2])\n prob = float(pieces[3])\n self.T[(action, start_state, next_state)] = prob\n return i + 1\n\n elif len(pieces) == 3:\n # case 2: T: <action> : <start-state> : <next-state>\n # %f\n start_state = self.states.index(pieces[1])\n next_state = self.states.index(pieces[2])\n next_line = self.contents[i + 1]\n prob = float(next_line)\n self.T[(action, start_state, next_state)] = prob\n return i + 2\n\n elif len(pieces) == 2:\n # case 3: T: <action> : <start-state>\n # %f %f ... %f\n start_state = self.states.index(pieces[1])\n next_line = self.contents[i + 1]\n probs = next_line.split()\n assert len(probs) == len(self.states)\n for j in range(len(probs)):\n prob = float(probs[j])\n self.T[(action, start_state, j)] = prob\n return i + 2\n\n elif len(pieces) == 1:\n next_line = self.contents[i + 1]\n if next_line == 'identity':\n # case 4: T: <action>\n # identity\n for j in range(len(self.states)):\n for k in range(len(self.states)):\n prob = 1.0 if j == k else 0.0\n self.T[(action, j, k)] = prob\n return i + 2\n\n elif next_line == 'uniform':\n # case 5: T: <action>\n # uniform\n prob = 1.0 / float(len(self.states))\n for j in range(len(self.states)):\n for k in range(len(self.states)):\n self.T[(action, j, k)] = prob\n return i + 2\n\n else:\n # case 6: T: <action>\n # %f %f ... %f\n # %f %f ... %f\n # ...\n # %f %f ... 
%f\n for j in range(len(self.states)):\n probs = next_line.split()\n assert len(probs) == len(self.states)\n for k in range(len(probs)):\n prob = float(probs[k])\n self.T[(action, j, k)] = prob\n next_line = self.contents[i + 2 + j]\n return i + 1 + len(self.states)\n\n else:\n raise Exception('Cannot parse line ' + line)", "def parseInput(input):\n # parse=bash(\"sh ../bitpar/parse '\"+input+\"'\") # ouput: [.VP [.V draw][.NP [.D a][.N-bar [.N square]]]]\n bash(\"java -jar ../lambda/lambda-auto.jar ../lambda/input.txt > ../lambda/input.tex\")\n fml=bash(\"make -C ../lambda input.fml\")\n print fml\n cmd=`fml`.split('true ')[1]\n \n # TEST CASES\n # cmd=\"draw(Gy[red(y) & square(y)])\" \n cmd=\"draw(\\gamma y(red(y) & square(y))).\"\n\n print cmd\n parse(cmd)", "def lex(self, line):\n\n # only add line if we are in a continuation or line is not empty\n if self.continuation is True or line.strip() != '':\n self.line += line\n\n self.continuation = False\n # keep running states until out of data or we need a continuation\n while self.continuation is False and len(self.line) > 0:\n for token in self.state():\n if token.ident == Lexer.error.ident:\n yield token\n # reset state on error\n self._reset()\n return\n yield token", "def step(self):\n if self.__global_state != DFAGlobalState.START:\n raise RuntimeError('DFA is not started!')\n\n if len(self.__input_list) > 0:\n ch = self.__input_list[0]\n transit_to = self.__find_current_state_transition(ch)\n if transit_to:\n self.__logging_list.add_event(DfaLoggingEvent(self.__current_state, ch, transit_to))\n self.__current_state = transit_to\n self.__input_list = self.__input_list[1:]\n else:\n self.__logging_list.set_error(f'no transition for symbol \"{ch}\" in state \"{self.__current_state}\"')\n self.halt()\n return\n else:\n if self.__current_state not in self.__dfa_dict['end_states']:\n self.__logging_list.set_error(f'input string ended at non end state \"{self.__current_state}\"')\n self.halt()", "def parse(self, transitions):\n #print \"\\n\\n\\n\\n%s\\n\\n\\n\\n\\n\\n\"%self.sentence\n # print transitions\n for transition in transitions:\n self.parse_step(transition)\n #print \"#######################\\n%s##############\\n\"%self.sentence\n return self.dependencies" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
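`check_line` above dispatches on the first transition whose compiled pattern matches the current line: each entry of `state.transitions` is a `(pattern, method, next_state)` triple, and the matching method returns what the state machine runs with next. A stripped-down sketch of that first-match dispatch loop follows; the patterns, handlers, and state names are invented for illustration and are not docutils transitions.

import re

def dispatch(line, transitions, transition_order):
    # Try the transitions in order; the first pattern that matches wins.
    for name in transition_order:
        pattern, handler, next_state = transitions[name]
        match = pattern.match(line)
        if match:
            return handler(match, next_state)
    # Nothing matched: fall through, analogous to State.no_match in the real machine.
    return ("no match", None)

transitions = {
    "bullet": (re.compile(r"[-*+] "), lambda m, s: ("bullet item", s), "Bullet"),
    "blank": (re.compile(r" *$"), lambda m, s: ("blank line", s), "Body"),
}
order = ["bullet", "blank"]

assert dispatch("* item", transitions, order) == ("bullet item", "Bullet")
assert dispatch("", transitions, order) == ("blank line", "Body")
assert dispatch("some text", transitions, order) == ("no match", None)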
Initialize & add a `state_class` (`State` subclass) object.
def add_state(self, state_class):
    statename = state_class.__name__
    if statename in self.states:
        raise DuplicateStateError(statename)
    self.states[statename] = state_class(self, self.debug)
[ "def setup_class(cls):\n cls.state = State()\n cls.state.name = \"Oregon\"", "def set_classy_state(self, state: Dict[str, Any]) -> None:\n raise NotImplementedError", "def fsm_factory(name, states):\n className = name.capitalize() + \"State\"\n attribs = dict(\n __mapper_args__={\"polymorphic_identity\": name},\n table=name,\n values=states,\n )\n class_ = type(className, (State,), attribs)\n return class_", "def __init__(self):\n this = _coin.new_ScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def _state(self):\n self.state = _State(self.ct)", "def __init__(self):\n this = _coin.new_SoScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, state=False):\r\n \r\n # If state is a string, convert it to all caps (for case insensitivity)\r\n # and convert it into bool\r\n # (True if it is \"ON\", False if it is \"OFF\")\r\n if(isinstance(state, str)):\r\n state = state.upper()\r\n if (state == \"ON\"):\r\n state = True\r\n elif (state == \"OFF\"):\r\n state = False\r\n else:\r\n raise InvalidStateException\r\n \r\n # Set the state of the switch\r\n self._on = state\r\n \r\n return", "def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.options)\n return state", "def __init__(self):\n this = _coin.new_ScXMLStateElt()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def state_class(self):\n return self._stateclass", "def test_init_state(self) -> None:\n # Execute\n state = self.state_factory()\n\n # Assert\n assert isinstance(state, State)", "def __init__(self, state_name: str, changes: List[int]) -> None:\n self.__changes = changes\n super(GuineaPigState, self).__init__(state_name.upper())", "def state_class(self):\n return self._state_class", "def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_enable_bfd_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_enable_bfd_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()", "def _set_state(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, 
namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"state must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_state_openconfig_local_routing__local_routes_static_routes_static_next_hops_next_hop_state, is_container='container', yang_name=\"state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__state = t\n if hasattr(self, '_set'):\n self._set()", "def initial_state(self):\n return GeneralGameState(self)", "def __init__(self, state_type=None, name=None, path=None, vibs_path=None, sigma=None,\n mass=None, inertia=None, gasdata=None, add_to_energy=None, path_to_pickle=None,\n read_from_alternate=None, truncate_freq=True, energy_source=None, freq_source=None,\n freq=None, i_freq=None, Gelec=None, Gzpe=None, Gvibr=None, Gtran=None, Grota=None, Gfree=None):\n\n if path_to_pickle:\n assert (os.path.isfile(path_to_pickle))\n newself = pickle.load(open(path_to_pickle, 'rb'))\n assert (isinstance(newself, State))\n for att in newself.__dict__.keys():\n setattr(self, att, getattr(newself, att))\n else:\n if name is None:\n name = os.path.basename(path)\n self.state_type = state_type\n self.name = name\n self.path = path\n self.vibs_path = vibs_path\n self.sigma = sigma\n self.mass = mass\n self.inertia = inertia\n self.gasdata = gasdata\n self.add_to_energy = add_to_energy\n self.read_from_alternate = read_from_alternate\n self.truncate_freq = truncate_freq\n self.energy_source = energy_source\n self.freq_source = freq_source\n self.Gelec = Gelec\n self.Gzpe = Gzpe\n self.Gtran = Gtran\n self.Gvibr = Gvibr\n self.Grota = Grota\n self.Gfree = Gfree\n self.tran_source = None if self.Gtran is None else 'inputfile'\n self.rota_source = None if self.Grota is None else 'inputfile'\n self.vibr_source = None if self.Gvibr is None else 'inputfile'\n self.free_source = None if self.Gfree is None else 'inputfile'\n self.freq = None\n self.i_freq = None\n self.shape = None\n self.atoms = None\n if freq is not None:\n self.freq_source = 'inputfile'\n self.freq = np.array(sorted(freq, reverse=True))\n if i_freq is not None:\n self.i_freq = np.array(sorted(i_freq, reverse=True))\n if self.state_type == 'gas':\n assert(self.sigma is not None)\n\n if self.inertia is not None:\n inertia_cutoff = 1.0e-12\n self.inertia = np.array([i if i > inertia_cutoff else\n 0.0 for i in self.inertia])\n self.shape = len([i for i in self.inertia if i > 0.0])\n if self.shape < 2:\n print('Too many components of the moments of inertia are zero.'\n 'Please specify atoms differently.')", "def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLStateElt_addState(self, state)", "def addState(self, state):\n id = len(self.states)\n self.states.append(state)\n return id", "def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLScxmlElt_addState(self, state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add `state_classes` (a list of `State` subclasses).
def add_states(self, state_classes):
    for state_class in state_classes:
        self.add_state(state_class)
[ "def add_css_classes(self, *css_classes):\n for cls in css_classes:\n self._css_classes.add(cls)", "def add_class(self, class_):\n self.classes.append(class_)", "def addState(self, state):\n id = len(self.states)\n self.states.append(state)\n return id", "def _register_classes(classes, addon_name_for_counter=None):\n\n from bpy.utils import register_class\n\n class_count = 0\n for cls in classes:\n register_class(cls)\n class_count += 1\n if addon_name_for_counter:\n print(f\"{addon_name_for_counter}: Registered {str(class_count)} classes\")", "def register_event_handler_classes(self, *event_handler_classes: Type[EventHandler]) -> None:\n _logger.info(\"Registering event handler classes: {}\",\n \", \".join(str(c) for c in event_handler_classes))\n self._handlers += [self.injector.get_instance(cls) for cls in event_handler_classes]", "def _registerClasses(classes) -> None:\n global _registered_classes\n _registered_classes = classes", "def set_classy_state(self, state: Dict[str, Any]) -> None:\n raise NotImplementedError", "def update_state_history(self, state: int) -> None:\n self.state_history.append(state)", "def add_states(self, name, state_group):\n try:\n self._state_groups[name] = state_group\n except AttributeError:\n self._state_groups = { name: state_group }", "def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLStateElt_addState(self, state)", "def pushState(self, state):\n self.statebuff = [state] + self.statebuff[:len(self.statebuff) - 1]", "def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLScxmlElt_addState(self, state)", "def add_state_group(\n self, name: str, *states: t.Union[ManagedState, ManagedStateGroup]\n ) -> None:\n # See `_add_state_internal` for explanation of the following\n if hasattr(self, name):\n raise AttributeError(\n f\"State group name {name!r} conflicts with an existing \"\n f\"attribute in the state manager\"\n )\n mstate = ManagedStateGroup(name, self, states)\n self.states[name] = mstate\n setattr(self, name, mstate)", "def add_state(self, state):\n self.Q.add(state)\n self.delta[state] = defaultdict(set)", "def add(self, state):\r\n # The hash function is a Python builtin that generates\r\n # a hash value from its argument. Use this to create\r\n # a dictionary key. 
Handle collisions by storing \r\n # states that hash to the same key in a bucket list.\r\n # Note that when you access a Python dictionary by a\r\n # non existant key, it throws a KeyError\r\n \r\n # if the hash key of the given state is not in this\r\n # explored instance's set of keys\r\n if state.__hash__() not in self.explored_set.keys():\r\n # create a new set for the particular hash key\r\n self.explored_set[state.__hash__()] = set()\r\n # then just add the state to the set of the\r\n # particular hash key\r\n self.explored_set[state.__hash__()].add(state)", "def addState(self, state: 'ScXMLStateElt') -> \"void\":\n return _coin.ScXMLParallelElt_addState(self, state)", "def setStateList (self, states):\n\t\tself.state_list = states", "def register_classes_factory(classes):\n def register():\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n\n def unregister():\n from bpy.utils import unregister_class\n for cls in reversed(classes):\n unregister_class(cls)\n\n return register, unregister", "def add(self, state):\n raise Exception(\"TreeSearch is abstract\")", "def add_state(frontier,state, cost,stateStr,depth):\n\n count = next(counter)\n entry = [cost, count, state,stateStr,depth]\n entry_finder[stateStr] = entry\n heappush(frontier, entry)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make and add transitions listed in `self.initial_transitions`.
def add_initial_transitions(self):
    if self.initial_transitions:
        names, transitions = self.make_transitions(
            self.initial_transitions)
        self.add_transitions(names, transitions)
[ "def getAutomaticTransitions():", "def set_transitions(self, cell_transition, orientation, new_transitions):\n raise NotImplementedError()", "def setup_transition_list(self):\n \n # Create an empty transition list\n xn_list = []\n \n # Append four transitions to the list.\n # Note that the arguments to the Transition() object constructor are:\n # - Tuple representing starting pair state\n # (left cell, right cell, orientation [0=horizontal])\n # - Tuple representing new pair state\n # (bottom cell, top cell, orientation [1=vertical])\n # - Transition rate (cells per time step, in this case 1 sec)\n # - Name for transition\n # - Flag indicating that the transition involves an exchange of properties\n # - Function to be called after each transition, to update a property\n # (in this case, to simulate bleaching of the luminescence signal)\n xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion', True, self.update_bleaching) )\n xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion', True, self.update_bleaching) )\n xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion', True, self.update_bleaching) )\n xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion', True, self.update_bleaching) )\n \n return xn_list", "def __init__(self, transitions):\n if isinstance(transitions, types.GeneratorType):\n transitions = [t for t in transitions]\n assert isinstance(transitions, list) # currently, a list is wanted for a MDD (and not a set); to be changed?\n super().__init__(transitions)", "def add_transition(self, from_state, to_state,\n trans_func, output_func):\n\n if from_state in self.transitions.keys():\n self.transitions[from_state].append((\n to_state,\n trans_func,\n output_func\n ))\n\n return\n\n self.transitions[from_state] = []\n self.transitions[from_state].append((\n to_state,\n trans_func,\n output_func\n ))", "def add_transition(self, prev_time_step, prev_action, prev_legal_actions,\n time_step):\n assert prev_time_step is not None\n next_legal_actions = (\n time_step.observations[\"legal_actions\"][self.player_id])\n next_legal_one_hots = self._to_one_hot(next_legal_actions)\n # Added for deep OMD: keep previous action mask.\n prev_legal_one_hots = self._to_one_hot(prev_legal_actions)\n\n transition = Transition(\n info_state=(\n prev_time_step.observations[\"info_state\"][self.player_id][:]),\n action=prev_action,\n legal_one_hots=prev_legal_one_hots,\n reward=time_step.rewards[self.player_id],\n next_info_state=time_step.observations[\"info_state\"][self.player_id][:],\n is_final_step=float(time_step.last()),\n next_legal_one_hots=next_legal_one_hots)\n self._replay_buffer.add(transition)", "def _start_transition(self, transition: TransitionBase, current_text: str, new_text: str,\n current_colors: List[RGBColor], new_colors: List[RGBColor],\n update_hz: float, flashing, flash_mask):\n current_colors = self._expand_colors(current_colors, len(current_text))\n new_colors = self._expand_colors(new_colors, len(new_text))\n if self._current_transition:\n self._stop_transition()\n self._current_transition = TransitionRunner(self.machine, transition, current_text, new_text,\n current_colors, new_colors)\n transition_text = next(self._current_transition)\n self._update_display(SegmentDisplayState(transition_text, flashing, flash_mask))\n self._transition_update_task = self.machine.clock.schedule_interval(self._update_transition, 1 / update_hz)", "def lease_transitions(self, lease_transitions):\n\n self._lease_transitions = lease_transitions", "def 
start(transition):", "def run(self, s):\n state = self.init_state\n for c in s:\n state = self.transition(state, c)\n return state", "def addTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLStateElt_addTransition(self, transition)", "def transitions(jira, args):\n print(\"Available JIRA transitions:\")\n pprint.pprint(jira.transitions(args.issue))", "def setDefaultStateTransitionHandlers(self):\n StateTransition.setDefaultStateTransitionHandler(\n MemoryStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.Memory,\n )\n StateTransition.setDefaultStateTransitionHandler(\n VectorRegisterStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.VectorRegister,\n )\n StateTransition.setDefaultStateTransitionHandler(\n SystemRegisterStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.SystemRegister,\n )\n StateTransition.setDefaultStateTransitionHandler(\n GprStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.GPR,\n )\n StateTransition.setDefaultStateTransitionHandler(\n VmContextStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.VmContext,\n )\n StateTransition.setDefaultStateTransitionHandler(\n PrivilegeLevelStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.PrivilegeLevel,\n )\n StateTransition.setDefaultStateTransitionHandler(\n PcStateTransitionHandlerRISCV(self.genThread), EStateElementType.PC\n )\n StateTransition.setDefaultStateTransitionHandler(\n FloatingPointRegisterStateTransitionHandlerRISCV(self.genThread),\n EStateElementType.FloatingPointRegister,\n )", "def setTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLInitialElt_setTransition(self, transition)", "def bindTransitions( form_instance, transitions, wf_name=None, wf=None):\n\n if wf_name:\n success_factory = lambda tid: TransitionHandler( tid, wf_name )\n else:\n success_factory = TransitionHandler\n\n actions = []\n for tid in transitions:\n d = {}\n if success_factory:\n d['success'] = success_factory( tid )\n if wf is not None:\n title = _(unicode(wf.getTransitionById( tid ).title))\n action = form.Action(title, **d)\n else:\n action = form.Action( tid, **d)\n action.form = form_instance\n action.__name__ = \"%s.%s\"%(form_instance.prefix, action.__name__)\n \n actions.append( action )\n return actions", "def output_transitions(self, output_transitions):\n\n self._output_transitions = output_transitions", "def add_to_fsm(self, fsm):\n final = fsm.get_next_state()\n if fsm.finals:\n final = fsm.finals[0]\n fsm.add_final(final)\n current = fsm.initial\n for i in range(len(self.relations) - 1):\n next_node = fsm.get_next_state()\n if i in self.inputs:\n fsm.add_transition(current,\n next_node,\n self.relations[i][0] + \"_IN\" +\n self.relations[i][1] * \"m\")\n else:\n fsm.add_transition(current,\n next_node,\n self.relations[i][0] + \"_OUT\" +\n self.relations[i][1] * \"m\")\n current = next_node\n if i in self.outputs:\n fsm.add_final(next_node)\n last = len(self.relations) - 1\n # Normally this is stupid...\n if last in self.inputs:\n fsm.add_transition(current,\n final,\n self.relations[last][0] + \"_IN\" +\n self.relations[last][1] * \"m\")\n else:\n fsm.add_transition(current,\n final,\n self.relations[last][0] + \"_OUT\" +\n self.relations[last][1] * \"m\")\n return fsm", "def _update_transition(self):\n try:\n transition_text = next(self._current_transition)\n self._update_display(SegmentDisplayState(transition_text, self._current_state.flashing,\n 
self._current_state.flash_mask))\n\n except StopIteration:\n self._stop_transition()", "def addTransition( self, wf, trans_id, REQUEST=None ):\n workflow = self[wf]\n workflow.transitions.addTransition(trans_id)\n if REQUEST is not None:\n REQUEST['RESPONSE'].redirect( self.portal_url() + '/transitions?wf=' + wf + \\\n '&portal_status_message=Transition+added' )", "def addTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLParallelElt_addTransition(self, transition)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a transition by `name`.
def remove_transition(self, name): try: del self.transitions[name] self.transition_order.remove(name) except: raise UnknownTransitionError(name)
[ "def remove(self, name):\n if self.states[name]:\n del self.states[name]", "def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLStateElt_removeTransition(self, transition)", "def removeScene(self, name: str) -> None:\r\n\r\n for scene in self.scenes:\r\n if scene.name == name:\r\n self.scenes.remove(scene)\r\n return", "def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLParallelElt_removeTransition(self, transition)", "def remove_layer(self, name: str):\n for i, layer in enumerate(self.layers):\n if layer.name == name:\n del self.layers[i]", "def remove(self, name):\n try:\n self.schedule_lock.acquire()\n the_task = None\n for task in self.schedule:\n if task.name == name:\n the_task = task\n if the_task is not None:\n self.schedule.remove(the_task)\n except:\n raise\n finally:\n self.schedule_lock.release()", "def delete(self, name):\n if name in self.steps:\n self.steps.pop(name)\n else:\n self.log('{} not in steps dict'.format(name), level='warn')\n if name in self.order:\n ind = self.order.index(name)\n self.order = self.order[:ind] + self.order[ind + 1:]\n else:\n self.log('{} not in order tuple'.format(name), level='warn')\n self.save()", "def remove(self, name):\n if not isinstance(name, string_types):\n raise TypeError('Tensor name must be a string.')\n if not self.has(name):\n raise RuntimeError(\"Can't find tensor: {}\".format(name))\n\n tensor = self._data[name]\n del self._data[name]\n\n return tensor", "def remove(self, name):\n # remove and return the sentence sequence\n return self.sentences.pop(name, None)", "def remove(self, name):\r\n if name not in self._task_map: return True\r\n\r\n id = [x.name() for x in self._task_list].index(name)\r\n self._task_list[id].stop(self)\r\n self.log.msg('Removed task`' + name +'`')\r\n del self._task_list[id]\r\n del self._task_map[name]\r\n return True", "def removeCalendarWithName(name): # @NoSelf", "def remove_input(self, name):\n self._input.remove(name)", "def remove_transition(self, component):\n # check if component is valid\n if component != None:\n # check object type\n if type(component) == transition.Transition:\n # remove transition\n del self._transitions[component.key]\n return True\n return False", "def remove_curve(self, name):\n logger.debug(\"Removing %s from TyphonTimePlot ...\", name)\n self.timechart.remove_curve(name)", "def delete(self, name):\n LOG.info(\"Delete workflow [name=%s]\" % name)\n\n db_api.delete_workflow_definition(name)", "def removeReference(self, name: 'SbName') -> \"void\":\n return _coin.SoInput_removeReference(self, name)", "def removeEntityName(name):\n ierr = c_int()\n lib.gmshModelRemoveEntityName(\n c_char_p(name.encode()),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelRemoveEntityName returned non-zero error code: \",\n ierr.value)", "def remove_hero(self, name):\n if self.name in self.heroes:\n z = index(self.name)\n self.heroes.pop(z)\n else:\n return 0", "def remove_transition_key(self, key):\n # check if key is valid\n if key != \"\" and self._transitions.has_key(key):\n # remove transition\n del self._transitions[key]\n return True\n return False", "def Remove(self, name):\n new_waypoints = []\n removed_waypoint = None\n with self._lock:\n for waypoint in self._waypoints:\n if waypoint.name != name:\n new_waypoints.append(waypoint)\n else:\n removed_waypoint = waypoint\n self._waypoints = new_waypoints\n if self._waypoints:\n self._waypoints[0].active = True\n return 
removed_waypoint" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of transition names and a transition mapping.
def make_transitions(self, name_list): stringtype = type('') names = [] transitions = {} for namestate in name_list: if type(namestate) is stringtype: transitions[namestate] = self.make_transition(namestate) names.append(namestate) else: transitions[namestate[0]] = self.make_transition(*namestate) names.append(namestate[0]) return names, transitions
[ "def create_transition_dict(self):\n out = {}\n for state in self.states:\n to_states, probas = self.transition_from(state)\n out[state] = {s: p for s, p in zip(to_states, probas)}\n return out", "def getAutomaticTransitions():", "def transitions(self) -> list:\n transitions = [\n key\n for key in self.trans_exec\n if self.idf.as_version >= key > self.idf.file_version\n ]\n transitions.sort()\n return transitions", "def transitions(self):\n transitions = [\n key\n for key in self.trans_exec\n if self.idf.as_version >= key > self.idf.file_version\n ]\n transitions.sort()\n return transitions", "def rename(self):\n\n ids = {s: str(i) for (i, s) in enumerate(sorted(list(self.Q())))}\n\n self.transitions = [(ids[t[0]], t[1], ids[t[2]]) for t in self.transitions]\n self.F = [ids[f] for f in self.F]\n self.q0 = ids[self.q0]", "def getTransitions(source, type):", "def state_names(model):\n return tuple(n for n, v in model[\"state\"])", "def transitions(jira, args):\n print(\"Available JIRA transitions:\")\n pprint.pprint(jira.transitions(args.issue))", "def get_state_names() -> List[str]:\n names = []\n names += get_state_names_1qubit()\n names += get_state_names_2qubit()\n names += get_state_names_3qubit()\n names += get_state_names_1qutrit()\n names += get_state_names_2qutrit()\n return names", "def __transitions(self):\n # Initialize the transition probailities tensor (S,S,A)\n dimensions = (self.n_states,self.n_states,self.n_actions)\n transition_probabilities = np.zeros(dimensions)\n\n # Compute the transition probabilities. Note that the transitions\n # are deterministic.\n for s in range(self.n_states):\n for a in range(self.n_actions):\n n = 0\n next_s_vec = list()\n for a_batman in self.actions_batman:\n next_s, caught = self.__move(s, a, a_batman)\n \n if caught:\n n = 1\n next_s_vec = [next_s]\n break\n\n elif next_s != None:\n n += 1\n next_s_vec.append(next_s)\n \n for next_s in next_s_vec:\n transition_probabilities[next_s, s, a] = 1/n\n return transition_probabilities", "def edit_names(a):\n\n dictionary={}\n i=0\n for state in a.states:\n dictionary[str(i)]=state\n i+=1\n\n # rename states\n a.states=list(a.states)\n for i in range(len(a.states)):\n a.states[i]=list(dictionary.keys())[list(dictionary.values()).index(a.states[i])]\n a.states=set(a.states)\n\n # rename start states\n a.start=list(a.start)\n for i in range(len(a.start)):\n a.start[i]=list(dictionary.keys())[list(dictionary.values()).index(a.start[i])]\n a.start=set(a.start)\n\n # rename accept states\n a.accept=list(a.accept)\n for i in range(len(a.accept)):\n a.accept[i]=list(dictionary.keys())[list(dictionary.values()).index(a.accept[i])]\n a.accept=set(a.accept)\n\n # rename transitions\n for i in range(len(a.transitions)):\n a.transitions[i][0]=list(dictionary.keys())[list(dictionary.values()).index(a.transitions[i][0])]\n a.transitions[i][2]=list(dictionary.keys())[list(dictionary.values()).index(a.transitions[i][2])]", "def getTransition(self) -> \"ScXMLTransitionElt *\":\n return _coin.ScXMLHistoryElt_getTransition(self)", "def parse(self, transitions):\n #print \"\\n\\n\\n\\n%s\\n\\n\\n\\n\\n\\n\"%self.sentence\n # print transitions\n for transition in transitions:\n self.parse_step(transition)\n #print \"#######################\\n%s##############\\n\"%self.sentence\n return self.dependencies", "def get_transition(counts, label):\r\n counts.inc_no_visible()\r\n return petri.petrinet.PetriNet.Transition(label, label)", "def get_transitions(self, cell_transition, orientation):\n raise NotImplementedError()", 
"def get_transitions(self, player):\n\n return self.transitions.get(player, [])", "def get_transitions(content, request, from_state=None):", "def parse_initial_state_transitions(lines: List[Line]) -> Tuple[Dict[str, Line], List[Line]]:\n remaining_lines = []\n initial_state_names = {}\n\n for line in lines:\n m = re.fullmatch(r'^\\[\\*\\]\\s+-{1,2}>\\s+(\\w+)\\s*(.*)', line.text)\n if not m:\n remaining_lines.append(line)\n continue\n\n name, trailing_text = m.groups()\n assert name not in initial_state_names, f'Duplicate initial transition for state {name} in {line}'\n assert not trailing_text, f'Additional text after initial transition in {line}: {line.orig_text}'\n initial_state_names[name] = line\n\n return initial_state_names, remaining_lines", "def _MapToMathsNames(startingPoint, GoalFunction, Inequalities, Equalities):\n x = startingPoint\n F = GoalFunction\n gs = Inequalities\n hs = Equalities\n return x, F, gs, hs", "def get_transition(self,route_id,stop_id):\n if stop_id in self.routes[route_id].trans:\n return self.routes[route_id].trans[stop_id]\n else:\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A "do nothing" transition method. Return unchanged `context` & `next_state`, empty result. Useful for simple state changes (actionless transitions).
def nop(self, match, context, next_state): return context, next_state, []
[ "def noop_context():\n yield", "def noop(value, state = None):\n return value, None", "def strip_state(e: Expression) -> None:\n if hasattr(e, \"state\"):\n e.state = None\n for c in e.children():\n strip_state(c)", "def reset(self):\n self.env.reset()\n\n repeat_noop_times = self.unwrapped.np_random.randint(1, self.max_noop_times + 1)\n for _ in range(repeat_noop_times):\n state, _, done, _ = self.env.step(self.noop_action)\n if done:\n state = self.env.reset()\n\n return state", "def erase_state(self, *args):\n return _wali.WFA_erase_state(self, *args)", "def SoAction_nullAction(action: 'SoAction', node: 'SoNode') -> \"void\":\n return _coin.SoAction_nullAction(action, node)", "def nullAction(action: 'SoAction', node: 'SoNode') -> \"void\":\n return _coin.SoAction_nullAction(action, node)", "def removeStateChangeCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_removeStateChangeCallback(self, *args)", "def patch_opp_state(opp):\n opp.state = core.CoreState.not_running", "def _reset_state_wrapper(self):\n self._reset_state_impl()\n self._is_adapted = False", "def next_state(self, state: State, jointaction: JointAction) -> State:\n pass", "def go_to_state(self, next_state):\n for t in self.transitions:\n if t.next_state == None:\n t.next_state = next_state\n return self.root", "def reset_context(self):\n self.current.clear()", "def test_disable_running_transition():\n\n def assert_new(instance):\n \"\"\"\n ensure the state is still the original state\n \"\"\"\n assert instance.state == \"new\"\n\n x = get_thing()\n x.disable_running_state(assert_new)", "def remove_state(self) -> None:\n self._state = {}", "def reset(self):\n self.mutate(WorldState())", "def transition(self):\n next_state = self.current_state.transition()\n # self.printStateChange(self.current_state, next_state)\n self.current_state = next_state", "def restore_state(self):\n if self:\n self.pop()\n else:\n log.warning(\"Can't reset empty state\")", "def prepare_forward(self, seeding_pos):\n self.hidden_recurrent_states = None\n\n return super().prepare_forward(seeding_pos)", "def reset(self, s = None, as_tuple = False):\n if (s is None):\n self.current_state = self.legal_states[np.random.choice(self.states())]\n else:\n s = s if isinstance(s, tuple) else self.legal_states[s]\n self.current_state = s\n return self.state(as_tuple = as_tuple), self.rewards[self.state(as_tuple=True)], self.terminals[self.state(as_tuple=True)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an indented block and info. Extract an indented block where the indent is known for all lines. Starting with the current line, extract the entire text block with at least `indent` indentation (which must be whitespace, except for the first line).
def get_known_indented(self, indent, until_blank=False, strip_indent=True): offset = self.abs_line_offset() indented, indent, blank_finish = self.input_lines.get_indented( self.line_offset, until_blank, strip_indent, block_indent=indent) self.next_line(len(indented) - 1) # advance to last indented line while indented and not indented[0].strip(): indented.trim_start() offset += 1 return indented, offset, blank_finish
[ "def get_block_indent(text):\n lines = text.split('\\n')\n cnt = []\n for i in lines:\n if i != '' and not i.isspace():\n cnt.append(get_indent(i))\n return min(cnt)", "def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )", "def indenter(text_to_indent):\n temp = \"\"\n for line in json.dumps(text_to_indent, indent=2).split('\\n'):\n temp = temp + \"# %s\\n\" % line\n return temp.strip()", "def _indents(self, line) -> Tuple[int, int]:\n import re\n\n indent = len(re.match(r'( *)', line).group(1))\n list_match = re.match(r'( *)(([*\\-+>]+|\\w+\\)|\\w+\\.) +)', line)\n if list_match:\n sub_indent = indent + len(list_match.group(2))\n else:\n sub_indent = indent\n\n return indent, sub_indent", "def _indent(text):\r\n indented, lines = '', text.splitlines(True)\r\n for line in lines:\r\n indented += '..' + line\r\n return indented", "def indentation():\n try:\n indent()\n yield\n finally:\n unindent()", "def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))", "def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish", "def _deduce_line_current_indent(\n elements: ReflowSequenceType, last_line_break_idx: Optional[int] = None\n) -> str:\n indent_seg = None\n if last_line_break_idx:\n indent_seg = cast(\n ReflowPoint, elements[last_line_break_idx]\n )._get_indent_segment()\n elif isinstance(elements[0], ReflowPoint) and elements[0].segments[\n 0\n ].pos_marker.working_loc == (1, 1):\n # No last_line_break_idx, but this is a point. It's the first line.\n # Get the last whitespace element.\n # TODO: We don't currently handle the leading swallowed whitespace case.\n # That could be added here, but it's an edge case which can be done\n # at a later date easily. 
For now it won't get picked up.\n for indent_seg in elements[0].segments[::-1]:\n if indent_seg.is_type(\"whitespace\"):\n break\n # Handle edge case of no whitespace, but with newline.\n if not indent_seg.is_type(\"whitespace\"):\n indent_seg = None\n\n if not indent_seg:\n return \"\"\n\n # We have to check pos marker before checking is templated.\n # Insertions don't have pos_markers - so aren't templated,\n # but also don't support calling is_templated.\n if indent_seg.is_type(\"placeholder\"):\n # It's a consumed indent.\n return cast(TemplateSegment, indent_seg).source_str.split(\"\\n\")[-1] or \"\"\n elif not indent_seg.pos_marker or not indent_seg.is_templated:\n assert \"\\n\" not in indent_seg.raw, f\"Found newline in indent: {indent_seg}\"\n return indent_seg.raw\n else: # pragma: no cover\n # It's templated. This shouldn't happen. Segments returned by\n # _get_indent_segment, should be valid indents (i.e. whitespace\n # or placeholders for consumed whitespace). This is a bug.\n if indent_seg.pos_marker:\n reflow_logger.warning(\n \"Segment position marker: %s: [SRC: %s, TMP:%s]\",\n indent_seg.pos_marker,\n indent_seg.pos_marker.source_slice,\n indent_seg.pos_marker.templated_slice,\n )\n raise NotImplementedError(\n \"Unexpected templated indent. Report this as a bug on \"\n f\"GitHub. Segment: {indent_seg}\\n\"\n \"https://github.com/sqlfluff/sqlfluff/issues/new/choose\"\n )", "def _indent_description(indent: str):\n if indent == \"\":\n return \"no indent\"\n elif \" \" in indent and \"\\t\" in indent:\n return \"mixed indent\"\n elif indent[0] == \" \":\n assert all(c == \" \" for c in indent)\n return f\"indent of {len(indent)} spaces\"\n elif indent[0] == \"\\t\": # pragma: no cover\n assert all(c == \"\\t\" for c in indent)\n return f\"indent of {len(indent)} tabs\"\n else: # pragma: no cover\n raise NotImplementedError(f\"Invalid indent construction: {indent!r}\")", "def find_indentation(node):\r\n while node is not None:\r\n if node.type == syms.suite and len(node.children) > 2:\r\n indent = node.children[1]\r\n if indent.type == token.INDENT:\r\n return indent.value\r\n node = node.parent\r\n return \"\"", "def test_text_indent():\r\n for indent in ['12px', '6%']: # 6% of 200px is 12px\r\n page, = parse('''\r\n <style>\r\n @page { size: 220px }\r\n body { margin: 10px; text-indent: %(indent)s }\r\n </style>\r\n <p>Some text that is long enough that it take at least three line,\r\n but maybe more.\r\n ''' % {'indent': indent})\r\n html, = page.children\r\n body, = html.children\r\n paragraph, = body.children\r\n lines = paragraph.children\r\n text_1, = lines[0].children\r\n text_2, = lines[1].children\r\n text_3, = lines[2].children\r\n assert text_1.position_x == 22 # 10px margin-left + 12px indent\r\n assert text_2.position_x == 10 # No indent\r\n assert text_3.position_x == 10 # No indent\r", "def _get_indent_segment(self) -> Optional[RawSegment]:\n indent = None\n for seg in reversed(self.segments):\n if seg.pos_marker and not seg.pos_marker.is_literal():\n # Skip any templated elements.\n # NOTE: It must _have_ a position marker at this\n # point however to take this route. 
A segment\n # without a position marker at all, is an edit\n # or insertion, and so should still be considered.\n continue\n elif seg.is_type(\"newline\"):\n return indent\n elif seg.is_type(\"whitespace\"):\n indent = seg\n elif \"\\n\" in (get_consumed_whitespace(seg) or \"\"):\n # Consumed whitespace case.\n # NOTE: In this situation, we're not looking for\n # separate newline and indent segments, we're\n # making the assumption that they'll be together\n # which I think is a safe one for now.\n return seg\n # i.e. if we never find a newline, it's not an indent.\n return None", "def _indent(self, level: int) -> Text:\n\n return self.indent * level", "def get_indent(node, level=0):\n if node.parent:\n level += 1\n return get_indent(node.parent, level)\n return level", "def prologue(_indent):\n return \"\"", "def get_indentation(func):\n src_lines = getsourcelines(func)[0]\n for line in src_lines:\n if not (line.startswith('@') or line.startswith('def') or line.lstrip().startswith('#')):\n return line[:len(line) - len(line.lstrip())]\n return pytypes.default_indent", "def _indent(self, dedent=True):\n num_newlines = self._get_cursor().selectedText().count(u\"\\u2029\")\n save_cur = self._get_cursor()\n cur = self._get_cursor()\n\n # move to first line of selection, if present\n cur.setPosition(cur.selectionStart())\n self._control.setTextCursor(cur)\n spaces = self._get_leading_spaces()\n # calculate number of spaces neded to align/indent to 4-space multiple\n step = self._tab_width - (spaces % self._tab_width)\n\n # insertText shouldn't replace if selection is active\n cur.clearSelection()\n\n # indent all lines in selection (ir just current) by `step`\n for _ in range(num_newlines+1):\n # update underlying cursor for _get_line_start_pos\n self._control.setTextCursor(cur)\n # move to first non-ws char on line\n cur.setPosition(self._get_line_start_pos())\n if dedent:\n spaces = min(step, self._get_leading_spaces())\n safe_step = spaces % self._tab_width\n cur.movePosition(QtGui.QTextCursor.Right,\n QtGui.QTextCursor.KeepAnchor,\n min(spaces, safe_step if safe_step != 0\n else self._tab_width))\n cur.removeSelectedText()\n else:\n cur.insertText(' '*step)\n cur.movePosition(QtGui.QTextCursor.Down)\n\n # restore cursor\n self._control.setTextCursor(save_cur)", "def dumped (text, level, indent=2):\n return indented (\"{\\n%s\\n}\" % indented (text, level+1, indent) or \"None\", level, indent) + \"\\n\"", "def getIndentation(self, line):\n\t\n\t\tnonSpace = re.search('\\S', line)\n\t\n\t\tif nonSpace is None:\n\t\t\treturn 0\n\t\t\t\n\t\telse:\n\t\t\tif re.match('^\\t*\\S', line):\n\t\t\t\treturn nonSpace.start()\n\t\t\t\t\n\t\t\telif re.match('^\\ *\\S', line):\n\t\t\t\treturn nonSpace.start() / 4" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a `StateSM` object; extends `State.__init__()`. Check for indent state machine attributes, set defaults if not set.
def __init__(self, state_machine, debug=False): State.__init__(self, state_machine, debug) if self.indent_sm is None: self.indent_sm = self.nested_sm if self.indent_sm_kwargs is None: self.indent_sm_kwargs = self.nested_sm_kwargs if self.known_indent_sm is None: self.known_indent_sm = self.indent_sm if self.known_indent_sm_kwargs is None: self.known_indent_sm_kwargs = self.indent_sm_kwargs
[ "def __init__(self, token, state, extra):\n self.state = state\n self.token = token\n self.extra = extra\n pass", "def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.options)\n return state", "def __init__(self):\n self.action_space = [(0, 0)] + list(permutations([i for i in range(m)], 2))\n self.action_space = [list(i) for i in self.action_space]\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def __init__(self, state=False):\r\n \r\n # If state is a string, convert it to all caps (for case insensitivity)\r\n # and convert it into bool\r\n # (True if it is \"ON\", False if it is \"OFF\")\r\n if(isinstance(state, str)):\r\n state = state.upper()\r\n if (state == \"ON\"):\r\n state = True\r\n elif (state == \"OFF\"):\r\n state = False\r\n else:\r\n raise InvalidStateException\r\n \r\n # Set the state of the switch\r\n self._on = state\r\n \r\n return", "def __init__(self):\n self.action_space = tuple([(pick_up,drop) for pick_up in (1,2,3,4,5) for drop in (1,2,3,4,5) if pick_up!=drop])\n self.state_space = [(loc, time, day) for loc in np.arange(1,m+1) for time in range(t) for day in range(d)]\n self.state_init = random.choice(self.state_space)\n self.state_input = (np.arange(1,m+1) , np.arange(0,t) , np.arange(0,d))\n # Start the first round\n self.reset()", "def __init__(self):\n this = _coin.new_ScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoScXMLStateMachine()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, agent_id=None, agent_positions=None,\n food_positions=None, fragile_agents=None, wall_positions=None,\n legal_actions=None, reward=None, executed_action=None,\n test_mode=None, realPosition=None):\n super(StateMessage, self).__init__(msg_type=STATE_MSG)\n\n self.agent_id = agent_id\n self.agent_positions = agent_positions\n self.food_positions = food_positions\n self.fragile_agents = fragile_agents\n self.wall_positions = wall_positions\n self.legal_actions = legal_actions\n self.reward = reward\n self.executed_action = executed_action\n self.test_mode = test_mode\n self.realPosition = realPosition", "def __init__(self):\n # Create a initialized state map where all tiles are assumed unknown\n self._state = [TileState.Unknown] * StateMap.TILE_NUMBER\n self._state.append(False) # isClaim bit\n self._state.append(False) # Claim action bit", "def _sgf_init_gamestate(sgf_root):\n\tprops = sgf_root.properties\n\ts_size = props.get('SZ', ['19'])[0]\n\ts_player = props.get('PL', ['B'])[0]\n\t# init board with specified size\n\tgs = go.GameState(int(s_size))\n\t# handle 'add black' property\n\tif 'AB' in props:\n\t\tfor stone in props['AB']:\n\t\t\tgs.do_move(_parse_sgf_move(stone), go.BLACK)\n\t# handle 'add white' property\n\tif 'AW' in props:\n\t\tfor stone in props['AW']:\n\t\t\tgs.do_move(_parse_sgf_move(stone), go.WHITE)\n\t# setup done; set player according to 'PL' property\n\tgs.current_player = go.BLACK if s_player == 'B' else go.WHITE\n\treturn gs", "def __initialise_states(self):\n\n # Start not dead and not powered up\n self.powered_up = False\n self.dead = False", "def initial_state(self):\n return GeneralGameState(self)", "def setup_class(cls):\n cls.state = State()\n cls.state.name = 
\"Oregon\"", "def __init__(self, *, indent: int = 0, indent_step: int = 4):\n self._indent = indent\n self._code = []\n self.INDENT_STEP = indent_step", "def init_level(self, level='lowest'):\n # TODO: init encoders, handle different size aligners\n if len(self.aligners) > 1:\n if level == 'lowest':\n state_dict = self.aligners[1].state_dict()\n self.aligners[0].load_state_dict(state_dict)\n elif level == 'highest':\n state_dict = self.aligners[-2].state_dict()\n self.aligners[-1].load_state_dict(state_dict)\n return self", "def make_state():\r\n return State(name=\"\", cntBins=0, cntSimulation=0\r\n , intLane=0, valBucket = [], cntBucket = []\r\n , pathDirectionList = []\r\n , pathScoreList = []\r\n ) #returns a State objec\r", "def __init__(self, states, params, rxnstring=None):\n\n self.states = states # State management\n self.params = params # Parameter management\n self.reactants = []\n self.products = []\n self.activators = []\n self.inhibitors = []\n self.mark = '--'\n\n if rxnstring: self.read_rxn_str(rxnstring)", "def setStateMachine(self, sm: 'ScXMLStateMachine') -> \"void\":\n return _coin.ScXMLEvaluator_setStateMachine(self, sm)", "def setStateMachine(self, sm: 'ScXMLStateMachine') -> \"void\":\n return _coin.ScXMLCoinEvaluator_setStateMachine(self, sm)", "def make_sm(self):\n return smach.StateMachine(outcomes=['succeeded','aborted','preempted'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle an indented text block. Extend or override in subclasses. Recursively run the registered state machine for indented blocks (`self.indent_sm`).
def indent(self, match, context, next_state): indented, indent, line_offset, blank_finish = \ self.state_machine.get_indented() sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results
[ "def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )", "def update_indent(self) -> None:\n self.indent = self.base_indent * self.level\n self.newline_indent = \"\\n\" + self.indent", "def _indent(self, dedent=True):\n num_newlines = self._get_cursor().selectedText().count(u\"\\u2029\")\n save_cur = self._get_cursor()\n cur = self._get_cursor()\n\n # move to first line of selection, if present\n cur.setPosition(cur.selectionStart())\n self._control.setTextCursor(cur)\n spaces = self._get_leading_spaces()\n # calculate number of spaces neded to align/indent to 4-space multiple\n step = self._tab_width - (spaces % self._tab_width)\n\n # insertText shouldn't replace if selection is active\n cur.clearSelection()\n\n # indent all lines in selection (ir just current) by `step`\n for _ in range(num_newlines+1):\n # update underlying cursor for _get_line_start_pos\n self._control.setTextCursor(cur)\n # move to first non-ws char on line\n cur.setPosition(self._get_line_start_pos())\n if dedent:\n spaces = min(step, self._get_leading_spaces())\n safe_step = spaces % self._tab_width\n cur.movePosition(QtGui.QTextCursor.Right,\n QtGui.QTextCursor.KeepAnchor,\n min(spaces, safe_step if safe_step != 0\n else self._tab_width))\n cur.removeSelectedText()\n else:\n cur.insertText(' '*step)\n cur.movePosition(QtGui.QTextCursor.Down)\n\n # restore cursor\n self._control.setTextCursor(save_cur)", "def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)", "def _indent(self, level: int) -> Text:\n\n return self.indent * level", "def indentation():\n try:\n indent()\n yield\n finally:\n unindent()", "def indent(self, lvl=1):\n self.current_level += lvl\n assert self.current_level >= 0, \"Level of indentation cannot become negative\"\"\"", "def add_text(self, text):\n if text.startswith(nl):\n text = text[1:]\n\n cls = ''\n prefix = ''\n is_code = False\n is_output = False\n interp_line = False\n after_blank = False # state 'after blank line'\n blank = False\n bullets = 0\n code_indent = 0\n output_indent = 0\n\n for line in text.split(nl):\n sline = line.strip()\n if sline.startswith('#'):\n continue\n\n # handle <ul> <li> ...\n if sline == '*':\n bullets = 1\n elif bullets == 1 and sline.startswith('*'):\n bullets = 2\n elif bullets == 2 and not sline.startswith('*'):\n bullets = 0\n self.commands.append( dict(cmd=\"text\", arg=\"</ul>\", indent=indent, cls=cls, prefix=prefix) )\n\n line = line.rstrip()\n blank = bool(not line)\n indent = len(line) - len(line.lstrip()) + 1\n\n if interp_typecmd and line.strip().startswith(\">>>\"):\n self.commands.append(dict(cmd=\"type\", arg=None))\n cls = \"code\"\n prefix = escape(\">>>\") + nbsp\n is_code = True\n interp_line = True\n # interp.prompt, space, 1 level of block indent\n 
code_indent = indent + 3+1\n output_indent = code_indent - 4\n\n # blank line; next line at code indent: still code; ELSE reset code\n # non-blank line; next line at code indent - 4: output\n\n # shorter indent than code should be means end of code block; ignore blank lines\n if not interp_line and indent < code_indent and not blank:\n is_code = False; cls = ''\n\n if not interp_line and after_blank and indent != code_indent and not blank:\n is_code = False; cls = ''\n\n if indent==output_indent and not interp_line:\n is_output = True; cls = \"output\"\n\n if is_output and indent < output_indent:\n is_output = False; cls = ''\n\n # ugly hack: force bigger indent on lines of code except for interp lines\n if is_code and not interp_line:\n indent += 4\n\n line = line.lstrip(\"> \")\n arg = escape(line)\n arg = arg.replace(space, nbsp).replace(\"--\", \"&mdash;\")\n if is_code or is_output:\n for name, fn, tag in images:\n arg = arg.replace(name+\"png\", fn)\n arg = arg.replace(fn, tag)\n\n if bullets == 1:\n self.commands.append( dict(cmd=\"text\", arg=\"<ul>\", indent=indent, cls=cls, prefix=prefix) )\n elif bullets == 2:\n arg = \"<li>%s</li>\" % arg.lstrip('*')\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n else:\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n\n prefix = ''\n interp_line = False\n after_blank = bool(not line.strip())", "def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. 
Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r", "def indent(self, indent: int):\n self._column_offset += indent\n self._current_text.set_x_offset(self._column_offset)", "def _indent(text):\r\n indented, lines = '', text.splitlines(True)\r\n for line in lines:\r\n indented += '..' 
+ line\r\n return indented", "def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))", "def increaseIndentation():\n\tglobal indentLength\n\tindentLength = indentLength + 3", "def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"", "def setIndentStep(self, *args):\r\n return _osgDB.Output_setIndentStep(self, *args)", "def indented(self, message: str, added_depth: int = 0) -> str:\n depth = self.depth + added_depth\n return depth * 4 * ' ' + message", "def render_text(self, indent: str = \" \") -> str:\n self.preprocess()\n return f\"{self._start()}{self._mid(indent)}{self._end()}\"", "def _indent(self):\n if self._debug:\n self._debug += 1", "def indenter(text_to_indent):\n temp = \"\"\n for line in json.dumps(text_to_indent, indent=2).split('\\n'):\n temp = temp + \"# %s\\n\" % line\n return temp.strip()", "def dumped (text, level, indent=2):\n return indented (\"{\\n%s\\n}\" % indented (text, level+1, indent) or \"None\", level, indent) + \"\\n\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle a known-indent text block. Extend or override in subclasses. Recursively run the registered state machine for known-indent indented blocks (`self.known_indent_sm`). The indent is the length of the match, ``match.end()``.
def known_indent(self, match, context, next_state): indented, line_offset, blank_finish = \ self.state_machine.get_known_indented(match.end()) sm = self.known_indent_sm(debug=self.debug, **self.known_indent_sm_kwargs) results = sm.run(indented, input_offset=line_offset) return context, next_state, results
[ "def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish", "def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. 
Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r", "def testCurrentIndent(self):\n\n self.controller.tabUsesSpaces = True\n self.assert_(self.controller._indent_for_block(\"\"\"a=3\"\"\") == None)\n self.assert_(self.controller._indent_for_block(\"\") == None)\n block = \"\"\"def test():\\n a=3\"\"\"\n self.assert_(self.controller._indent_for_block(block) == \\\n ' ' * self.controller.tabSpaces)\n\n block = \"\"\"if(True):\\n%sif(False):\\n%spass\"\"\" % \\\n (' '*self.controller.tabSpaces,\n 2*' '*self.controller.tabSpaces)\n self.assert_(self.controller._indent_for_block(block) == \\\n 2*(' '*self.controller.tabSpaces))", "def _get_indent_segment(self) -> Optional[RawSegment]:\n indent = None\n for seg in reversed(self.segments):\n if seg.pos_marker and not seg.pos_marker.is_literal():\n # Skip any templated elements.\n # NOTE: It must _have_ a position marker at this\n # point however to take this route. A segment\n # without a position marker at all, is an edit\n # or insertion, and so should still be considered.\n continue\n elif seg.is_type(\"newline\"):\n return indent\n elif seg.is_type(\"whitespace\"):\n indent = seg\n elif \"\\n\" in (get_consumed_whitespace(seg) or \"\"):\n # Consumed whitespace case.\n # NOTE: In this situation, we're not looking for\n # separate newline and indent segments, we're\n # making the assumption that they'll be together\n # which I think is a safe one for now.\n return seg\n # i.e. 
if we never find a newline, it's not an indent.\n return None", "def update_indent(self) -> None:\n self.indent = self.base_indent * self.level\n self.newline_indent = \"\\n\" + self.indent", "def match_multiline(self, text, delimiter, in_state, style):\n\t\t# If inside triple-single quotes, start at 0\n\t\tif self.previousBlockState() == in_state:\n\t\t\tstart = 0\n\t\t\tadd = 0\n\t\t# Otherwise, look for the delimiter on this line\n\t\telse:\n\t\t\tstart = delimiter.indexIn(text)\n\t\t\t# Move past this match\n\t\t\tadd = delimiter.matchedLength()\n\n\t\t# As long as there's a delimiter match on this line...\n\t\twhile start >= 0:\n\t\t\t# Look for the ending delimiter\n\t\t\tend = delimiter.indexIn(text, start + add)\n\t\t\t# Ending delimiter on this line?\n\t\t\tif end >= add:\n\t\t\t\tlength = end - start + add + delimiter.matchedLength()\n\t\t\t\tself.setCurrentBlockState(0)\n\t\t\t# No; multi-line string\n\t\t\telse:\n\t\t\t\tself.setCurrentBlockState(in_state)\n\t\t\t\tlength = len(text) - start + add\n\t\t\t# Apply formatting\n\t\t\tself.setFormat(start, length, style)\n\t\t\t# Look for the next match\n\t\t\tstart = delimiter.indexIn(text, start + length)\n\n\t\t# Return True if still inside a multi-line string, False otherwise\n\t\tif self.currentBlockState() == in_state:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def _indent(self, dedent=True):\n num_newlines = self._get_cursor().selectedText().count(u\"\\u2029\")\n save_cur = self._get_cursor()\n cur = self._get_cursor()\n\n # move to first line of selection, if present\n cur.setPosition(cur.selectionStart())\n self._control.setTextCursor(cur)\n spaces = self._get_leading_spaces()\n # calculate number of spaces neded to align/indent to 4-space multiple\n step = self._tab_width - (spaces % self._tab_width)\n\n # insertText shouldn't replace if selection is active\n cur.clearSelection()\n\n # indent all lines in selection (ir just current) by `step`\n for _ in range(num_newlines+1):\n # update underlying cursor for _get_line_start_pos\n self._control.setTextCursor(cur)\n # move to first non-ws char on line\n cur.setPosition(self._get_line_start_pos())\n if dedent:\n spaces = min(step, self._get_leading_spaces())\n safe_step = spaces % self._tab_width\n cur.movePosition(QtGui.QTextCursor.Right,\n QtGui.QTextCursor.KeepAnchor,\n min(spaces, safe_step if safe_step != 0\n else self._tab_width))\n cur.removeSelectedText()\n else:\n cur.insertText(' '*step)\n cur.movePosition(QtGui.QTextCursor.Down)\n\n # restore cursor\n self._control.setTextCursor(save_cur)", "def step(self):\n # Try matching (if we haven't already)\n if self.untried_match():\n token = self.match()\n if token is not None:\n return token\n\n # Try expanding.\n production = self.expand()\n if production is not None:\n return production\n\n # Try backtracking\n if self.backtrack():\n self._trace_backtrack(self._tree, self._frontier)\n return True\n\n # Nothing left to do.\n return None", "def match_multiline(self, text, delimiter, in_state, style,\r\n hls=[], highlight_errors=lambda x: x, user_data=None):\r\n # If inside triple-single quotes, start at 0\r\n if self.previousBlockState() == in_state:\r\n start = 0\r\n add = 0\r\n # Otherwise, look for the delimiter on this line\r\n else:\r\n start = delimiter.indexIn(text)\r\n # Move past this match\r\n add = delimiter.matchedLength()\r\n\r\n # As long as there's a delimiter match on this line...\r\n while start >= 0:\r\n # Look for the ending delimiter\r\n end = delimiter.indexIn(text, start + add)\r\n # Ending 
delimiter on this line?\r\n if end >= add:\r\n length = end - start + add + delimiter.matchedLength()\r\n self.setCurrentBlockState(0)\r\n # No; multi-line string\r\n else:\r\n self.setCurrentBlockState(in_state)\r\n length = len(text) - start + add\r\n\r\n st_fmt = self.format(start)\r\n start_collides = [pos for pos in hls if pos[0] < start < pos[1]]\r\n\r\n # Apply formatting\r\n if ((st_fmt != STYLES['comment']) or\r\n ((st_fmt == STYLES['comment']) and\r\n (self.previousBlockState() != 0))) and \\\r\n (len(start_collides) == 0):\r\n style = highlight_errors(style, user_data)\r\n self.setFormat(start, length, style)\r\n else:\r\n self.setCurrentBlockState(0)\r\n # Look for the next match\r\n start = delimiter.indexIn(text, start + length)\r\n\r\n # Return True if still inside a multi-line string, False otherwise\r\n if self.currentBlockState() == in_state:\r\n return True\r\n else:\r\n return False", "def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )", "def read_multiline(self, line, f, indent):\n log.debug('Beginning multiline search at position %d in %s', f.tell(), self.fname)\n result = ''\n\n n = line.find(self.ms)\n if n >= 0:\n line = line[n + len(self.ms):]\n\n while line:\n if line[:indent].isspace() and len(line) > indent:\n line = line[indent:]\n\n if self.me in self.multi_re.sub('', line):\n result += ''.join(line.rsplit(self.me, 1))\n break\n\n result += line\n line = f.readline()\n else:\n raise ParseError('Unexpected EOF while parsing %s.' % self.fname)\n\n return result", "def highlightBlock(self, text):\n\t\t# Do other syntax formatting\n\t\tfor expression, nth, format in self.rules:\n\t\t\tindex = expression.indexIn(text, 0)\n\n\t\t\twhile index >= 0:\n\t\t\t\t# We actually want the index of the nth match\n\t\t\t\tindex = expression.pos(nth)\n\t\t\t\tlength = len(expression.cap(nth))\n\t\t\t\tself.setFormat(index, length, format)\n\t\t\t\tindex = expression.indexIn(text, index + length)\n\n\t\tself.setCurrentBlockState(0)\n\n\t\t# Do multi-line strings\n\t\tin_multiline = self.match_multiline(text, *self.tri_single)\n\t\tif not in_multiline:\n\t\t\tin_multiline = self.match_multiline(text, *self.tri_double)", "def test_first_line_indent(self):\n try:\n self._read_string(' Project indented')\n raise AssertionError('SyntaxError expected')\n except reader.SyntaxError:\n pass", "def _parse_hit_match_block(self, hit_match_data):\n\n def match_is_valid(match):\n \"\"\"Return True if match is not a Consensus column (PRIVATE).\n\n It's not possible to distinguish a sequence line from a Consensus line with\n a regexp, so need to check the ID column.\n \"\"\"\n return match.group(1).strip() != \"Consensus\"\n\n while True:\n if not self.line.strip(): # blank lines indicate the end of a hit block\n return\n match = re.match(_RE_MATCH_BLOCK_QUERY_SEQ, self.line)\n if match and match_is_valid(match):\n hit_match_data[\"query_seq\"] += match.group(3).strip()\n if hit_match_data[\"query_start\"] is None:\n hit_match_data[\"query_start\"] = int(match.group(2))\n hit_match_data[\"query_end\"] = int(match.group(4))\n else:\n match = re.match(_RE_MATCH_BLOCK_HIT_SEQ, self.line)\n if match and match_is_valid(match):\n hit_match_data[\"hit_seq\"] += match.group(3).strip()\n if hit_match_data[\"hit_start\"] is None:\n hit_match_data[\"hit_start\"] = int(match.group(2))\n hit_match_data[\"hit_end\"] = 
int(match.group(4))\n self.line = self.handle.readline()", "def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules :\n index = expression.indexIn(text, 0)\n \n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)", "def indent(self, indent: int):\n self._column_offset += indent\n self._current_text.set_x_offset(self._column_offset)", "def _indent(self, level: int) -> Text:\n\n return self.indent * level", "def _indents(self, line) -> Tuple[int, int]:\n import re\n\n indent = len(re.match(r'( *)', line).group(1))\n list_match = re.match(r'( *)(([*\\-+>]+|\\w+\\)|\\w+\\.) +)', line)\n if list_match:\n sub_indent = indent + len(list_match.group(2))\n else:\n sub_indent = indent\n\n return indent, sub_indent", "def highlightBlock(self, text):\n pos = matchLen = 0\n for matchNum in range(self.skipMatches + 1):\n pos += matchLen\n if self.searchText:\n pos = text.lower().find(self.searchText, pos)\n matchLen = len(self.searchText)\n else:\n match = self.regExpObj.search(text, pos)\n pos = match.start() if match else -1\n matchLen = len(match.group())\n if pos >= 0:\n self.setFormat(pos, matchLen, self.charFormat)", "def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove items from the start of the list, without touching the parent.
def trim_start(self, n=1): if n > len(self.data): raise IndexError("Size of trim too large; can't trim %s items " "from a list of size %s." % (n, len(self.data))) elif n < 0: raise IndexError('Trim size must be >= 0.') del self.data[:n] del self.items[:n] if self.parent: self.parent_offset += n
[ "def removeFirst(self):\n\t\tself.head = self.head.after", "def delete_beg(self):\n\n if self.head != None:\n\n # grab the node that comes after the head.\n aft_head = self.head.next_node\n\n # have the last node now point to that node\n self.end.next_node = aft_head\n\n # set the head property.\n self.head = aft_head\n\n else:\n raise ValueError(\"The list is empty\")", "def remove_from_parents(self):\n pass", "def remove_front(self) -> None:\n length = self.length()\n head = self.head\n cur = self.head.next\n # Checking for empty list. If list is empty an exception is raised.\n if length < 1:\n raise SLLException\n # Else, removing the node directly after the head.\n else:\n head.next = cur.next", "def removeFromFront(self):\n newNext = self.head.getNext().getNext()\n # remove the first real node in the list\n self.head.setNext(newNext)\n self.size -= 1\n self._tailRemoveCheck()", "def moveFirst(self):\n if self.parent:\n self.parent.childList.remove(self)\n self.parent.childList.insert(0, self)\n globalref.docRef.modified = True", "def __delete_first_node(\n self\n ):\n list_is_size_1 = self.size() == 1\n\n if list_is_size_1:\n self.first_node = None\n self.last_node = None\n else:\n self.first_node = self.get_node(1)\n self.first_node.set_previous_node(None)", "def chop(lst):\n del lst[0] # Removes the first element\n del lst[-1] # Removes the last element", "def chop(lst):\r\n del lst[0] # Removes the first element\r\n del lst[-1] # Removes the last element\r", "def removeFirst(self):\n self._removeFirst_animation()\n \n super(A_LinkedList, self).removeFirst()", "def nodeMoveFirst(self):\n self.currentSelectionModel().sortSelection()\n selNodes = self.currentSelectionModel().selectedNodes()\n undo.ChildListUndo(self.model.undoList,\n [node.parent for node in selNodes])\n for node in reversed(selNodes):\n node.parent.childList.remove(node)\n node.parent.childList.insert(0, node)\n self.currentSelectionModel().selectNodes(selNodes, False)\n self.updateAll()", "def remove_before_current():\n tltracks = mp.tracklist.get_tl_tracks()\n if len(tltracks) == 0:\n logger.debug(\"Tracklist empty. 
Aborting track removals.\")\n return\n\n # index of currently playing track\n current = tltracks[0]\n curridx = mp.tracklist.index(tlid=current.tlid)\n\n # remove tracks if necessary\n if curridx != 0:\n logger.warning(f\"Current track is at idx: {curridx}\"\n \"Removing all tracks before it.\")\n tracks = mp.tracklist.get_tracks()\n remuris = [t.uri for t in tracks[:curridx]]\n logger.debug(f\"Removing tracks: {remuris}\")\n remove_tracks(remuris)", "def delete_front(self) -> None:\n if not self.head:\n raise IndexError(\"Deleting from an empty list\")\n\n current_node = self.head\n\n if current_node.next_ptr == current_node:\n self.head, self.length = None, 0\n else:\n while current_node.next_ptr != self.head:\n current_node = current_node.next_ptr\n\n current_node.next_ptr = self.head.next_ptr\n self.head = self.head.next_ptr\n\n self.length -= 1", "def clear_items_sequential(self):\n pass", "def removeFront(self):\n if self.items:\n return self.items.pop(0)\n else:\n raise Exception('can not remove from empty deque')", "def remove_from_front(self):\n if len(self.orders) > 0:\n return self.orders.pop(0)\n else:\n return None", "def deleteFront(self) -> bool:\n if len(self.list):\n del self.list[0]\n return True\n else:return False", "def reset_position(self):\r\n eliminated = [pos.obj for pos in self.positions]\r\n self.positions = deque()\r\n return eliminated", "def remove(self):\r\n if self.parent:\r\n for i, node in enumerate(self.parent.children):\r\n if node is self:\r\n self.parent.changed()\r\n del self.parent.children[i]\r\n self.parent = None\r\n return i", "def delete_middle(self):\n\t\tif self.head is None:\n\t\t\traise ValueError(\"Cannot find an element in an empty list\")\n\n\t\tcurrent = self.head\n\t\tmid_index = 0\n\t\tcount = 0\n\t\taux = None\n\t\tmid = self.head\n\n\t\twhile current is not None:\n\t\t\tif mid_index < int(count/2):\n\t\t\t\taux = mid\n\t\t\t\tmid = mid.next\n\t\t\t\tmid_index += 1\n\t\t\tcount += 1\n\t\t\tcurrent = current.next\n\n\t\tif aux is None:\n\t\t\tself.head = self.head.next\n\t\telse:\n\t\t\taux.next = mid.next\n\n\t\tdel mid" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return source for index `i`.
def source(self, i): return self.info(i)[0]
[ "def source(self, index=0):\n if not self._sources:\n self.get_data()\n try:\n sitename, url = self._sources[index]\n except TypeError:\n return self._sources[index]\n except IndexError:\n raise NotFoundError(\"No episode sources found.\")\n\n ext = get_extractor(sitename)(\n url, quality=self.quality, headers=self.headers)\n self._sources[index] = ext\n\n return ext", "def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp", "def __getitem__(self, i):\n return self._data[i]", "def get_all_sources(item=None):", "def __getitem__(self, i):\n if not isinstance(i, slice):\n raise ValueError(\"Only slices can be used.\")\n return self.prepareIterator(i.step, i.start, i.stop)", "def sources(self) -> Iterator[Slot[ItemT]]:\n for slot in self._slots:\n if slot.is_source:\n yield slot", "def iterSources(self):\n for row in self.iterDictQuery(\"%s ORDER BY name\" % self.sourceQuery):\n yield ThermSource(self, **row)", "def get_source():", "def __getitem__(self, index):\n return self.target[self.position + index]", "def source_index(self):\n return os.path.join(self.data_directory, 'sources')", "def get_example(self, i):\n key = str(i)\n if key not in self.cache:\n self.cache[key] = self._dataset[i]\n return self.cache[key]", "def __getitem__(self, i):\n\n return self.documents[i]", "def srcdocs(self, i=1):\n res = []\n db = self.srcdb(i=i)\n for did in db:\n res += [dict(db[did])]\n return res", "def getSource(self, name: str) -> Optional[\"Source\"]:\r\n\r\n for source in self.currentScene.sources:\r\n if source.name == name:\r\n return source\r\n return None\r\n #TODO: Search in non-current scene\r", "def get_source_by_name(self, name):\r\n sources = self.call(GetSourcesList())\r\n for source in sources.getSources():\r\n if source[\"name\"] == name:\r\n return source\r\n return None", "def src_idx(self, src_id='', dest_id='', dest_ports=slice(None, None)):\n\n if src_id == '' and dest_id == '':\n src_id = self.A_id\n dest_id = self.B_id\n mask = self.src_mask(src_id, dest_id, dest_ports)\n return np.arange(self.N(src_id))[mask]", "def get_source_id(idx):\n global tgas\n if tgas is None:\n from .cfg import TGASFILE\n tgas = pd.read_hdf(TGASFILE, 'df')\n\n return tgas.iloc[idx].source_id", "def get_sources(self):\n\n self.sources = []\n cur = self.settings['conn'].cursor()\n cur.execute(\"SELECT id, name, fulltext, mediapath, memo, owner, date FROM source\")\n results = cur.fetchall()\n for r in results:\n guid = self.create_guid()\n suffix = \"txt\"\n if r[3] is not None:\n suffix = r[3].split('.')[-1]\n else:\n if '.' in r[1]:\n suffix = r[1].split('.')[-1]\n if suffix == 'transcribed':\n suffix = 'txt'\n filename = guid + '.' + suffix\n\n plaintext_filename = None\n if r[2] is not None:\n plaintext_filename = self.create_guid() + \".txt\"\n source = {'id': r[0], 'name': r[1], 'fulltext': r[2], 'mediapath': r[3],\n 'memo': r[4], 'owner': r[5], 'date': r[6].replace(' ', 'T'), 'guid': guid,\n 'filename': filename, 'plaintext_filename': plaintext_filename,\n 'external': None}\n if source['mediapath'] is not None:\n fileinfo = os.stat(self.settings['path'] + source['mediapath'])\n if fileinfo.st_size >= 2147483647:\n source['external'] = self.settings['directory']\n self.sources.append(source)", "def source(self):\n ret = self._get_attr(\"source\")\n return IEventSource(ret)", "def _source(self) -> Source:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return offset for index `i`.
def offset(self, i): return self.info(i)[1]
[ "def getOffset(self, index: int) -> int:\n ...", "def getMarkPosition(self, i: int) -> int:\n ...", "def getEndPosition(self, i: int) -> int:\n ...", "def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp", "def findPosition(self,i): # TEST\n return self.abstract.findPosition(self.notes[i])", "def offset_at_position(self):\n offset = 0\n for i, curr_line in enumerate(self.doc.iter_lines()):\n if i == self.line:\n break\n offset += len(curr_line)\n\n return offset + self.col", "def ComputeOffset(self, ind: 'itkIndex2') -> \"long long\":\n return _itkImagePython.itkImageBase2_ComputeOffset(self, ind)", "def ComputeOffset(self, ind: 'itkIndex3') -> \"long long\":\n return _itkImagePython.itkImageBase3_ComputeOffset(self, ind)", "def calc_offset(self,path,i,chunk_sz):\n i=int(i)\n chunk_sz=int(chunk_sz)\n if os.path.isfile(path):\n return (path,i*chunk_sz)\n\n self.lock.acquire()\n self.check_key(path) #Don't know if it is THREAD SAFE\n self.lock.release()\n \n dic,other = self.cache[path]\n\n chunk_start = int(i)*int(chunk_sz)\n owner_ind = other.bisect_right(chunk_start)\n owner_key = other.iloc[owner_ind]\n file = other[owner_key]\n\n file_start=0\n if owner_ind!=0:\n file_start = other.iloc[owner_ind-1]\n\n return (file,chunk_start-file_start)", "def __shapeIndex(self, i=None):\r\n shx = self.shx\r\n if not shx:\r\n return None\r\n if not self._offsets:\r\n # File length (16-bit word * 2 = bytes) - header length\r\n shx.seek(24)\r\n shxRecordLength = (unpack(\">i\", shx.read(4))[0] * 2) - 100\r\n numRecords = shxRecordLength / 8\r\n # Jump to the first record.\r\n shx.seek(100)\r\n for r in range(numRecords):\r\n # Offsets are 16-bit words just like the file length\r\n self._offsets.append(unpack(\">i\", shx.read(4))[0] * 2)\r\n shx.seek(shx.tell() + 4)\r\n if not i == None:\r\n return self._offsets[i]", "def ComputeOffset(self, ind: 'itkIndex4') -> \"long long\":\n return _itkImagePython.itkImageBase4_ComputeOffset(self, ind)", "def get_start_end_xy(self, i):\n direction_inc = [\n [-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]\n x_start = self.position[i][1]\n y_start = self.position[i][0]\n x_end = x_start + \\\n (len(self.words[i]) - 1) * direction_inc[self.direction[i]][1]\n y_end = y_start + \\\n (len(self.words[i]) - 1) * direction_inc[self.direction[i]][0]\n return (x_start, y_start), (x_end, y_end)", "def _to_cursor_pos(self, index):\n return index + 1", "def calc_sag_offset_idx(self):\n return self.offset_pnt-1", "def min_idx(a, i):\n pass", "def get_location_by_offset(filename, offset):\n with open(filename, encoding='utf-8', errors='ignore') as f:\n for row, line in enumerate(f, 1):\n length = len(line)\n if length < offset:\n offset -= length\n else:\n return row, offset + 1", "def GetOrigin(self, i: 'unsigned int') -> \"double\":\n return _ITKIOImageBaseBasePython.itkImageIOBase_GetOrigin(self, i)", "def GetOffset(self):\n return self.st_pos", "def image_id_at(self, i):\n return i", "def _offset(self, tile_index: int) -> Tuple[int, int]:\n width_tiles = self.shape_in_tiles[1]\n row = int(tile_index / width_tiles)\n col = tile_index % width_tiles\n\n return row * self.spec.height, col * self.spec.width" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return iterator yielding (source, offset, value) tuples.
def xitems(self): for (value, (source, offset)) in zip(self.data, self.items): yield (source, offset, value)
[ "def offset_iter(self):\n offset = 0\n for lo, hi in self.ranges.ranges():\n yield offset, offset + hi - lo, lo, hi\n offset += hi - lo", "def __iter__(self) -> typing.Iterator[typing.Tuple[ScanPoint, ScanIndex]]:\n # Storing a list of tuples instead of two list hopefully results in better data locality\n return zip(self._point_generator(), self._index_generator())", "def __iter__(self):\n try:\n # Assume it is a file-like object and try treating it as such\n # Things that don't have seek will trigger an exception\n self.source.seek(0)\n for line in self.source:\n yield utils.to_unicode(line).split()\n except AttributeError:\n # If it didn't work like a file, use it as a string filename\n with utils.smart_open(self.source) as fin:\n for line in fin:\n yield utils.to_unicode(line).split()", "def iter_addr_data(self):\r\n for rec_addr, rec_data in self.records():\r\n for offset in range(len(rec_data)):\r\n yield (rec_addr + offset, rec_data[offset])", "def _point_generator(self) -> typing.Iterator[ScanPoint]:\n for values in itertools.product(*self._scans):\n yield self.ScanPoint(**{k: v for k, v in zip(self._keys, values)})", "def _range_iter_ ( self ) :\n _r = self\n _num = _r.size()\n _i = 0\n while _i < _num :\n yield _r.at(_i)\n _i += 1", "def iter_between(self, idx_from, idx_to):\n self.file.seek(idx_from * self.block_size)\n for _ in range(idx_from, idx_to):\n yield self.file.read(self.block_size)", "def _coord_generator(self):\n coord_idx = 0\n while coord_idx < self.coords.shape[0]:\n yield self.coords[coord_idx:coord_idx + self.atoms_per_res]\n coord_idx += self.atoms_per_res", "def __iter__(self):\n for meta_offset in range(0, 4096, 4):\n z_offset, x_offset = divmod(meta_offset // 4, 32)\n x = self.x * 32 + x_offset\n z = self.z * 32 + z_offset\n chunk_location = self.locations[meta_offset:meta_offset + 4]\n offset = chunk_location[0] * (256 ** 2) + chunk_location[1] * 256 + chunk_location[2]\n if offset == 0:\n continue\n else:\n offset -= 2\n sector_count = chunk_location[3]\n yield ChunkColumn(self.data[4096 * offset:5096 * (offset + sector_count)], x=x, z=z)", "def tagIteratorAtAddress(self,addr):\n for index in xrange(self.getTagCountAtAddress(addr)):\n yield self.getTagAtAddressByIndex(addr,index)", "def _p_iteritems_ ( self ) :\n N = len ( self )\n for i in range ( N ) :\n yield i , ( self.x ( i ) , self.y ( i ) )", "def iter(self) -> Iterator[Sequence]:\n ...", "def next(self) -> Tuple[int, RowIndex, DataRow]:\n rowpos, rowidx, values = next(self.reader)\n return rowpos, rowidx, values", "def __next__(self):\n return _core.SwigPyIterator_next(self)", "def _index_generator(self) -> typing.Iterator[ScanIndex]:\n if self._enable_index:\n for indices in itertools.product(*(range(len(s)) for s in self._scans)):\n # Yield a scan index object for every set of indices\n yield self.ScanIndex(**{k: v for k, v in zip(self._keys, indices)})\n else:\n # Create one empty scan index\n si = self.ScanIndex()\n for _ in range(np.prod([len(s) for s in self._scans])):\n # Yield the empty scan index object for all\n yield si", "def pairs(seq):\n iterable, copied = tee(seq)\n next(copied)\n for x, y in zip(iterable, copied):\n yield x, y", "def __next__(self):\n x, y = self.p\n y += 1\n if y >= self.h:\n y = 0\n x += 1\n if x >= self.w:\n raise StopIteration()\n self.p = (x, y)\n return self.p2c(self.p), self.p", "def iter(source, sentinel=None): # known special case of iter\n pass", "def walk(self):\n cursor = self.next_operation\n while cursor is not None:\n yield cursor\n cursor = 
cursor.next_operation", "def __iter__(self):\n for value in self.cells:\n yield value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Trim `length` characters off the beginning of each item, in-place, from index `start` to `end`. No whitespace-checking is done on the trimmed text. Does not affect slice parent.
def trim_left(self, length, start=0, end=sys.maxsize): self.data[start:end] = [line[length:] for line in self.data[start:end]]
[ "def trim(self, start, end):\r\n self.ltrim(start, end)", "def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoBaseList_truncate(self, length)", "def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoChildList_truncate(self, length)", "def trim(self, start: int = None, end: int = None) -> None:\n if start is not None:\n self._data = self._data.iloc[start:]\n if end is not None and end > 0:\n self._data = self._data.iloc[:-end]", "def _trim_start_end(data: pd.DataFrame, start: int, end: int):\n start_idx = data.loc[:, \"start_locus\"].searchsorted(start)\n end_idx = data.loc[:, \"start_locus\"].searchsorted(end, side=\"left\")\n return data.iloc[start_idx:end_idx, :]", "def crop(listtocrop, length, start = 0): \n croppedlist = []\n for row in listtocrop:\n croppedlist.append(row[start:length+start])\n \n \n return croppedlist", "def slice(rec, start=0, end=0):\n\n # Nothing needs to be done\n if not (start or end):\n return rec\n\n # There is end but no start\n if end and not start:\n start = 1\n\n # There start but no end\n if start and not end:\n end = len(rec.seq)\n\n rec.seq = rec.seq[start - 1:end]\n rec.description = f\"{start}:{end} {rec.description}\"\n return rec", "def ltrim(self, key, start, end):\r\n return self.execute_command(\"LTRIM\", key, start, end)", "def truncate(vec, max_length, truncate_tail=True):\n if max_length is None:\n return vec\n if len(vec) <= max_length:\n return vec\n if truncate_tail:\n return vec[:max_length]\n else:\n return vec[-max_length:]", "def trim(self, start=True, end=True):\n if not self.frames.valid.any():\n return\n start_index = self.get_first_frame_index() if start else 0\n end_index = self.get_last_frame_index() if end else (self.size - 1)\n delete = np.full(self.size, True)\n delete[start_index:end_index + 1] = False\n\n if delete.any():\n self.frames.delete_indices(delete)\n if self.dependents is not None:\n for dependent in self.dependents.values():\n dependent.for_frame = dependent.for_frame[~delete]\n\n self.reindex()\n log.debug(f\"Trimmed to {self.size} frames.\")", "def cutseq(seq):\n rem = len(seq) % 3\n if rem != 0:\n return seq #[:-rem]\n else:\n return seq", "def trim_from_start(self, trim_from_start):\n self._trim_from_start = trim_from_start", "def trim_slice(lines, slice_tuple):\n\n def _empty(line):\n return not line or line.strip() == \">\"\n\n if not slice_tuple:\n return None\n\n slice_start, slice_end = slice_tuple\n\n if slice_start is None:\n slice_start = 0\n if slice_end is None:\n slice_end = len(lines)\n\n # Trim from beginning\n while slice_start < slice_end and _empty(lines[slice_start]):\n slice_start += 1\n\n # Trim from end\n while slice_end > slice_start and _empty(lines[slice_end - 1]):\n slice_end -= 1\n\n return (slice_start, slice_end)", "def strip_range(start, end):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = \"\"\n for index, char in enumerate(func(*args, **kwargs)):\n if start <= index < end:\n result += DOT\n else:\n result += char\n return result\n return wrapper\n return decorator", "def truncate(self, startindex: 'int const') -> \"void\":\n return _coin.SoLightPath_truncate(self, startindex)", "def strip_range(start, end):\n\n def actual_decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n # replace characters\n new_str = []\n for i, letter in enumerate(kwargs[\"text\"]):\n if i in range(start, end):\n new_str.append(DOT)\n else:\n new_str.append(letter)\n kwargs[\"text\"] = \"\".join(new_str)\n return f(*args, 
**kwargs)\n\n return wrapper\n\n return actual_decorator", "def truncate(self, length: 'int const', fit: 'int const'=0) -> \"void\":\n return _coin.SbPList_truncate(self, length, fit)", "def trim_tokens_at_front(x,\n sequence_length,\n keys_to_trim=None,\n **unused_kwargs):\n\n for key in (keys_to_trim or sequence_length.keys()):\n if key in x:\n # trim tokens, leaving room for EOS which gets added later\n x[key] = x[key][-(sequence_length[key] - 1):]\n return x", "def truncate(self, length: 'int const') -> \"void\":\n return _coin.SoPath_truncate(self, length)", "def truncate(self, start, end):\n note_count = 0\n track = self._pattern[0]\n idx = 0\n\n while idx < len(track):\n msg = track[idx]\n if note_count > end and not isinstance(msg, midi.EndOfTrackEvent):\n if isinstance(msg, midi.NoteOnEvent):\n off = self._get_note_off_event(msg.get_pitch(), track, idx)\n track.remove(msg)\n track.remove(off)\n elif not isinstance(msg, midi.NoteOffEvent):\n track.remove(msg)\n else:\n idx += 1\n elif isinstance(msg, midi.NoteOnEvent):\n if note_count < start:\n off = self._get_note_off_event(msg.get_pitch(), track, idx)\n track.remove(msg)\n track.remove(off)\n else:\n idx += 1\n note_count += 1\n else:\n idx += 1\n\n self.notes = self.get_notes()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a contiguous block of text. If `flush_left` is true, raise `UnexpectedIndentationError` if an indented line is encountered before the text block ends (with a blank line).
def get_text_block(self, start, flush_left=False): end = start last = len(self.data) while end < last: line = self.data[end] if not line.strip(): break if flush_left and (line[0] == ' '): source, offset = self.info(end) raise UnexpectedIndentationError(self[start:end], source, offset + 1) end += 1 return self[start:end]
[ "def _extract_block_from_next_pos(self, marker):\n block = ''\n if not self.oom.find_text(marker):\n return block\n\n line = self.oom.current()\n block += \"{}\\n\".format(line)\n for line in self.oom:\n if not line.startswith(' '):\n self.oom.back()\n break\n block += \"{}\\n\".format(line)\n return block", "def get_block_indent(text):\n lines = text.split('\\n')\n cnt = []\n for i in lines:\n if i != '' and not i.isspace():\n cnt.append(get_indent(i))\n return min(cnt)", "def one_paragraph_ragged_left():\n return dedent(\n \"\"\"\n Lorem ipsum dolor sit amet, consectetur adipiscing elit.\n Integer condimentum, orci at auctor venenatis, dolor\n orci congue felis, sit amet luctus dolor est in felis.\n Class aptent taciti sociosqu ad litora torquent per conubia nostra, per\n inceptos himenaeos. Ut imperdiet ex sit amet lacinia condimentum.\"\"\"\n ).strip()", "def _read_text_block(self, deadline=None):\n result = \"\"\n\n while True:\n line = _read_line(self.stdout_queue, deadline, \"utf-8\", \"replace\", False)\n\n if line.endswith(self.eof_marker):\n result += line[:-len(self.eof_marker)]\n break\n elif line.endswith('#EOF\\r\\n'):\n result += line[:-len('#EOF\\r\\n')]\n self.logger.warning('Got a CRLF-terminated #EOF - this is a driver bug.')\n break\n\n result += line\n\n return result", "def line_block_line(self, match, lineno):\r\n indented, indent, line_offset, blank_finish = \\\r\n self.state_machine.get_first_known_indented(match.end(),\r\n until_blank=True)\r\n text = u'\\n'.join(indented)\r\n text_nodes, messages = self.inline_text(text, lineno)\r\n line = nodes.line(text, '', *text_nodes)\r\n if match.string.rstrip() != '|': # not empty\r\n line.indent = len(match.group(1)) - 1\r\n return line, messages, blank_finish", "def continued_indentation(logical_line, tokens, indent_level, hang_closing,\r\n noqa, verbose):\r\n first_row = tokens[0][2][0]\r\n nrows = 1 + tokens[-1][2][0] - first_row\r\n if noqa or nrows == 1:\r\n return\r\n\r\n # indent_next tells us whether the next block is indented; assuming\r\n # that it is indented by 4 spaces, then we should not allow 4-space\r\n # indents on the final continuation line; in turn, some other\r\n # indents are allowed to have an extra 4 spaces.\r\n indent_next = logical_line.endswith(':')\r\n\r\n row = depth = 0\r\n # remember how many brackets were opened on each line\r\n parens = [0] * nrows\r\n # relative indents of physical lines\r\n rel_indent = [0] * nrows\r\n # for each depth, collect a list of opening rows\r\n open_rows = [[0]]\r\n # visual indents\r\n indent_chances = {}\r\n last_indent = tokens[0][2]\r\n # for each depth, memorize the visual indent column\r\n indent = [last_indent[1]]\r\n if verbose >= 3:\r\n print(\">>> \" + tokens[0][4].rstrip())\r\n\r\n for token_type, text, start, end, line in tokens:\r\n\r\n newline = row < start[0] - first_row\r\n if newline:\r\n row = start[0] - first_row\r\n newline = (not last_token_multiline and\r\n token_type not in (tokenize.NL, tokenize.NEWLINE))\r\n\r\n if newline:\r\n # this is the beginning of a continuation line.\r\n last_indent = start\r\n if verbose >= 3:\r\n print(\"... 
\" + line.rstrip())\r\n\r\n # record the initial indent.\r\n rel_indent[row] = expand_indent(line) - indent_level\r\n\r\n # identify closing bracket\r\n close_bracket = (token_type == tokenize.OP and text in ']})')\r\n\r\n # is the indent relative to an opening bracket line?\r\n valid_hang = 4 if (hang_closing or not close_bracket) else 0\r\n for open_row in reversed(open_rows[depth]):\r\n if rel_indent[row] == rel_indent[open_row] + valid_hang:\r\n break\r\n hang = rel_indent[row] - rel_indent[open_row]\r\n # is there any chance of visual indent?\r\n visual_indent = (not close_bracket and hang > 0 and\r\n indent_chances.get(start[1]))\r\n\r\n if close_bracket and indent[depth]:\r\n # closing bracket for visual indent\r\n if start[1] != indent[depth]:\r\n yield (start, \"E124 closing bracket does not match \"\r\n \"visual indentation\")\r\n elif close_bracket and not hang:\r\n # closing bracket matches indentation of opening bracket's line\r\n if hang_closing:\r\n yield start, \"E133 closing bracket is missing indentation\"\r\n elif indent[depth] and start[1] < indent[depth]:\r\n if visual_indent is not True:\r\n # visual indent is broken\r\n yield (start, \"E128 continuation line \"\r\n \"under-indented for visual indent\")\r\n elif hang == 4 or (indent_next and rel_indent[row] == 8):\r\n # hanging indent is verified\r\n if close_bracket and not hang_closing:\r\n yield (start, \"E123 closing bracket does not match \"\r\n \"indentation of opening bracket's line\")\r\n elif visual_indent is True:\r\n # visual indent is verified\r\n if not indent[depth]:\r\n indent[depth] = start[1]\r\n elif visual_indent in (text, str):\r\n # ignore token lined up with matching one from a previous line\r\n pass\r\n else:\r\n # indent is broken\r\n if hang <= 0:\r\n error = \"E122\", \"missing indentation or outdented\"\r\n elif indent[depth]:\r\n error = \"E127\", \"over-indented for visual indent\"\r\n elif hang % 4:\r\n error = \"E121\", \"indentation is not a multiple of four\"\r\n else:\r\n error = \"E126\", \"over-indented for hanging indent\"\r\n yield start, \"%s continuation line %s\" % error\r\n\r\n # look for visual indenting\r\n if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)\r\n and not indent[depth]):\r\n indent[depth] = start[1]\r\n indent_chances[start[1]] = True\r\n if verbose >= 4:\r\n print(\"bracket depth %s indent to %s\" % (depth, start[1]))\r\n # deal with implicit string concatenation\r\n elif (token_type in (tokenize.STRING, tokenize.COMMENT) or\r\n text in ('u', 'ur', 'b', 'br')):\r\n indent_chances[start[1]] = str\r\n # special case for the \"if\" statement because len(\"if (\") == 4\r\n elif not indent_chances and not row and not depth and text == 'if':\r\n indent_chances[end[1] + 1] = True\r\n\r\n # keep track of bracket depth\r\n if token_type == tokenize.OP:\r\n if text in '([{':\r\n depth += 1\r\n indent.append(0)\r\n if len(open_rows) == depth:\r\n open_rows.append([])\r\n open_rows[depth].append(row)\r\n parens[row] += 1\r\n if verbose >= 4:\r\n print(\"bracket depth %s seen, col %s, visual min = %s\" %\r\n (depth, start[1], indent[depth]))\r\n elif text in ')]}' and depth > 0:\r\n # parent indents should not be more than this one\r\n prev_indent = indent.pop() or last_indent[1]\r\n for d in range(depth):\r\n if indent[d] > prev_indent:\r\n indent[d] = 0\r\n for ind in list(indent_chances):\r\n if ind >= prev_indent:\r\n del indent_chances[ind]\r\n del open_rows[depth + 1:]\r\n depth -= 1\r\n if depth:\r\n indent_chances[indent[depth]] = True\r\n for 
idx in range(row, -1, -1):\r\n if parens[idx]:\r\n parens[idx] -= 1\r\n break\r\n assert len(indent) == depth + 1\r\n if start[1] not in indent_chances:\r\n # allow to line up tokens\r\n indent_chances[start[1]] = text\r\n\r\n last_token_multiline = (start[0] != end[0])\r\n\r\n if indent_next and expand_indent(line) == indent_level + 4:\r\n yield (last_indent, \"E125 continuation line does not distinguish \"\r\n \"itself from next logical line\")", "def codeblock(self, blk):\n lines = blk.splitlines()\n for l in lines:\n # Adds indentation on non empty lines\n if re.match(\"^\\s*$\", l) is None:\n self.current_code += self.current_level * self.indent_size * ' '\n self.current_code += l\n self.current_code += \"\\n\"", "def continued_indentation(logical_line, tokens, indent_level, hang_closing,\r\n noqa, verbose):\r\n first_row = tokens[0][2][0]\r\n nrows = 1 + tokens[-1][2][0] - first_row\r\n if noqa or nrows == 1:\r\n return\r\n\r\n # indent_next tells us whether the next block is indented; assuming\r\n # that it is indented by 4 spaces, then we should not allow 4-space\r\n # indents on the final continuation line; in turn, some other\r\n # indents are allowed to have an extra 4 spaces.\r\n indent_next = logical_line.endswith(':')\r\n\r\n row = depth = 0\r\n # remember how many brackets were opened on each line\r\n parens = [0] * nrows\r\n # relative indents of physical lines\r\n rel_indent = [0] * nrows\r\n # visual indents\r\n indent_chances = {}\r\n last_indent = tokens[0][2]\r\n indent = [last_indent[1]]\r\n if verbose >= 3:\r\n print(\">>> \" + tokens[0][4].rstrip())\r\n\r\n for token_type, text, start, end, line in tokens:\r\n\r\n newline = row < start[0] - first_row\r\n if newline:\r\n row = start[0] - first_row\r\n newline = (not last_token_multiline and\r\n token_type not in (tokenize.NL, tokenize.NEWLINE))\r\n\r\n if newline:\r\n # this is the beginning of a continuation line.\r\n last_indent = start\r\n if verbose >= 3:\r\n print(\"... 
\" + line.rstrip())\r\n\r\n # record the initial indent.\r\n rel_indent[row] = expand_indent(line) - indent_level\r\n\r\n if depth:\r\n # a bracket expression in a continuation line.\r\n # find the line that it was opened on\r\n for open_row in range(row - 1, -1, -1):\r\n if parens[open_row]:\r\n break\r\n else:\r\n # an unbracketed continuation line (ie, backslash)\r\n open_row = 0\r\n hang = rel_indent[row] - rel_indent[open_row]\r\n close_bracket = (token_type == tokenize.OP and text in ']})')\r\n visual_indent = (not close_bracket and hang > 0 and\r\n indent_chances.get(start[1]))\r\n\r\n if close_bracket and indent[depth]:\r\n # closing bracket for visual indent\r\n if start[1] != indent[depth]:\r\n yield (start, \"E124 closing bracket does not match \"\r\n \"visual indentation\")\r\n elif close_bracket and not hang:\r\n # closing bracket matches indentation of opening bracket's line\r\n if hang_closing:\r\n yield start, \"E133 closing bracket is missing indentation\"\r\n elif visual_indent is True:\r\n # visual indent is verified\r\n if not indent[depth]:\r\n indent[depth] = start[1]\r\n elif visual_indent in (text, str):\r\n # ignore token lined up with matching one from a previous line\r\n pass\r\n elif indent[depth] and start[1] < indent[depth]:\r\n # visual indent is broken\r\n yield (start, \"E128 continuation line \"\r\n \"under-indented for visual indent\")\r\n elif hang == 4 or (indent_next and rel_indent[row] == 8):\r\n # hanging indent is verified\r\n if close_bracket and not hang_closing:\r\n yield (start, \"E123 closing bracket does not match \"\r\n \"indentation of opening bracket's line\")\r\n else:\r\n # indent is broken\r\n if hang <= 0:\r\n error = \"E122\", \"missing indentation or outdented\"\r\n elif indent[depth]:\r\n error = \"E127\", \"over-indented for visual indent\"\r\n elif hang % 4:\r\n error = \"E121\", \"indentation is not a multiple of four\"\r\n else:\r\n error = \"E126\", \"over-indented for hanging indent\"\r\n yield start, \"%s continuation line %s\" % error\r\n\r\n # look for visual indenting\r\n if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)\r\n and not indent[depth]):\r\n indent[depth] = start[1]\r\n indent_chances[start[1]] = True\r\n if verbose >= 4:\r\n print(\"bracket depth %s indent to %s\" % (depth, start[1]))\r\n # deal with implicit string concatenation\r\n elif (token_type in (tokenize.STRING, tokenize.COMMENT) or\r\n text in ('u', 'ur', 'b', 'br')):\r\n indent_chances[start[1]] = str\r\n # special case for the \"if\" statement because len(\"if (\") == 4\r\n elif not indent_chances and not row and not depth and text == 'if':\r\n indent_chances[end[1] + 1] = True\r\n\r\n # keep track of bracket depth\r\n if token_type == tokenize.OP:\r\n if text in '([{':\r\n depth += 1\r\n indent.append(0)\r\n parens[row] += 1\r\n if verbose >= 4:\r\n print(\"bracket depth %s seen, col %s, visual min = %s\" %\r\n (depth, start[1], indent[depth]))\r\n elif text in ')]}' and depth > 0:\r\n # parent indents should not be more than this one\r\n prev_indent = indent.pop() or last_indent[1]\r\n for d in range(depth):\r\n if indent[d] > prev_indent:\r\n indent[d] = 0\r\n for ind in list(indent_chances):\r\n if ind >= prev_indent:\r\n del indent_chances[ind]\r\n depth -= 1\r\n if depth:\r\n indent_chances[indent[depth]] = True\r\n for idx in range(row, -1, -1):\r\n if parens[idx]:\r\n parens[idx] -= 1\r\n rel_indent[row] = rel_indent[idx]\r\n break\r\n assert len(indent) == depth + 1\r\n if start[1] not in indent_chances:\r\n # allow to 
line up tokens\r\n indent_chances[start[1]] = text\r\n\r\n last_token_multiline = (start[0] != end[0])\r\n\r\n if indent_next and expand_indent(line) == indent_level + 4:\r\n yield (last_indent, \"E125 continuation line does not distinguish \"\r\n \"itself from next logical line\")", "def _indentblock(self, text, level):\n if not self.prefs.lineSeparator:\n return text\n return self.prefs.lineSeparator.join(\n ['%s%s' % (level * self.prefs.indent, line)\n for line in text.split(self.prefs.lineSeparator)]\n )", "def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)", "def line_tokenize(text):\n current_line = []\n after_equals = False\n\n for block in tokenize(text):\n\n if block.startswith('\\n'):\n if len(current_line) > 1:\n if after_equals != False:\n current_line.append(' '.join(after_equals))\n after_equals = False\n yield current_line\n elif len(current_line) == 1 and len(current_line[0].strip()):\n yield current_line\n\n current_line = [block[1:]]\n\n elif after_equals != False:\n after_equals.append(block)\n\n else:\n if block == '=':\n after_equals = []\n current_line.append(block)\n elif len(block):\n current_line.append(block)", "def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)", "def test_dedent_too_much(one_paragraph_ragged_left):\n dedented_text = console.dedent(console.indent(one_paragraph_ragged_left, 2), num_spaces=4)\n assert dedented_text == one_paragraph_ragged_left", "def test_hanging_indent(text):\n width = len(text) // 2 # Should force fill to three lines\n filled_text = console.fill(text, width=width, hanging=4)\n num_lines = filled_text.count(\"\\n\") + 1\n assert filled_text.count(\"\\n \") == 0 # 5 spaces indent\n assert filled_text.count(\"\\n \") == num_lines - 1 # 4 spaces indent", "def split_by_newline(text, start=0):\r\n index = start\r\n while 1:\r\n new_index = text.find('\\n', index)\r\n if new_index == -1:\r\n yield (-1, text[index:])\r\n break\r\n yield (new_index + 1, text[index:new_index])\r\n index = new_index + 1", "def delete_till_beginning_of_line(text):\n if text.rfind(\"\\n\") == -1:\n return ''\n return text[0:text.rfind(\"\\n\") + 1]", "def _split_markdown_lines(markdown):\n block_fenced = False\n indent_fenced = False\n for line_number, line in enumerate(markdown.splitlines(True)):\n open_fence_this_iteration = False\n indent_fenced = line.startswith(\" \") or (indent_fenced and WHITE_SPACE_ONLY_PATTERN.match(line))\n if not block_fenced:\n if BLOCK_FENCE_START.match(line):\n open_fence_this_iteration = True\n block_fenced = True\n yield (line, block_fenced or indent_fenced, open_fence_this_iteration, line_number)\n if not open_fence_this_iteration and BLOCK_FENCE_END.match(line):\n block_fenced = False", "def dedent(text, tabsize=8, skip_first_line=False):\n lines = text.splitlines(1)\n _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)\n return ''.join(lines)", "def test_md027_good_block_quote_ordered_list_indented_code_block():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\",\n \"resources\",\n \"rules\",\n \"md027\",\n \"good_block_quote_ordered_list_indented_code_block.md\",\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n 
expected_error = \"\"\n\n # Act\n execute_results = scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "def make_block(text, blocksize=60, spaces=False, newlines=False):\n if not spaces:\n # Simple block by chars.\n return (text[i:i + blocksize] for i in range(0, len(text), blocksize))\n if newlines:\n # Preserve newlines\n lines = []\n for line in text.split('\\n'):\n lines.extend(make_block(line, blocksize=blocksize, spaces=True))\n return lines\n\n # Wrap on spaces (ignores newlines)..\n words = text.split()\n lines = []\n curline = ''\n for word in words:\n possibleline = ' '.join((curline, word)) if curline else word\n\n if len(possibleline) > blocksize:\n lines.append(curline)\n curline = word\n else:\n curline = possibleline\n if curline:\n lines.append(curline)\n return lines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pad all double-width characters in self by appending `pad_char` to each. For East Asian language support.
def pad_double_width(self, pad_char): if hasattr(unicodedata, 'east_asian_width'): east_asian_width = unicodedata.east_asian_width else: return # new in Python 2.4 for i in range(len(self.data)): line = self.data[i] if isinstance(line, str): new = [] for char in line: new.append(char) if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width new.append(pad_char) self.data[i] = ''.join(new)
[ "def padIt(str: unicode, padlen: int, endchar: int, padded: bool) -> unicode:\n ...", "def _pad(string, length, char=None):\r\n \r\n if char == None:\r\n addchar = ' '\r\n else:\r\n addchar = char\r\n while len(string) < length:\r\n string += addchar\r\n return string", "def padCharacters(self):\n #Find the longest word in the dataset\n\n # maxCharLen is the longest word\n maxCharLen = 0\n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n for sentence in data:\n for token in sentence['characters']:\n maxCharLen = max(maxCharLen, len(token))\n \n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n #Pad each other word with zeros\n for sentenceIdx in range(len(data)):\n for tokenIdx in range(len(data[sentenceIdx]['characters'])):\n token = data[sentenceIdx]['characters'][tokenIdx]\n data[sentenceIdx]['characters'][tokenIdx] = np.pad(token, (0,maxCharLen-len(token)), 'constant')\n \n self.maxCharLen = maxCharLen", "def padCharacters(self):\n #Find the longest word in the dataset\n maxCharLen = 0\n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n for sentence in data:\n for token in sentence['characters']:\n maxCharLen = max(maxCharLen, len(token))\n \n for data in [self.dataset['trainMatrix'], self.dataset['devMatrix'], self.dataset['testMatrix']]: \n #Pad each other word with zeros\n for sentenceIdx in range(len(data)):\n for tokenIdx in range(len(data[sentenceIdx]['characters'])):\n token = data[sentenceIdx]['characters'][tokenIdx]\n data[sentenceIdx]['characters'][tokenIdx] = np.pad(token, (0,maxCharLen-len(token)), 'constant')\n \n self.maxCharLen = maxCharLen", "def pad(fingering, width):\n return ''.join(str(f).ljust(width) for f in str(fingering))", "def pad(s: str) -> str:\n return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)", "def input_pad_to_len(words, padded_word_len, padded_char_len, word_padding=0, char_padding=0):\n if len(words) < padded_word_len:\n words += [[word_padding]] * (padded_word_len - len(words))\n elif len(words) > padded_word_len:\n words = words[:padded_word_len]\n else:\n pass\n words = [word + [char_padding] * (padded_char_len - len(word)) if len(word) < padded_char_len else word for word in words]\n return words", "def pad(text, length):\n return (' '*max(0, length-len(text)))+text", "def right_pad(string, length, character):\r\n add_len = length - len(string)\r\n return f'{string}{character * add_len}'", "def right_zfill(chars, size):\n for i in range(len(chars)):\n char = '{:<016d}'.format(chars[i])\n chars[i] = char\n return chars", "def _pad_to(val, length=10, right_align=False):\n ret = str(val)\n if len(ret) < 10:\n padding = \" \"*(10-len(ret))\n\n if right_align:\n ret = padding + ret\n else:\n ret = ret + padding\n\n return ret", "def input_pad_to_len(words, padded_word_len, padded_char_len, word_padding=0, char_padding=0):\n if len(words) < padded_word_len:\n words += [[word_padding]] * (padded_word_len - len(words))\n words = [word + [char_padding] * (padded_char_len - len(word)) if len(word) < padded_char_len else word for word in words]\n return words", "def pad(self,\n length: int,\n pad_id: Optional[int] = 0,\n pad_type_id: Optional[int] = 0,\n pad_token: Optional[str] = \"[PAD]\",\n direction: Optional[str] = \"right\"):\n pass", "def pad(self, suffix):", "def pad_left(item, width, pad_char=\"0\"):\n return str(item).rjust(width,pad_char)", "def sep ( char=\"-\", pad=20 ):\n print( char * 
pad )", "def rjust(self, width: int, fillchar: str = ' ') -> BetterString:\r\n ret = self.string.rjust(int(width), str(fillchar))\r\n\r\n return BetterString(ret)", "def pad_sequence(sequence, n, pad_left: bool = ..., pad_right: bool = ..., left_pad_symbol: Optional[Any] = ..., right_pad_symbol: Optional[Any] = ...):\n ...", "def text_extend(text, width, padchar=\" \"):\n out = text.ljust(width, padchar)\n if len(out) > width:\n return \"...\"+out[(-1*width)+2:-1]\n return out", "def fill_with_space_to_length(string_to_fill, new_length, align='left'):\n delta = new_length - len(string_to_fill)\n if delta > 0:\n if align == 'left':\n return string_to_fill + ' ' * delta\n return ' ' * delta + string_to_fill\n return string_to_fill" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and return the promotion candidate and its index. Return (None, None) if no valid candidate was found.
def candidate_index(self, node): index = node.first_child_not_matching_class( nodes.PreBibliographic) if index is None or len(node) > (index + 1) or \ not isinstance(node[index], nodes.section): return None, None else: return node[index], index
[ "def get_tag(self, candidate):\n return self.ordered.index(candidate)", "def __index__(self, product, options=[]):\n cart_items = self.cart_serializable\n for i in range(len(cart_items)):\n if cart_items[i]['product_pk'] == product.pk:\n if sorted(cart_items[i]['option_pks']) == sorted([i.pk for i in options]):\n return i\n return -1", "def find_index(self, path):\n for i in self.index:\n candidate = os.path.join(path, i)\n if os.path.isfile(candidate):\n return i, candidate\n return None, None", "def get_version(self, index):\r\n for verneed, vernaux_iter in self.iter_versions():\r\n for vernaux in vernaux_iter:\r\n if vernaux['vna_other'] == index:\r\n return verneed, vernaux\r\n\r\n return None", "def find(cls, promotion_id):\n try:\n document = cls.database[promotion_id]\n except KeyError:\n return None\n if '_rev' in document:\n return Promotion().deserialize(document)\n return None", "def action_index(self, act_slot_response):\r\n for (i, action) in enumerate(self.feasible_actions):\r\n if act_slot_response == action:\r\n return i\r\n raise Exception(\"Action Index Not Found\")\r\n return None", "def get_match(target, candidates, w2vmodel):\n # parse target string into a list of tokens\n new_s1 = get_token_list(target)\n scores = {candidates.index(s): pulp.value(word_mover_distance_probspec(new_s1, s, w2vmodel).objective) for\n s in\n candidates}\n return candidates[min(scores, key=scores.get)]", "def index_by_chunk(self, chunk):\n for i, result in enumerate(self.result):\n if str(chunk).strip() == str(result.chunk).strip():\n return int(i)\n return None", "def index(self):\n self_component = self.parent_component()\n if self_component is None:\n return None\n for idx, component_data in self_component.iteritems():\n if component_data is self:\n return idx\n return None", "def __call__(self, numbers: Sequence, target: Number) -> OptionalNumber:\r\n\t\t# iterate over the index, value pairings in the sequence\r\n\t\tfor index, number in enumerate(numbers):\r\n\t\t\t# if the current number is equal to the target\r\n\t\t\tif number == target:\r\n\t\t\t\t# return the index associated with the target\r\n\t\t\t\treturn index\r\n\t\t# target not found in the sequence, return None\r\n\t\treturn None", "def get_index(sta, ver, svn, fix):\r\n for n, status, version, subver, hotfix in zip(range(len(version_n)), status_n, version_n, subver_n, hotfix_n):\r\n if sta == status and ver == version and svn == subver and fix == hotfix:\r\n return n\r\n return -1", "def get_index(self, key):\r\n index = self.horner_hash(key)\r\n j = 0\r\n for i in range(0, self.table_size):\r\n j = (index + i ** 2) % self.table_size\r\n if self.hash_table[j] and self.hash_table[j].key == key:\r\n return j\r\n return None", "def GetMatch(self, command, index):\n self.reason = ''\n token = command[index]\n found_close_match = False\n close_matches = 0\n matching_token = None\n for value in self.match:\n if value == token:\n return value\n if value.startswith(token):\n close_matches += 1\n if not found_close_match:\n # The \"closest\" is the first non-exact find if we don't\n # find an exact match.\n matching_token = value\n found_close_match = True\n if found_close_match and close_matches == 1:\n return matching_token\n\n self.reason = 'Must match one of: %s' % ','.join(self.match)\n return None", "def find_candidate(num_list):\n\n\tif num_list is None or len(num_list) == 0:\n\t\treturn None\n\n\tmajority_index = 0\n\tcount = 1\n\n\tfor i in range(1, len(num_list)):\n\t\tif num_list[majority_index] == 
num_list[i]:\n\t\t\tcount += 1\n\t\telse:\n\t\t\tcount -= 1\n\n\t\tif count == 0:\n\t\t\tmajority_index = i\n\t\t\tcount = 1\n\n\treturn num_list[majority_index]", "def find(target, items):\n for i in range(len(items)):\n if target == items[i]:\n return i\n return -1", "def compute_closest_waypt_idx(self, desired_waypt_config, waypt_configs):\n # TODO: Potentially add linear and angular velocity here\n diff_pos_nk2 = desired_waypt_config.position_nk2() - waypt_configs.position_nk2()\n diff_heading_nk1 = angle_normalize(desired_waypt_config.heading_nk1().numpy() -\n waypt_configs.heading_nk1().numpy())\n diff = tf.concat([diff_pos_nk2, diff_heading_nk1], axis=2)\n idx = tf.argmin(tf.norm(diff, axis=2))\n return idx.numpy()[0]", "def _getInformantBestPos(self,particle, swarm):\n best_fitness = sys.float_info.max\n best_pos = None\n for i in particle.informants:\n if best_fitness > swarm[i].fitness:\n best_fitness = swarm[i].fitness\n best_pos = swarm[i].position\n return best_pos", "def get_candidate(self, id):\n return self.candidate_hash[id]\n #for c in self.candidates:\n # if c.id == id:\n # return c\n #return False", "def __get_index_from_random_card(self):\n return self.cards.index(self.get_random_card())", "def getCase(self,position):\n for i in range(len(self.cases)):\n if position in self.cases[i]:\n return i" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set document['title'] metadata title from the following sources, listed in order of priority: an existing document['title'] attribute, the "title" setting, or the document's first section title.
def set_metadata(self): if not self.document.hasattr('title'): if self.document.settings.title is not None: self.document['title'] = self.document.settings.title elif len(self.document) and isinstance(self.document[0], nodes.title): self.document['title'] = self.document[0].astext()
[ "def title(self, title):\r\n doc.title = title", "def set_title(self, value):\n return self._set_one_attribute(self.AttributeNames.TITLE, value)", "def _write_title(self) -> None:\n self.doc.preamble.append(Command('title', self.json_summary[\"title\"]))\n self.doc.preamble.append(Command('author', f\"FastEstimator {fe.__version__}\"))\n self.doc.preamble.append(Command('date', NoEscape(r'\\today')))\n self.doc.append(NoEscape(r'\\maketitle'))", "def set_title(self, new_title):\n\n\t\tself.title(new_title)", "async def set_title(self, title: str):\n self.preview_embed.title = title", "def updateTitle(self, newTitle):\n if self.title == None: \n self.title = newTitle", "def create_title(self):\n self.data['Title'] = (self.data['Name']\n .str.extract(r' ([A-Za-z]+)\\.', expand=False)\n .replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer',\n 'Lady', 'Major', 'Rev', 'Sir', 'Dona'], 'Rare')\n .replace('Mlle', 'Miss')\n .replace('Mme', 'Mrs')\n .replace('Ms', 'Miss'))", "def set_photo_title_from_template(\n photo: osxphotos.PhotoInfo, title_template: str, dry_run: bool\n):\n if not title_template:\n return\n\n # don't render None values\n render_options = RenderOptions(none_str=\"\")\n\n title_string, _ = photo.render_template(title_template, render_options)\n title_string = [ts for ts in title_string if ts]\n if not title_string:\n verbose(\n f\"No title returned from template, nothing to do: [bold]{title_template}\"\n )\n return\n\n if len(title_string) > 1:\n echo_error(\n f\"[error] Title template must return a single string: [bold]{title_string}\"\n )\n sys.exit(1)\n\n verbose(f\"Setting [i]title[/i] to [bold]{title_string[0]}\")\n if not dry_run:\n ps_photo = photoscript_photo(photo)\n ps_photo.title = title_string[0]", "def save(self, *args, **kwargs):\n\t\tif self.title == None or len(self.title) == 0: self.title = str(self.doc)\n\t\tif self.title.rfind('/') != -1: self.title = self.title[self.title.rfind('/') + 1:]\n\t\tsuper(Document, self).save(*args, **kwargs)", "def __build_title_stuff( self, data_dict ):\n title_info = etree.SubElement( self.mods, self.MODS+'titleInfo' )\n title = etree.SubElement( title_info, self.MODS+'title' )\n title.text = data_dict[ 'object_title' ] or 'No title'", "def fetch_title( f ):\n return f.Info['/Title']", "def add_titles(self, tag):\n self.title_rom = tag.get('data-romaji')\n self.title_en = tag.get('data-english')\n self.title_other = tag.get('data-alternate').split(',')", "def set_title(self, title, **kwargs):\n self.fig.suptitle(t=title, **kwargs)", "def page_title(self, page_title):\n\n self._page_title = page_title", "def edit_title(self, new_title):\n self.title = new_title", "def _write_title(self, **kwargs):\n\t\tif kwargs.get('write_title'):\n\t\t\tself.fig.suptitle(self.fig_title)\n\t\telse:\n\t\t\tpass", "def title(self):\n if self._score.metadata is not None:\n return self._score.metadata.title\n return self._filename", "def _xml_title(self):\n title = E.titles(E.title(self.title))\n\n return title", "def get_title_text(doc_id):\n data=read_data(\"doc-data.json\")\n\n text = data.get(doc_id).get(\"Text\")\n\n title = data.get(doc_id).get(\"Title\")\n\n return title[0] + text", "def meta_track_with_title(meta_track: MidiTrack, title: str) -> MidiTrack:\n result = meta_track.copy()\n title_msg = MetaMessage('track_name', name=title)\n\n # insert title message at the very beginning, it takes precedence over\n # any later title messages lolidk\n result.insert(0, title_msg)\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mask the email address in `ref` and return a replacement node. `ref` is returned unchanged if it contains no email address. For email addresses such as "user@host", mask the address as "user at host" (text) to thwart simple email address harvesters (except for those listed in `non_masked_addresses`). If a PEP number (`pepno`) is given, return a reference including a default email subject.
def mask_email(ref, pepno=None): if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'): if ref['refuri'][8:] in non_masked_addresses: replacement = ref[0] else: replacement_text = ref.astext().replace('@', '&#32;&#97;t&#32;') replacement = nodes.raw('', replacement_text, format='html') if pepno is None: return replacement else: ref['refuri'] += '?subject=PEP%%20%s' % pepno ref[:] = [replacement] return ref else: return ref
[ "def mask_email(email: str) -> str:\n if email.count(\"@\") != 1:\n raise ValueError(\"Invalid email address, should have exactly one @\")\n address, domain = email.split(\"@\")\n if not address:\n raise ValueError(\"Invalid email address, address should not be empty\")\n if not domain:\n raise ValueError(\"Invalid email address, domain should not be empty\")\n domain_fore, _, domain_tld = domain.rpartition(\".\")\n if not domain_fore:\n raise ValueError(\"Invalid email address, cannot work out domain\")\n if not domain_tld:\n raise ValueError(\"Invalid email address, cannot work out domain tld\")\n return f\"{address[:2]}***@{domain_fore[:2]}***.{domain_tld}\"", "def perturb_reference(self, reference: Reference, rng: Random) -> Reference:\n return replace(reference, output=Output(text=self.perturb(reference.output.text, rng)), tags=reference.tags)", "def get_email(self, node, expression='.', *, error=True):\n\n matches = []\n # If the text would be split across multiple sub-tags.\n for match in node.xpath('{}//*[contains(text(), \"@\")]'.format(expression)):\n matches.append(match.text_content())\n # The text version is more likely to be correct, as it is more visible,\n # e.g. ca_bc has one `href` of `mailto:first.last.mla@leg.bc.ca`.\n for match in node.xpath('{}//a[contains(@href,\"mailto:\")]'.format(expression)):\n matches.append(unquote(match.attrib['href']))\n # If the node has no sub-tags.\n if not matches:\n for match in node.xpath('{}//text()[contains(., \"@\")]'.format(expression)):\n matches.append(match)\n if matches:\n for match in matches:\n match = email_re.search(match)\n if match:\n return match.group(1)\n if error:\n raise Exception('No email pattern in {}'.format(matches))\n elif error:\n raise Exception('No email node in {}'.format(etree.tostring(node)))", "def quoteaddr(addr):\r\n m = (None, None)\r\n try:\r\n m = email.Utils.parseaddr(addr)[1]\r\n except AttributeError:\r\n pass\r\n if m == (None, None): # Indicates parse failure or AttributeError\r\n # something weird here.. 
punt -ddm\r\n return \"<%s>\" % addr\r\n elif m is None:\r\n # the sender wants an empty return address\r\n return \"<>\"\r\n else:\r\n return \"<%s>\" % m", "def GetFullAddress(name):\n if \"@\" not in name:\n domain = ezmail.CONFIG.get(\"domain\")\n if domain:\n return \"%s@%s\" % (name, domain)\n else:\n return \"%s@%s\" % (name, ezmail._get_hostname())\n else:\n return name", "def mask_ip_addr(addr, keep_last=True):\n tip = ''\n if isinstance(addr, str):\n tl = addr.split('.')\n for i in range(0, len(tl) - 1):\n tip += 'xxx.'\n if keep_last:\n tip += tl[len(tl) - 1]\n else:\n tip += 'xxx'\n return tip", "def get_email_without_link(email):\n\tif not frappe.get_all(\"Email Account\", filters={\"enable_automatic_linking\": 1}):\n\t\treturn email\n\n\ttry:\n\t\t_email = email.split(\"@\")\n\t\temail_id = _email[0].split(\"+\", 1)[0]\n\t\temail_host = _email[1]\n\texcept IndexError:\n\t\treturn email\n\n\treturn f\"{email_id}@{email_host}\"", "def replace_emails(text, replace_with=\"<EMAIL>\"):\n result = re.sub(EMAIL_REGEX, replace_with, text)\n return result", "def maskedAccount(account):\n suffix = None\n if '@' in account:\n name, _, suffix = account.partition('@')\n else:\n name = account\n _len = len(name)\n if _len <= 3:\n return account\n plen = 2 if _len > 3 else 1\n name = '%s%s%s' % (name[:plen], '*' * (_len - 2 * plen), name[_len - plen:])\n return '%s@%s' % (name, suffix) if suffix else name", "def _fix_email(self, email):\n result = {\n 'invalid': 0,\n 'cleaned': 0,\n 'unchecked': 0,\n 'removed': 0\n }\n if re.search(r'^\\s*$', email.text) is not None:\n result['removed'] = 1\n email.decompose()\n return result\n\n if re.search(r'%20', email['href']) is not None:\n result['cleaned'] = 1\n email['href'] = re.sub(r'%20', '', email['href'])\n\n address = email['href'][7:] # strip off the leading mailto:\n info = cache.get_default().get_email(address)\n\n if not info.is_valid:\n if info.reason == 'accepted_email':\n result['unchecked'] = 1\n email.insert(0, '*UNCHECKED*')\n else:\n result['invalid'] = 1\n email.insert(0, '*INVALID {:s}*'.format(info.reason))\n return result", "def AS_newreference(self, ref):\n\t\tif isinstance(ref, GenericReference):\n\t\t\treturn ref.AS_resolve(Reference, self.AS_appdata)\n\t\telif isinstance(ref, aem.Query):\n\t\t\treturn Reference(self.AS_appdata, ref)\n\t\telif ref is None:\n\t\t\treturn Reference(self.AS_appdata, aem.app)\n\t\telse:\n\t\t\treturn Reference(self.AS_appdata, aem.customroot(ref))", "def link_emails_to_crm(ticket, noop):\n if ticket.fields.resolution is not None:\n # don't handle closed tickets now\n return\n if not ticket.fields.description:\n # if the description is empty there are no emails to parse\n return\n\n crm = Hatchbuck(os.environ.get(\"HATCHBUCK_APIKEY\"), noop)\n # logging.debug(ticket.fields.description)\n\n def repl(match):\n \"\"\"\n match is a tuple and looks like one of:\n ('e@example.com', 'e@example.com', '')\n ('<e@example.com>', 'e@example.com', '')\n ('<e@example.com> (https://example.com...)',\n 'e@example.com', ' (https://example.com...)')\n ( full match that will be replaced;\n pure email only;\n rest of string with link if present)\n :param match: tuple\n :return: email with brackets and link to crm\n \"\"\"\n # logging.debug(\"%s %s %s\", match[0], match[1], match[2])\n profile = crm.search_email(match[1])\n if profile is None:\n # email not found in CRM\n return match[0] # don't create contacts at the moment\n # if ticket.fields.resolution is not None:\n # ticket closed, don't create contact and don't 
replace any link\n # return match[0]\n # profile = {'emails': {'address': match[1], 'type': 'Work'}}\n # if not noop:\n # profile = crm.create(profile)\n # else:\n # profile['contactUrl'] = \"https://example.com/\"\n return \"<\" + match[1] + \"> (\" + profile[\"contactUrl\"] + \")\"\n\n description = re.sub(\n r\"[<]?([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)[>]?\"\n r\"( \\(https:\\/\\/app.hatchbuck.com\\/\\/Contact\\/ContactDetail\\?eid=[a-zA-Z0-9]+\\))?\",\n repl,\n ticket.fields.description,\n )\n if description != ticket.fields.description:\n logging.warning(\"%s new description: %s\", ticket, description)\n if not noop:\n ticket.update(description=description)", "def email_reply_to_address(self) -> ConfigNodePropertyString:\n return self._email_reply_to_address", "def strip_mail_ext_address(mail, delimiters=None):\n\n if not delimiters:\n delimiters = settings.RECIPIENT_DELIMITERS\n\n (_orig_user, _domain) = mail.split('@', 1)\n for delimiter in delimiters:\n if delimiter in _orig_user:\n (_user, _ext) = _orig_user.split(delimiter, 1)\n return _user + '@' + _domain\n\n return mail", "def rereference(edf: mne.io.RawArray, desired_ref: str, current_ref: str=None, pick_chans: list=None) -> Tuple[mne.io.RawArray, str]:\n if pick_chans is None:\n chans = edf.ch_names\n else:\n chans = pick_chans\n if current_ref == desired_ref:\n return edf, current_ref\n\n if desired_ref in ['linked_ear'] and 'M1' not in chans or 'M2' not in chans:\n warnings.warn('Trying to reference to linked ear, but missing M1 and M2 channels. EEG file will not be re-referenced', EEGWarning)\n return edf, current_ref\n\n\n if current_ref == 'contra_mastoid':\n to_reref = [ch for ch in chans if ch not in ['M1','M2']]\n left = [ch for ch in to_reref if len([n for n in ['1','3','5','7','9'] if n in ch])>0]\n right = [ch for ch in to_reref if len([n for n in ['2','4','6','8','10'] if n in ch])>0]\n if len(left) > 0 and 'M2' not in chans:\n warnings.warn(\n 'Trying to reference to left channels to M2 ear, but missing M2 channel. left channels cannot be unreferenced')\n left_ref = []\n left = []\n else:\n left_ref = ['M2'] * len(left)\n if len(right) > 0 and 'M1' not in chans:\n warnings.warn(\n 'Trying to reference to right channels to M1 ear, but missing M1 channel. right channels cannot be unreferenced')\n right_ref = []\n right = []\n else:\n right_ref = ['M1'] * len(right)\n edf = edf.apply_function(lambda x: -x, picks=['M1', 'M2'])\n edf = mne.set_bipolar_reference(edf, left+right, left_ref+right_ref, drop_refs=False, verbose=False)\n edf = edf.drop_channels(left + right)\n edf.rename_channels({ch: ch.split('-')[0] for ch in edf.ch_names})\n edf = edf.apply_function(lambda x: -x, picks=['M1', 'M2'])\n\n ref_type = desired_ref\n if desired_ref == 'contra_mastoid':\n if current_ref == 'linked_ear':\n edf = edf.apply_function(lambda x: -x, picks=['M1','M2'])\n edf, _ = mne.set_eeg_reference(edf, ref_channels=['M1', 'M2'], verbose=False)\n edf = edf.apply_function(lambda x: -x, picks=['M1', 'M2'])\n to_reref = [ch for ch in chans if ch not in ['M1','M2']]\n left = [ch for ch in to_reref if len([n for n in ['1','3','5','7','9','z'] if n in ch])>0]\n right = [ch for ch in to_reref if len([n for n in ['2','4','6','8','10'] if n in ch])>0]\n if len(left) > 0 and 'M2' not in chans:\n warnings.warn(\n 'Trying to reference to left channels to M2 ear, but missing M2 channel. 
left channels will not be re-referenced')\n left_ref = []\n left = []\n ref_type = 'contra_right_only'\n else:\n left_ref = ['M2'] * len(left)\n if len(right) > 0 and 'M1' not in chans:\n warnings.warn(\n 'Trying to reference to right channels to M1 ear, but missing M1 channel. right channels will not be re-referenced')\n right_ref = []\n right = []\n ref_type = 'contra_left_only'\n else:\n right_ref = ['M1'] * len(right)\n edf = mne.set_bipolar_reference(edf, left+right, left_ref+right_ref, drop_refs=False, verbose=False)\n edf = edf.drop_channels(left + right)\n edf.rename_channels({ch:ch.split('-')[0] for ch in edf.ch_names})\n elif desired_ref == 'linked_ear':\n edf, _ = mne.set_eeg_reference(edf, ref_channels=['M1','M2'], verbose=False)\n else:\n edf, _ = mne.set_eeg_reference(edf, ref_channels=desired_ref, verbose=False)\n\n return edf, ref_type", "def resolve_one_reference(key, val, fmt, meta):\n\n if key == \"Link\":\n text = stringify(val[1])\n target = val[2][0]\n m = re.match(r\"#(.+)$\", target)\n if m:\n # pandoc automatically makes labels for headings.\n label = m.group(1).lower()\n label = re.sub(r\"[^\\w-]+\", \"\", label) # Strip HTML entities\n text = re.sub(r\"_\", r\"\\_\", text) # Escape underscores in display text\n return RawInline(\"tex\", rf\"\\hyperref[{label}]{{{text}}}\")\n\n # Other elements will be returned unchanged.", "def preProcess(email: str):\n # Make entire email to lower case\n email = email.lower()\n \n # Strip html tags (strings that look like <blah> where 'blah' does not\n # contain '<' or '>')... replace with a space\n email = re.sub('<[^<>]+>', ' ', email)\n\n # Replace any number with a string 'number'\n email = re.sub('[0-9]+', 'number', email)\n\n # Anything starting with http or https:// replaced with 'httpaddr'\n email = re.sub('(http|https)://[^\\s]*', 'httpaddr', email)\n\n # Strings with \"@\" in the middle are considered emails --> 'emailaddr'\n email = re.sub('[^\\s]+@[^\\s]+', 'emailaddr', email)\n\n # Replace $ with 'dollar'\n email = re.sub('[$]+' , 'dollar', email)\n\n return email", "def fix_refuris(self, tree):\r\n fname = \"__\" + self.config.master_doc + \"__\"\r\n for refnode in tree.traverse(nodes.reference):\r\n if 'refuri' not in refnode:\r\n continue\r\n refuri = refnode['refuri']\r\n hashindex = refuri.find('#')\r\n if hashindex < 0:\r\n continue\r\n hashindex = refuri.find('#', hashindex + 1)\r\n if hashindex >= 0:\r\n refnode['refuri'] = fname + refuri[hashindex:]", "def marking_ref(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"marking_ref\")", "def apply_addr_mask(mask, val):\n b = [d for d in format(val, \"036b\")]\n for i, d in enumerate(mask):\n if d == \"X\":\n b[i] = \"X\"\n if d == \"1\":\n b[i] = \"1\"\n return gen_addrs(\"\".join(b))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign numbers to autonumbered footnotes. For labeled autonumbered footnotes, copy the number over to corresponding footnote references.
def number_footnotes(self, startnum):
    for footnote in self.document.autofootnotes:
        while True:
            label = str(startnum)
            startnum += 1
            if label not in self.document.nameids:
                break
        footnote.insert(0, nodes.label('', label))
        for name in footnote['names']:
            for ref in self.document.footnote_refs.get(name, []):
                ref += nodes.Text(label)
                ref.delattr('refname')
                assert len(footnote['ids']) == len(ref['ids']) == 1
                ref['refid'] = footnote['ids'][0]
                footnote.add_backref(ref['ids'][0])
                self.document.note_refid(ref)
                ref.resolved = 1
        if not footnote['names'] and not footnote['dupnames']:
            footnote['names'].append(label)
            self.document.note_explicit_target(footnote, footnote)
            self.autofootnote_labels.append(label)
    return startnum
[ "def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn", "def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta", "def _ExpandFootnotes(self, filename, json_val):\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes", "def makeFootnoteRefId(self, id):\r\n return 'fnref:%s' % id", "def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]", "def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()", "def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote", "def set_n(self, temp, n):\n self.temp_dict[temp]['n'] = n", "def set_name_notes(ibs, nid_list, notes_list):\n ibsfuncs.assert_lblannot_rowids_are_type(ibs, nid_list, ibs.lbltype_ids[constants.INDIVIDUAL_KEY])\n ibs.set_lblannot_notes(nid_list, notes_list)", "def renumber(apps, schema_editor):\n # Issue = apps.get_model('issues', 'Issue')\n from apps.issues.models import Issue\n Issue.objects.renumber()", "def write_note(note, data, current_point, counts_num):\n column = np.where(data[0] == note)[0]\n data[current_point:current_point + counts_num, column] = 1", "def reindex(self):\n for idx, line in enumerate(self.line_map):\n line.index = idx\n if line.annotations:\n for x in line.annotations:\n x.line_num = idx", "def add_motif_counts(self, list_of_counts):\n self.motif_counts = list_of_counts", "def offset_references(self, offset: int) -> None:\n self.stream_dict.offset_references(offset)\n self.object_number += offset", "def makeFootnoteId(self, id):\r\n return 'fn:%s' % id", "def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1", "def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n 
node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div", "def assign_family_tree_internal_IDs(family_tree):\n internal_id = 1\n for key in keys_sorted_by_weight(family_tree):\n node = family_tree[key]\n node['.id'] = internal_id\n internal_id += 1\n # be sure that the DEFAULT_NODE_KEY has the id DEFAULT_NODE_ID\n node = family_tree[DEFAULT_NODE_KEY]\n offset = node['.id']\n for node in family_tree.itervalues():\n node['.id'] += (DEFAULT_NODE_ID - offset)", "def set_elems_number(self, elems_number):\n assert len(elems_number) == self.natoms\n self.elems = [elements.number.keys()[i] for i in elems_number]\n return", "def apply_pseudocounts(cls, motif, pseudocounts):\n if pseudocounts is not None:\n if pseudocounts == \"jaspar\":\n pseudocounts = motifs.jaspar.calculate_pseudocounts(motif)\n motif.pseudocounts = pseudocounts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign numbers to autonumbered footnote references.
def number_footnote_references(self, startnum):
    i = 0
    for ref in self.document.autofootnote_refs:
        if ref.resolved or ref.hasattr('refid'):
            continue
        try:
            label = self.autofootnote_labels[i]
        except IndexError:
            msg = self.document.reporter.error(
                'Too many autonumbered footnote references: only %s '
                'corresponding footnotes available.'
                % len(self.autofootnote_labels), base_node=ref)
            msgid = self.document.set_id(msg)
            for ref in self.document.autofootnote_refs[i:]:
                if ref.resolved or ref.hasattr('refname'):
                    continue
                prb = nodes.problematic(
                    ref.rawsource, ref.rawsource, refid=msgid)
                prbid = self.document.set_id(prb)
                msg.add_backref(prbid)
                ref.replace_self(prb)
            break
        ref += nodes.Text(label)
        id = self.document.nameids[label]
        footnote = self.document.ids[id]
        ref['refid'] = id
        self.document.note_refid(ref)
        assert len(ref['ids']) == 1
        footnote.add_backref(ref['ids'][0])
        ref.resolved = 1
        i += 1
[ "def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta", "def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn", "def makeFootnoteRefId(self, id):\r\n return 'fnref:%s' % id", "def References(self, document):\n self.sequence = list(map(document.Reference, self.sequence))", "def offset_references(self, offset: int) -> None:\n self.stream_dict.offset_references(offset)\n self.object_number += offset", "def pdf_footnote(self, pdf_footnote):\n\n self._pdf_footnote = pdf_footnote", "def _ExpandFootnotes(self, filename, json_val):\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes", "def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]", "def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()", "def owner_references(self, owner_references):\n\n self._owner_references = owner_references", "def renumber(apps, schema_editor):\n # Issue = apps.get_model('issues', 'Issue')\n from apps.issues.models import Issue\n Issue.objects.renumber()", "def set_n(self, temp, n):\n self.temp_dict[temp]['n'] = n", "def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1", "def optSetRefNr(*args):\n return _optcc.optSetRefNr(*args)", "def __mixing_references_on_n(self, list_refs):\n all = {x: [0, ''] for x in set.union(*map(set, list_refs))}\n for ref in list_refs:\n for word, origin in ref.items():\n all[word][0] += 1\n all[word][1] = origin\n return {word: origin for word, (count, origin) in all.items() if count >= self.mix}", "def register_reference(target, count=1):", "def makeFootnoteId(self, id):\r\n return 'fn:%s' % id", "def update_reference(self, index, uri, text):\n el = self.xpath('./person/ref')[index]\n assert el.tag == 'ref' #check sanity\n el.set('target', uri)\n el.text = text\n return el", "def set_name_notes(ibs, nid_list, notes_list):\n ibsfuncs.assert_lblannot_rowids_are_type(ibs, nid_list, ibs.lbltype_ids[constants.INDIVIDUAL_KEY])\n ibs.set_lblannot_notes(nid_list, notes_list)", "def set_reference_index(self, value): # pragma: no cover\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add symbols indexes to "[*]"-style footnotes and references.
def symbolize_footnotes(self):
    labels = []
    for footnote in self.document.symbol_footnotes:
        reps, index = divmod(self.document.symbol_footnote_start, len(self.symbols))
        labeltext = self.symbols[index] * (reps + 1)
        labels.append(labeltext)
        footnote.insert(0, nodes.label('', labeltext))
        self.document.symbol_footnote_start += 1
        self.document.set_id(footnote)
    i = 0
    for ref in self.document.symbol_footnote_refs:
        try:
            ref += nodes.Text(labels[i])
        except IndexError:
            msg = self.document.reporter.error(
                'Too many symbol footnote references: only %s '
                'corresponding footnotes available.' % len(labels),
                base_node=ref)
            msgid = self.document.set_id(msg)
            for ref in self.document.symbol_footnote_refs[i:]:
                if ref.resolved or ref.hasattr('refid'):
                    continue
                prb = nodes.problematic(
                    ref.rawsource, ref.rawsource, refid=msgid)
                prbid = self.document.set_id(prb)
                msg.add_backref(prbid)
                ref.replace_self(prb)
            break
        footnote = self.document.symbol_footnotes[i]
        assert len(footnote['ids']) == 1
        ref['refid'] = footnote['ids'][0]
        self.document.note_refid(ref)
        footnote.add_backref(ref['ids'][0])
        i += 1
[ "def prepare_symbols(self):", "def extract_footnotes(kanji: dict):\n n = 0\n fn = {}\n for field in FN_FIELDS:\n notes = re.findall(FOOTNOTE, kanji[field])\n for note in notes:\n fn[note] = n\n kanji[field] = re.sub(note, f\"{n}\", kanji[field])\n n += 1\n\n kanji[\"footnotes\"] = fn", "def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]", "def add_symbol(self):\r\n for j in range(len(self.atom)):\r\n self.ax.text(\r\n self.coord[j][0] + 0.1,\r\n self.coord[j][1] + 0.1,\r\n self.coord[j][2] + 0.1,\r\n f\"{self.atom[j]},{j}\",\r\n fontsize=9,\r\n )", "def _ExpandFootnotes(self, filename, json_val):\n footnotes = []\n with self.getter.Fetch(filename) as f:\n reader = DictReader(f)\n for row in reader:\n row['@type'] = 'StatisticalAnnotation'\n row['@id'] = GetSchemaId(json_val) + '#footnote='\n row['@id'] += row['codeValue']\n row['dataset'] = GetSchemaId(json_val)\n footnotes.append(row)\n return footnotes", "def reset(self):\r\n self.footnotes = markdown.odict.OrderedDict()", "def add_symbols(self, symbols):\n for symbol in symbols:\n self.add_symbol(symbol)", "def show_refs(index):\n indent = \" : \"\n for ref, defn in index.links:\n print(format_ref(ref))\n if defn:\n print(indent, defn.format())\n for loc in index.locs[defn.id]:\n print(indent, format_def_with_location(defn, loc.location))\n else:\n print(indent, \"None\")\n continue", "def _set_tag_indexes(self):\n self.opening_lexes = {}\n self.closing_lexes = {}\n for l in self.lexes:\n self.opening_lexes[l[0]] = l\n self.closing_lexes[l[1]] = l\n self.opening_sents = {}\n self.closing_sents = {}\n for s in self.sentences:\n self.opening_sents[s[0]] = s\n self.closing_sents[s[1]] = s", "def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta", "def add_info(lines:list, infos:list, start_index:int):\n\tfor line, info in zip(lines,infos):\n\t\tend_index = start_index + len(info)\n\t\tline.extend(['']*(end_index-len(line)))\n\t\tline[start_index:end_index] = info", "def generate_head2(head2, index, link_index):\n if head2[:2] == '##':\n head2 = head2[2:]\n \n return f'\\t{index}. 
[{head2}](#p{link_index})'", "def testFootnotes(self, b, u):\n rx = re.compile(r'\\\\f \\+ [^\\\\][^f][^r]')\n if not rx.search(u) == None:\n print('Footnote without back reference in: ' + b)", "def add_escapement_back_in_unit_ref(unit_name):\n escaped_text = \"\"\n for c in unit_name:\n if ( c == ESCAPE_SYM or is_boundary_sym(c) or is_comment_sym(c)\n or is_unit_ref_modifier_sym(c)):\n escaped_text += ESCAPE_SYM + c\n else:\n escaped_text += c\n return add_escapement_back_for_not_comments(escaped_text)", "def OutputSymbolExtraLinks(symbol):\n desc = ''\n\n if False: # NEW FEATURE: needs configurability\n sstr = uri_escape(symbol)\n mstr = uri_escape(MODULE)\n desc += '''<ulink role=\"extralinks\" url=\"http://www.google.com/codesearch?q=%s\">code search</ulink>\n<ulink role=\"extralinks\" url=\"http://library.gnome.org/edit?module=%s&amp;symbol=%s\">edit documentation</ulink>\n''' % (sstr, mstr, sstr)\n\n return desc", "def OutputSymbolTraits(symbol):\n\n desc = ''\n\n if symbol in Since:\n link_id = \"api-index-\" + Since[symbol]\n desc += \"<para role=\\\"since\\\">Since: <link linkend=\\\"%s\\\">%s</link></para>\" % (link_id, Since[symbol])\n\n if symbol in StabilityLevel:\n stability = StabilityLevel[symbol]\n if stability in AnnotationDefinition:\n AnnotationsUsed[stability] = True\n stability = \"<acronym>%s</acronym>\" % stability\n desc += \"<para role=\\\"stability\\\">Stability Level: %s</para>\" % stability\n return desc", "def glue_notes(notes, add_marks=True):\n all_notes=[]\n for n in notes:\n if add_marks:\n all_notes=all_notes+[350]+list(n)+[351] \n else:\n all_notes=all_notes+list(n)\n return np.array(all_notes)", "def add_symbols(self, symbols: List[Symbol]):\n if len(symbols) == 0:\n return\n for symbol in symbols:\n self.add_symbol(symbol)", "def add_all_refs(self, line):\n # find lone strings with no brackets\n p = re.compile(r'.*\\:\\s*([^\\s\\[\\]]+).*')\n self.add_ref_pattern(p, line)\n # find objects in one or more bracket sets with possible first token and comma\n p = re.compile(r'.*\\[(?:(.*),\\s*)?((?:\\[??[^\\[]*?))\\]')\n self.add_ref_pattern(p, line)\n p = re.compile(r'.*Optional\\[Union\\[([^,]+)')\n self.add_ref_pattern(p, line)\n return line", "def list_symbols(self) -> str:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Link manually-labeled footnotes and citations to/from their references.
def resolve_footnotes_and_citations(self):
    for footnote in self.document.footnotes:
        for label in footnote['names']:
            if label in self.document.footnote_refs:
                reflist = self.document.footnote_refs[label]
                self.resolve_references(footnote, reflist)
    for citation in self.document.citations:
        for label in citation['names']:
            if label in self.document.citation_refs:
                reflist = self.document.citation_refs[label]
                self.resolve_references(citation, reflist)
[ "def _generate_biblio_ref_content(self, doc, out_buffer):\n out_buffer.write(\"\\nDocument contains the following Bibliography References:\\n\")\n\n for biblio_ref in doc.get_biblio_refs():\n out_buffer.write(\"- Reference to [{}]\\n\".format(biblio_ref.get_name()))", "def result_nodes(\n self,\n document: \"docutils.nodes.document\",\n env: \"BuildEnvironment\",\n node: \"docutils.nodes.Element\",\n is_ref: bool,\n ) -> Tuple[List[\"docutils.nodes.Node\"], List[\"docutils.nodes.system_message\"]]:\n if not node.get(\"refdomain\"):\n assert node[\"reftype\"] == \"footcite\"\n node[\"refdomain\"] = \"footcite\"\n node[\"reftype\"] = \"p\"\n foot_domain = cast(\"BibtexFootDomain\", self.env.get_domain(\"footcite\"))\n keys = [key.strip() for key in self.target.split(\",\")] # type: ignore\n try:\n foot_bibliography = env.temp_data[\"bibtex_foot_bibliography\"]\n except KeyError:\n env.temp_data[\n \"bibtex_foot_bibliography\"\n ] = foot_bibliography = foot_domain.bibliography_header.deepcopy()\n foot_old_refs = env.temp_data.setdefault(\"bibtex_foot_old_refs\", set())\n foot_new_refs = env.temp_data.setdefault(\"bibtex_foot_new_refs\", set())\n style = find_plugin(\n \"pybtex.style.formatting\", self.config.bibtex_default_style\n )()\n references = []\n domain = cast(\"BibtexDomain\", self.env.get_domain(\"cite\"))\n # count only incremented at directive, see foot_directives run method\n footbibliography_count = env.temp_data.setdefault(\n \"bibtex_footbibliography_count\", 0\n )\n footcite_names = env.temp_data.setdefault(\"bibtex_footcite_names\", {})\n for key in keys:\n entry = domain.bibdata.data.entries.get(key)\n if entry is not None:\n formatted_entry = style.format_entry(label=\"\", entry=entry)\n if key not in (foot_old_refs | foot_new_refs):\n footnote = docutils.nodes.footnote(auto=1)\n # no automatic ids for footnotes: force non-empty template\n template: str = (\n env.app.config.bibtex_footcite_id\n if env.app.config.bibtex_footcite_id\n else \"footcite-{key}\"\n )\n raw_id = template.format(\n footbibliography_count=footbibliography_count + 1, key=entry.key\n )\n # format name with make_id for consistency with cite role\n name = make_id(raw_id)\n footnote[\"names\"] += [name]\n footcite_names[entry.key] = name\n footnote += domain.backend.paragraph(formatted_entry)\n document.note_autofootnote(footnote)\n document.note_explicit_target(footnote, footnote)\n node_text_transform(footnote)\n foot_bibliography += footnote\n foot_new_refs.add(key)\n references.append(\n (\n entry,\n formatted_entry,\n FootReferenceInfo(\n key=entry.key,\n refname=footcite_names[entry.key],\n document=document,\n ),\n )\n )\n else:\n logger.warning(\n 'could not find bibtex key \"%s\"' % key,\n location=(env.docname, self.lineno),\n type=\"bibtex\",\n subtype=\"key_not_found\",\n )\n ref_nodes = format_references(\n foot_domain.reference_style, node[\"reftype\"], references\n ).render(domain.backend)\n return ref_nodes, []", "def add_pdf_references(text, document):\n document.get_pdf_url()\n\n def replace_docs(_):\n return '<a href=%s>(ver documento original)</a>' % document.get_pdf_url()\n\n text = re.sub('\\(ver documento original\\)', replace_docs, text)\n\n return text", "def _resolve_references(self):\n self._log_msg(\"Processing inline citations\", level=3)\n for paragraph in self.parsed_xml.findall('/body/sec/p'):\n self._process_node_for_references(paragraph)", "def test_anchorRef(self):\r\n listing = Element('a')\r\n listing.setAttribute('href', 'http://example.com/foo')\r\n 
self.spitter.visitNode(listing)\r\n self.assertEqual(\r\n ''.join(self.output),\r\n \"\\\\footnote{http://example.com/foo}\")", "def show_refs(index):\n indent = \" : \"\n for ref, defn in index.links:\n print(format_ref(ref))\n if defn:\n print(indent, defn.format())\n for loc in index.locs[defn.id]:\n print(indent, format_def_with_location(defn, loc.location))\n else:\n print(indent, \"None\")\n continue", "def process_faqrefs(app, doctree):\n process_blocrefs_generic(\n app, doctree, bloc_name=\"faqref\", class_node=faqref_node)", "def make_pdf_link(self):\n return", "def references_to_markdown(references):\n\n pybtex_style = find_plugin('pybtex.style.formatting', 'plain')()\n pybtex_md_backend = find_plugin('pybtex.backends', 'markdown')\n pybtex_parser = Parser()\n\n # hack to not print labels (may remove this later)\n def write_entry(self, key, label, text):\n self.output(u'%s \\n' % text)\n pybtex_md_backend.write_entry = write_entry\n pybtex_md_backend = pybtex_md_backend()\n\n data = pybtex_parser.parse_stream(StringIO(references))\n data_formatted = pybtex_style.format_entries(data.entries.itervalues())\n output = StringIO()\n pybtex_md_backend.write_to_stream(data_formatted, output)\n\n # add blockquote style\n references_md = '> {}'.format(output.getvalue())\n references_md.replace('\\n', '\\n> ')\n\n return references_md", "def resolve_one_reference(key, val, fmt, meta):\n\n if key == \"Link\":\n text = stringify(val[1])\n target = val[2][0]\n m = re.match(r\"#(.+)$\", target)\n if m:\n # pandoc automatically makes labels for headings.\n label = m.group(1).lower()\n label = re.sub(r\"[^\\w-]+\", \"\", label) # Strip HTML entities\n text = re.sub(r\"_\", r\"\\_\", text) # Escape underscores in display text\n return RawInline(\"tex\", rf\"\\hyperref[{label}]{{{text}}}\")\n\n # Other elements will be returned unchanged.", "def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):\n # TODO Deal with xref so that they keep the proper path. 
Atm it'll just strip the path and leave only the id\n file_to_id_map = info['file_to_id_map']\n current_dir = cwd or os.path.dirname(src_file)\n cleaned_content = remove_conditional_content(content, info, tag=tag)\n links = LINKS_RE.finditer(cleaned_content)\n\n for link in links:\n link_text = link.group(0)\n link_file = link.group(1)\n link_anchor = link.group(2)\n link_title = link.group(3)\n\n if link_file is not None:\n fixed_link_file = link_file.replace(\".html\", \".adoc\")\n fixed_link_file_abs = os.path.abspath(os.path.join(current_dir, fixed_link_file))\n if fixed_link_file_abs in file_to_id_map:\n if fixed_link_file_abs.startswith(book_dir + os.sep) or fixed_link_file_abs == src_file:\n # We are dealing with a cross reference within the same book here\n if link_anchor is None:\n # Cross reference to the top of a topic, without an id being specified\n link_anchor = \"#\" + file_to_id_map[fixed_link_file_abs]\n\n fixed_link = \"xref:\" + link_anchor.replace(\"#\", \"\") + link_title\n else:\n # We are dealing with a cross reference to another book here\n external_link = EXTERNAL_LINK_RE.search(link_file)\n book_dir_name = external_link.group(1)\n\n # Find the book name\n book_name = book_dir_name\n for book in info['data']:\n if check_node_distro_matches(book, info['distro']) and book['Dir'] == book_dir_name:\n book_name = book['Name']\n break\n\n fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)\n\n if link_anchor is None:\n fixed_link = \"link:\" + fixed_link_file + \"#\" + file_to_id_map[fixed_link_file_abs] + link_title\n else:\n fixed_link = \"link:\" + fixed_link_file + link_anchor + link_title\n else:\n # Cross reference or link that isn't in the docs suite\n fixed_link = link_text\n if EXTERNAL_LINK_RE.search(link_file) is not None:\n rel_src_file = src_file.replace(os.path.dirname(book_dir) + \"/\", \"\")\n has_errors = True\n log.error(\"ERROR (%s): \\\"%s\\\" appears to try to reference a file not included in the \\\"%s\\\" distro\", rel_src_file, link_text.replace(\"\\n\", \"\"), info['distro'])\n sys.exit(-1)\n else:\n fixed_link = \"xref:\" + link_anchor.replace(\"#\", \"\") + link_title\n\n content = content.replace(link_text, fixed_link)\n\n return content", "def Footnote(self, footnotes):\n xbl = .05 # bottom left in inches\n ybl = .05 # bottom left in inches\n lsp = .20 # Line spacing in inches.\n x = xbl/self.width\n y = (ybl + len(footnotes)*lsp)/self.height\n delta = lsp/self.height\n for footnote in footnotes:\n self.fig.text(x, y, footnote, size='large')\n y -= delta", "def makeFootnotesDiv (self, doc) :\r\n\r\n if not self.footnotes.keys() :\r\n return None\r\n\r\n div = doc.createElement(\"div\")\r\n div.setAttribute('class', 'footnote')\r\n hr = doc.createElement(\"hr\")\r\n div.appendChild(hr)\r\n ol = doc.createElement(\"ol\")\r\n div.appendChild(ol)\r\n\r\n footnotes = [(self.used_footnotes[id], id)\r\n for id in self.footnotes.keys()]\r\n footnotes.sort()\r\n\r\n for i, id in footnotes :\r\n li = doc.createElement('li')\r\n li.setAttribute('id', self.makeFootnoteId(i))\r\n\r\n self.md._processSection(li, self.footnotes[id].split(\"\\n\"), looseList=1)\r\n\r\n #li.appendChild(doc.createTextNode(self.footnotes[id]))\r\n\r\n backlink = doc.createElement('a')\r\n backlink.setAttribute('href', '#' + self.makeFootnoteRefId(i))\r\n backlink.setAttribute('class', 'footnoteBackLink')\r\n backlink.setAttribute('title',\r\n 'Jump back to footnote %d in the text' % 1)\r\n backlink.appendChild(doc.createTextNode(FN_BACKLINK_TEXT))\r\n\r\n if 
li.childNodes :\r\n node = li.childNodes[-1]\r\n if node.type == \"text\" :\r\n\t\t li.appendChild(backlink)\r\n\t\telif node.nodeName == \"p\":\r\n node.appendChild(backlink)\r\n\t\telse:\r\n\t\t p = doc.createElement('p')\r\n\t\t p.appendChild(backlink)\r\n\t\t li.appendChild(p)\r\n\r\n ol.appendChild(li)\r\n\r\n return div", "def format_link(self, ind):", "def _process_biblio_ref(self, item):\n a_ref = nodes.Reference()\n logger = logging.getLogger(self.__class__.__name__)\n\n a_ref.set_name(item[\"name\"])\n a_ref.set_title(item.get(\"title\", \"\"))\n a_ref.set_organization(item.get(\"organization\", \"\"))\n a_ref.set_category(item.get(\"category\", \"\"))\n a_ref.set_date(item.get(\"date\", \"\"))\n\n logger.debug(\"Processing Bibliography Reference: \\\"{}\\\"\".format(a_ref.get_name()))\n\n if \"hyperlink\" in item:\n if isinstance(item[\"hyperlink\"], list):\n for hyperlink_item in item[\"hyperlink\"]:\n a_ref.add_hyperlink(hyperlink_item)\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(hyperlink_item))\n else:\n a_ref.add_hyperlink(item[\"hyperlink\"])\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(item[\"hyperlink\"]))\n\n return a_ref", "def reference_section(self):\n reference = self.study.get('reference', \"\")\n\n # Allow \"0001111\", \"PMID:0001111\", \"PMID: 0001111\"\n pmid = self.study.get('PMID', \"\").split(':')[-1].strip()\n\n if not (reference or pmid):\n return \"\"\n\n if pmid:\n pmid = 'http://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(pmid)\n return dedent(\n \"\"\"\n Reference\n ---------\n {0}\n\n {1}\n \"\"\").format(reference, pmid)", "def process_link(\n self,\n env: BuildEnvironment,\n refnode: Element,\n has_explicit_target: bool,\n title: str,\n target: str,\n ) -> Tuple[str, str]:\n\n if has_explicit_target:\n note_path = target\n else:\n filename = ws_re.sub(\"_\", target).casefold()\n note_path = str(Path(\"/documents\", filename))\n\n return title, note_path", "def _replace_references(self, references):\n el_references = self.get_root().xpath('./person/ref')\n for el_ref in el_references:\n el_ref.getparent().remove(el_ref)\n for url, text in references:\n self.add_reference(uri=url, text=text)", "def _do_links(self, text):\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\n\n # `anchor_allowed_pos` is used to support img links inside\n # anchors, but not anchors inside anchors. 
An anchor's start\n # pos must be `>= anchor_allowed_pos`.\n anchor_allowed_pos = 0\n\n curr_pos = 0\n while True: # Handle the next link.\n # The next '[' is the start of:\n # - an inline anchor: [text](url \"title\")\n # - a reference anchor: [text][id]\n # - an inline img: ![text](url \"title\")\n # - a reference img: ![text][id]\n # - a footnote ref: [^id]\n # (Only if 'footnotes' extra enabled)\n # - a footnote defn: [^id]: ...\n # (Only if 'footnotes' extra enabled) These have already\n # been stripped in _strip_footnote_definitions() so no\n # need to watch for them.\n # - a link definition: [id]: url \"title\"\n # These have already been stripped in\n # _strip_link_definitions() so no need to watch for them.\n # - not markup: [...anything else...\n try:\n try:\n start_idx = text.index('[[', curr_pos)\n is_img=False\n except:\n start_idx = text.index('{{', curr_pos)\n is_img=True\n except ValueError:\n break\n\n text_length = len(text)\n\n # Find the matching closing ']]' or '}}'.\n bracket_depth = 0\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\n text_length)):\n ch = text[p:p+2]\n if ch in [']]', '}}']:\n bracket_depth -= 1\n if bracket_depth < 0:\n break\n elif ch in ['[[', '{{']:\n bracket_depth += 1\n else:\n # Closing bracket not found within sentinel length.\n # This isn't markup.\n curr_pos = start_idx + 1\n continue\n link_text = text[start_idx+2:p]\n\n # Now determine what this is by the remainder.\n p += 1\n if p == text_length:\n return text\n\n if is_img:\n\n ########## syntax: link ##############\n result_head = '![%s]' % link_text\n result = '%s(%s)' % (result_head, link_text)\n text = text[:start_idx] + result + text[p+1:]\n ########## syntax: link END ##############\n\n elif start_idx >= anchor_allowed_pos:\n\n if '|' in link_text:\n link_re=re.compile('(.+)\\\\|(.+)',re.X | re.M)\n else:\n link_re=re.compile('(:|\\\\+|\\\\b)(.+)',re.X | re.M)\n\n m1=link_re.match(link_text)\n if m1 == None:\n url = \"\"\n link = link_text\n else:\n url,link=m1.groups()\n\n ########## syntax: link ##############\n result_head = '[%s]' % link\n url=parseLink(link, url, self.file)\n result = '%s(%s)' % (result_head, url)\n text = text[:start_idx] + result + text[p+1:]\n ########## syntax: link END ##############\n else:\n # Anchor not allowed here.\n curr_pos = start_idx + 1\n continue\n\n\n return text", "def update_links():\n hn_soup = get_hn_soup()\n hn_links = get_hn_links(hn_soup)\n store_links(hn_links)\n print ''\n for i in range(len(hn_links)):\n j = i+1\n print_link(j, hn_links[i][0], hn_links[i][1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up a lexical analyzer for `code` in `language`.
def __init__(self, code, language, tokennames='short'):
    self.code = code
    self.language = language
    self.tokennames = tokennames
    self.lexer = None
    # get lexical analyzer for `language`:
    if language in ('', 'text') or tokennames == 'none':
        return
    if not with_pygments:
        raise LexerError('Cannot analyze code. '
                         'Pygments package not found.')
    try:
        self.lexer = get_lexer_by_name(self.language)
    except pygments.util.ClassNotFound:
        raise LexerError('Cannot analyze code. '
                         'No Pygments lexer found for "%s".' % language)
[ "def get_lexems(code):\n\n g.clear()\n lexer()\n g.lexer.input(code.lower())\n result = list(g.lexer)\n return g.error_list, result", "def __init__(self, lang='sl', type='standard'):\n if lang not in ['sl', 'hr', 'sr', 'bg', 'mk']:\n raise Exception(\"Reldi tokenizer is currently only allowed in Slovene, Croatian and Serbian pipelines.\")\n\n check_reldi()\n from classla.submodules.reldi_tokeniser import tokeniser\n self.nlp = tokeniser\n self.lang = lang\n self.type = type", "def fol_language():\n def make_symbols(start):\n \"\"\"E.g., if start='a', then returns ['a1', ..., 'a9', 'b1', ..., 'c9'].\"\"\"\n return [chr(ord(start) + i) + str(n)\n for i in range(0, 3)\n for n in range(1, 10)]\n\n return Language(\n collections.OrderedDict([\n (IDENTITY_SYMBOL, 0),\n (NEGATION_SYMBOL, 1),\n (AND_SYMBOL, 2),\n (OR_SYMBOL, 2),\n (XOR_SYMBOL, 2),\n (IMPLIES_SYMBOL, 2),\n (FOR_ALL_SYMBOL, 2),\n (EXISTS_SYMBOL, 2),\n (RELATION_SYMBOL.format(1), 2), # unary-relation\n (RELATION_SYMBOL.format(2), 3), # binary-relation\n ]),\n predicates=make_symbols('p'),\n constants=make_symbols('a'),\n variables=make_symbols('x'),\n )", "def get_lexer(self, language: str) -> Any:\n import pygments.lexers as lexers # type: ignore\n trace = 'coloring' in g.app.debug\n try:\n # #1520: always define lexer_language.\n lexer_name = 'python3' if language == 'python' else language\n lexer = lexers.get_lexer_by_name(lexer_name)\n except Exception:\n # One of the lexer's will not exist.\n # pylint: disable=no-member\n if trace and language not in self.unknown_languages:\n self.unknown_languages.append(language)\n g.trace(f\"\\nno pygments lexer for {language!r}. Using python 3 lexer\\n\")\n lexer = lexers.Python3Lexer()\n return lexer", "def changeLexer(self, language: str) -> None:\n c = self.c\n wrapper = c.frame.body.wrapper\n w = wrapper.widget # A Qsci.QsciSintilla object.\n self.lexer = self.lexersDict.get(language, self.nullLexer) # type:ignore\n w.setLexer(self.lexer)", "def set_lexer(self) -> Any:\n if self.language == 'patch':\n self.language = 'diff'\n key = f\"{self.language}:{id(self)}\"\n lexer = self.lexers_dict.get(key)\n if not lexer:\n lexer = self.get_lexer(self.language)\n lexer = self.patch_lexer(self.language, lexer)\n self.lexers_dict[key] = lexer\n return lexer", "def lex(code, lexer):\n try:\n return lexer.get_tokens(code)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.lexer import RegexLexer\n if isinstance(lexer, type) and issubclass(lexer, RegexLexer):\n raise TypeError('lex() argument must be a lexer instance, '\n 'not a class')\n raise", "def configure_lexer(self) -> None:\n # c = self.leo_c\n lexer = self\n # To do: use c.config setting.\n # pylint: disable=no-member\n font = QtGui.QFont(\"DejaVu Sans Mono\", 14)\n lexer.setFont(font)", "def __init__(self, language, parent=None):\n super(PreferencesLexer, self).__init__(parent)\n \n # These default font families are taken from QScintilla\n if Globals.isWindowsPlatform():\n self.__defaultFontFamily = \"Courier New\"\n elif Globals.isMacPlatform():\n self.__defaultFontFamily = \"Courier\"\n else:\n self.__defaultFontFamily = \"Bitstream Vera Sans Mono\"\n \n # instantiate a lexer object for the given language\n import QScintilla.Lexers\n self.__lex = QScintilla.Lexers.getLexer(language)\n if self.__lex is None:\n raise PreferencesLexerLanguageError(language)\n \n # read the last stored values from preferences file\n self.__lex.readSettings(Preferences.Prefs.settings, \"Scintilla\")\n if 
self.__lex.hasSubstyles():\n self.__lex.loadSubstyles()", "def analyze(app):\n # Normalize config values:\n source_paths = [app.config.js_source_path] if isinstance(app.config.js_source_path, str) else app.config.js_source_path\n abs_source_paths = [normpath(join(app.confdir, path)) for path in source_paths]\n root_for_relative_paths = root_or_fallback(\n normpath(join(app.confdir, app.config.root_for_relative_js_paths)) if app.config.root_for_relative_js_paths else None,\n abs_source_paths)\n\n # Pick analyzer:\n try:\n analyzer = {'javascript': JsAnalyzer,\n 'typescript': TsAnalyzer}[app.config.js_language]\n except KeyError:\n raise SphinxError('Unsupported value of js_language in config: %s' % app.config.js_language)\n\n # Analyze source code:\n app._sphinxjs_analyzer = analyzer.from_disk(abs_source_paths,\n app,\n root_for_relative_paths)", "def create_test_language(name: str, code: str) -> Language:\r\n lang = Language(name=name, code=code)\r\n lang.full_clean()\r\n lang.save()\r\n return lang", "def __init__(self, language):\n if language.lower() in self.languages_rev:\n self._language = language.lower()\n elif language.upper() in self.languages:\n self._language = self.languages[language.upper()]\n else:\n raise ValueError(\"No such language: %s\" % language)", "def __init__(self, lexeme, token_type, line_num):\n self.type = token_type\n self.lexeme = lexeme\n self.lineNum = line_num\n self.lexicalError = None", "def create_lexer(self):\n raise NotImplementedError()", "def prepare_scan():\n\n # Start a new grammar.\n grammar = LexicalGrammar()\n\n # Regular context.\n query = grammar.add_rule('query')\n\n # Whitespace characters and comments (discarded).\n query.add_token(r'''\n SPACE: [\\s]+ | [#] [^\\0\\r\\n]*\n ''', is_junk=True)\n\n # A sequence of characters encloses in single quotes.\n query.add_token(r'''\n STRING: ['] ( [^'\\0] | [']['] )* [']\n ''', unquote=(lambda t: t[1:-1].replace(\"''\", \"'\")))\n\n # An opening quote character without a closing quote.\n query.add_token(r'''\n BAD_STRING: [']\n ''', error=\"cannot find a matching quote mark\")\n\n # A number in exponential notation.\n query.add_token(r'''\n FLOAT: ( [0-9]+ ( [.] [0-9]* )? | [.] [0-9]+ ) [eE] [+-]? [0-9]+\n ''')\n\n # A number with a decimal point.\n query.add_token(r'''\n DECIMAL:\n [0-9]+ [.] [0-9]* | [.] [0-9]+\n ''')\n\n # An unsigned integer number.\n query.add_token(r'''\n INTEGER:\n [0-9]+\n ''')\n\n # A sequence of alphanumeric characters (not starting with a digit).\n query.add_token(r'''\n NAME: [\\w]+\n ''')\n\n # Operators and punctuation characters. The token code coincides\n # with the token value.\n query.add_token(r'''\n SYMBOL: [~] | [!][~] | [<][=] | [<] | [>][=] | [>] |\n [=][=] | [=] | [!][=][=] | [!][=] |\n [\\^] | [?] | [-][>] | [@] | [:][=] |\n [!] | [&] | [|] | [+] | [-] | [*] | [/] |\n [(] | [)] | [{] | [}] | [.] 
| [,] | [:] | [;] | [$]\n ''', is_symbol=True)\n\n # The `[` character starts an identity constructor.\n query.add_token(r'''\n LBRACKET:\n [\\[]\n ''', is_symbol=True, push='identity')\n\n # An unmatched `]`.\n query.add_token(r'''\n BAD_RBRACKET:\n [\\]]\n ''', error=\"cannot find a matching '['\")\n\n # The input end.\n query.add_token(r'''\n END: $\n ''', is_symbol=True, pop=1)\n\n # Identity constructor context.\n identity = grammar.add_rule('identity')\n\n # Whitespace characters (discarded).\n identity.add_token(r'''\n SPACE: [\\s]+\n ''', is_junk=True)\n\n # Start of a nested label group.\n identity.add_token(r'''\n LBRACKET:\n [\\[] | [(]\n ''', is_symbol=True, push='identity')\n\n # End of a label group or the identity constructor.\n identity.add_token(r'''\n RBRACKET:\n [\\]] | [)]\n ''', is_symbol=True, pop=1)\n\n # Label separator.\n identity.add_token(r'''\n SYMBOL: [.]\n ''', is_symbol=True)\n\n # Unquoted sequence of alphanumeric characters and dashes.\n identity.add_token(r'''\n LABEL: [\\w-]+\n ''')\n\n # A sequence of characters encloses in single quotes.\n identity.add_token(r'''\n STRING: ['] ( [^'\\0] | [']['] )* [']\n ''', unquote=(lambda t: t[1:-1].replace(\"''\", \"'\")))\n\n # An opening quote character without a closing quote.\n identity.add_token(r'''\n BAD_STRING: [']\n ''', error=\"cannot find a matching quote mark\")\n\n # A reference indicator.\n identity.add_token(r'''\n REFERENCE:\n [$]\n ''', is_symbol=True, push='name')\n\n # Unexpected end of input.\n identity.add_token(r'''\n END: $\n ''', error=\"cannot find a matching ']'\")\n\n # A context for an identifier following the `$` indicator\n # in an identity constructor. We need a separate rule because\n # `%NAME` and `%LABEL` productions intersect.\n name = grammar.add_rule('name')\n\n # Whitespace characters (discarded).\n name.add_token(r'''\n SPACE: [\\s]+\n ''', is_junk=True)\n\n # An integer number; not expected here, but ensures that the following\n # `%NAME` production does not start with a digit.\n name.add_token(r'''\n INTEGER:\n [0-9]+\n ''', pop=1)\n\n # A sequence of alphanumeric characters (not starting with a digit).\n name.add_token(r'''\n NAME: [\\w]+\n ''', pop=1)\n\n # Anything else.\n name.add_token(r'''\n OTHER: ()\n ''', is_junk=True, pop=1)\n\n # Add a `%DIRSIG` token in front of `+` and `-` direction indicators\n # to distinguish them from addition/subtraction operators.\n grammar.add_signal('''\n DIRSIG: ( `+` | `-` )+ ( `:` | `,` | `;` | `)` | `}` )\n ''')\n\n # Add `%PIPESIG` in front of `/:` pipe indicator to prevent it from\n # being recognized as a division operator.\n grammar.add_signal('''\n PIPESIG:\n `/` `:`\n ''')\n\n # Add `%LHSSIG` in front of a left-hand side of an assignment expression.\n grammar.add_signal('''\n LHSSIG: `$`? %NAME ( `.` `$`? %NAME )*\n ( `(` ( `$`? %NAME ( `,` `$`? %NAME )* `,`? )? 
`)` )?\n `:=`\n ''')\n\n # Generate and return the scanner.\n return grammar()", "def set_interpreter(self, name):\n self.lang.kill()\n\n try:\n self.lang=langtypes[name]()\n \n except ExecutableNotFoundError as e:\n\n print(e)\n\n self.lang = DummyInterpreter()\n\n s = \"Changing interpreted lanaguage to {}\".format(repr(self.lang))\n print(\"\\n\" + \"=\"*len(s))\n print(s)\n print(\"\\n\" + \"=\"*len(s))\n\n self.lang.start()\n\n return", "def lex(program_str):\n def lex_helper(program_str, acc):\n if program_str == \"\":\n return acc\n\n token, length = match_keywords(program_str, 0)\n if token != None:\n return lex_helper(program_str[length:], acc + [token])\n\n token, length = match_variable(program_str, 0)\n if token != None:\n return lex_helper(program_str[length:], acc + [token])\n\n token, length = match_int(program_str, 0)\n if token != None:\n return lex_helper(program_str[length:], acc + [token])\n\n if program_str[0] == DOUBLE_QUOTE:\n token, length = match_string(program_str, 0)\n return lex_helper(program_str[length:], acc + [token])\n\n if program_str[0] == SPACE:\n return lex_helper(program_str[1:], acc)\n\n raise TOKENIZATION_ERROR(\"Could not consume characters\")\n\n return lex_helper(preprocess(program_str), [])", "def prepare_context(grammar=None, lexer=None, lkt_file=None,\n warning_set=default_warning_set,\n symbol_canonicalizer=None, show_property_logging=False,\n types_from_lkt=False, lkt_semantic_checks=False,\n case_insensitive: bool = False,\n version: Optional[str] = None,\n build_date: Optional[str] = None,\n standalone: bool = False,\n property_exceptions: Set[str] = set()):\n\n # Have a clean build directory\n if P.exists('build'):\n shutil.rmtree('build')\n os.mkdir('build')\n\n # Try to emit code\n ctx = CompileCtx(lang_name='Foo', short_name='foo', lexer=lexer,\n grammar=grammar,\n symbol_canonicalizer=symbol_canonicalizer,\n show_property_logging=show_property_logging,\n lkt_file=lkt_file,\n types_from_lkt=types_from_lkt,\n lkt_semantic_checks=lkt_semantic_checks,\n case_insensitive=case_insensitive,\n version=version,\n build_date=build_date,\n standalone=standalone,\n property_exceptions=property_exceptions)\n ctx.warnings = warning_set\n ctx.pretty_print = pretty_print\n\n return ctx", "def language_mode_grammar(self, grammar_file):\n self.__rg__.json_loads(grammar_file)\n '''learn word2vec embedding'''\n w2v_folder = os.path.join(self.__lm_folder__, 'word2vec')\n self.__rg__.train_word2vec_model_by_prompt(15, 1, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(30, 1, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(50, 1, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(15, 0, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(30, 0, w2v_folder)\n self.__rg__.train_word2vec_model_by_prompt(50, 0, w2v_folder)\n\n if not os.path.exists(self.__lm_folder__):\n os.makedirs(self.__lm_folder__)\n\n '''learn ngram language models'''\n sample_folder = os.path.join(self.__lm_folder__, 'samples')\n if not os.path.exists(sample_folder):\n os.makedirs(sample_folder)\n ngram_lm_folder = os.path.join(self.__lm_folder__, 'ngrams')\n if not os.path.exists(ngram_lm_folder):\n os.makedirs(ngram_lm_folder)\n self.__rg__.train_response_lm(self.__ngram_count_file__, sample_folder, ngram_lm_folder, 5)\n '''learn LDA model'''\n lda_folder = os.path.join(self.__lm_folder__, 'lda')\n if not os.path.exists(lda_folder):\n os.makedirs(lda_folder)\n self.__rg__.train_lda_model(lda_folder, rare_threshold=2, topic_count=50)\n '''learn ngram language 
models of error sentences'''\n read_grammar.train_grammatical_error_lm(self.__ngram_count_file__, self.__ge_folder__, 5)", "def __init__(self, language=\"en-GB\", lang_dir=None):\n lang_dirs = [\"/usr/share/pico/lang/\", _LANG_DIR]\n if lang_dir:\n lang_dirs.insert(0, lang_dir)\n\n self.__e = None\n for ldir in lang_dirs:\n try:\n self.__e = ctts.engine_create(language_dir=ldir, language=language)\n except RuntimeError as ex:\n pass # Try next directory to find language...\n if self.__e:\n break\n\n if self.__e is None:\n raise RuntimeError(\"Could not instantiate TTS engine with language \" + language)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge subsequent tokens of same tokentype. Also strip the final newline (added by pygments).
def merge(self, tokens):
    tokens = iter(tokens)
    (lasttype, lastval) = next(tokens)
    for ttype, value in tokens:
        if ttype is lasttype:
            lastval += value
        else:
            yield(lasttype, lastval)
            (lasttype, lastval) = (ttype, value)
    if lastval.endswith('\n'):
        lastval = lastval[:-1]
    if lastval:
        yield(lasttype, lastval)
[ "def merge(self, tokens):\r\n tokens = iter(tokens)\r\n (lasttype, lastval) = tokens.next()\r\n for ttype, value in tokens:\r\n if ttype is lasttype:\r\n lastval += value\r\n else:\r\n yield(lasttype, lastval)\r\n (lasttype, lastval) = (ttype, value)\r\n if lastval.endswith('\\n'):\r\n lastval = lastval[:-1]\r\n if lastval:\r\n yield(lasttype, lastval)", "def _MergeOrAddToken(self, text, token_type):\n if not text:\n return\n if (not self._tokens or\n self._tokens[-1][self.TOKEN_TYPE_INDEX] != token_type):\n self._tokens.append((token_type, text))\n elif self._tokens[-1][self.TOKEN_TYPE_INDEX] == Token.Markdown.Section:\n # A section header with no content.\n prv_text = self._tokens[-1][self.TOKEN_TEXT_INDEX]\n prv_indent = re.match('( *)', prv_text).group(1)\n new_indent = re.match('( *)', text).group(1)\n if prv_indent == new_indent:\n # Same indentation => discard the previous empty section.\n self._tokens[-1] = (token_type, text)\n else:\n # Insert newline to separate previous header from the new one.\n self._NewLine()\n self._tokens.append((token_type, text))\n else:\n self._tokens[-1] = (token_type,\n self._tokens[-1][self.TOKEN_TEXT_INDEX] + text)", "def recombine_tokens(tokens):\n result = \"\"\n for type, value in tokens:\n if type == Token.Whitespace:\n result += \" \"\n if type == Token.NonbreakableWhitespace:\n result += \"~\"\n elif type == Token.Text:\n result += value\n elif type == Token.EscapedText:\n result += \"\\\\{}\".format(value)\n elif type == Token.Command:\n result += \"\\\\{}\".format(value)\n elif type == Token.InlineFormulaDelimiter:\n result += \"$\"\n elif type == Token.DisplayFormulaDelimiter:\n result += \"$$\"\n elif type == Token.CurlyBraketOpen:\n result += \"{\"\n elif type == Token.CurlyBraketClose:\n result += \"}\"\n elif type == Token.SquareBraketOpen:\n result += \"[\"\n elif type == Token.SquareBraketClose:\n result += \"]\"\n elif type == Token.DoubleNewLine:\n result += \"\\n\\n\"\n return result", "def _merge_entities_with_whitespace_between(\n self,\n text: str,\n analyzer_results: List[RecognizerResult]\n ) -> List[RecognizerResult]:\n merged_results = []\n prev_result = None\n for result in analyzer_results:\n if prev_result is not None:\n if prev_result.entity_type == result.entity_type:\n if re.search(r'^( )+$', text[prev_result.end:result.start]):\n merged_results.remove(prev_result)\n result.start = prev_result.start\n merged_results.append(result)\n prev_result = result\n return merged_results", "def tokenize(self):", "def merge_text_nodes(self):\n ...", "def _annotate_tokens(self, tokens):\n # Make a preliminary pass through the document, marking likely\n # sentence breaks, abbreviations, and ellipsis tokens.\n tokens = self._annotate_first_pass(tokens)\n\n # Make a second pass through the document, using token context\n # information to change our preliminary decisions about where\n # sentence breaks, abbreviations, and ellipsis occurs.\n tokens = self._annotate_second_pass(tokens)\n\n return tokens", "def start_new_line(self) -> None:\n if self._unmatched_accumulator:\n assert self.unmatched_identifier is not None\n\n self.tokens[-1].append(\n Token(\n identifier=self.unmatched_identifier,\n content=''.join(self._unmatched_accumulator),\n position=self._unmatched_pos,\n lineno=self._unmatched_lineno))\n\n self._unmatched_accumulator = []\n\n self.tokens.append([])", "def end_token(self) -> str:", "def _tokenize_source_with_nl(source):\n if source[-1:] != \"\\n\":\n source += \"\\n\"\n nl_added = True\n else:\n nl_added = False\n 
return _tokenize_source(source), nl_added", "def fix_empty_line(source, tokens):\n nb = 0\n for char in reversed(source):\n if char in (\" \", \"\\t\"):\n nb += 1\n else:\n break\n tokens[-1].string = source[-nb:]", "def _get_tokenized_rep(self, field):\n return \" \".join([x.text for x in self._tokenizer.tokenize(field.strip())])", "def _remove_tags(rtf_text):\n # remove all tags except the pars converted to newlines\n re_tag = re.compile(r\"(\\\\.*?) \")\n re_tag_newline = re.compile(r\"(\\\\.*?)(?=\\n)\")\n rtf_text = re_tag.sub(r\"\", rtf_text)\n # there are stragglers because of the newlines. We need two regular expressions\n return re_tag_newline.sub(r\"\", rtf_text)", "def right_truncations (tokens):\n while tokens:\n yield tokens\n tokens = tokens [1 :]", "def untokenize(tokens) :\n if len(tokens)>0 and tokens and hasattr(tokens[0], '__iter__') :\n return [untokenize(t) for t in tokens]\n return \"\".join([\" \"+i if not i.startswith(\"'\") and i not in punctuation else i for i in tokens]).strip()", "def join(self, tokens):\n return \" \".join(tokens)", "def __rehydrate_blank_line(\n context: MarkdownTransformContext,\n current_token: MarkdownToken,\n previous_token: Optional[MarkdownToken],\n ) -> str:\n # if (\n # self.context.block_stack\n # and self.context.block_stack[-1].is_fenced_code_block\n # and (previous_token and previous_token.is_text)\n # ):\n # extra_newline_after_text_token = ParserHelper.newline_character\n # else:\n _ = previous_token, context\n extra_newline_after_text_token = \"\"\n\n current_blank_token = cast(BlankLineMarkdownToken, current_token)\n return f\"{extra_newline_after_text_token}{current_blank_token.extracted_whitespace}{ParserHelper.newline_character}\"", "def emit(self, typ):\n # Check if we have some text in this chunk:\n if self._chunk_index > self._chunk_start:\n text = self._chunk[2][self._chunk_start:self._chunk_index]\n self.current_text.append(text)\n # Grab all pieces of text from start to here:\n val = \"\".join(self.current_text)\n location = self._start_loc\n assert location\n token = Token(typ, val, location)\n self.token_buffer.append(token)\n self._mark_start()", "def _encode_and_add_eos(line, subtokenizer):\n return [tokenizer.CLS_ID] + subtokenizer.encode(line)", "def handle_newline(self, token_type: int) -> None:\n assert self.processor is not None\n if token_type == tokenize.NEWLINE:\n self.run_logical_checks()\n self.processor.reset_blank_before()\n elif len(self.processor.tokens) == 1:\n # The physical line contains only this token.\n self.processor.visited_new_blank_line()\n self.processor.delete_first_token()\n else:\n self.run_logical_checks()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse self.code and yield "classified" tokens.
def __iter__(self):
    if self.lexer is None:
        yield ([], self.code)
        return
    tokens = pygments.lex(self.code, self.lexer)
    for tokentype, value in self.merge(tokens):
        if self.tokennames == 'long':  # long CSS class args
            classes = str(tokentype).lower().split('.')
        else:  # short CSS class args
            classes = [_get_ttype_class(tokentype)]
        classes = [cls for cls in classes if cls not in unstyled_tokens]
        yield (classes, value)
[ "def itercodelines(self):\r\n codeline = CodeLine(0)\r\n for token in self.itertokens():\r\n codeline.append(token)\r\n if codeline.complete:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline\r\n codeline = CodeLine(codeline.end_row + 1)\r\n if codeline.string:\r\n codeline.string = '\\n'.join(s.rstrip(' ') \r\n for s in codeline.string.split('\\n'))\r\n yield codeline", "def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")", "def parse(self, tokens):\n self.logger.debug(\"Parsing some nice C code!\")\n self.init_lexer(tokens)\n self.typedefs = set()\n cu = self.parse_translation_unit()\n self.logger.info(\"Parsing finished\")\n return cu", "def tokenize(code: str):\n\n tk = _Tokenizer(code)\n tk.tokenize()\n\n return tk.tokens", "def __handle_start_fenced_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n token_parts = [output_html]\n if (output_html.endswith(\"</ol>\") or output_html.endswith(\"</ul>\")) or (\n output_html and output_html[-1] != ParserHelper.newline_character\n ):\n token_parts.append(ParserHelper.newline_character)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n True,\n )\n token_parts.append(\"<pre><code\")\n if next_token.extracted_text:\n token_parts.extend([' class=\"language-', next_token.extracted_text, '\"'])\n token_parts.append(\">\")\n return \"\".join(token_parts)", "def tokens(self):\n for t in self._ast.tokens:\n yield t", "def __handle_start_indented_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n _ = next_token\n\n token_parts = []\n if (\n not output_html\n and transform_state.transform_stack\n and transform_state.transform_stack[-1].endswith(\"<li>\")\n ):\n token_parts.append(ParserHelper.newline_character)\n elif output_html and output_html[-1] != ParserHelper.newline_character:\n token_parts.extend([output_html, ParserHelper.newline_character])\n else:\n token_parts.append(output_html)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n True,\n False,\n )\n token_parts.append(\"<pre><code>\")\n return \"\".join(token_parts)", "def _parse(self):\n logger.debug('Parsing file: %s', self.filename)\n self._context = []\n self._last_popped = None\n self.statement_pre_read = None\n self.sw = None\n while self.can_read():\n token = self.next_token()\n if token is None:\n continue\n if token.model is None:\n continue\n if self.find_context_top(cond=lambda x: x != token and x.isinstance(CodeBlock)) is None:\n # this token model has no parents, we must save it separately\n self._save_model(token.model)\n self.parsed = True", "def get_all_tokens(self):\n word = \"\"\n begin_string = False\n i = 0\n\n while i < len(self.code):\n char = self.code[i]\n # Ignore white space\n if char in [' ', '\\t', '\\n'] and begin_string == False: \n i = i + 1 \n word = \"\" \n continue\n \n word = word + char\n if word in KEYWORDS and self.code[i + 1] in SYMBOLS + SKIPABLE:\n self.tokens.append(Token(\"keyword\", word))\n word = \"\"\n elif char == '\"' or begin_string: # Check for string\n if char == '\"':\n begin_string = not begin_string\n if not begin_string:\n 
self.tokens.append(Token(\"stringConstant\", word[1:-1]))\n word = \"\"\n elif word in SYMBOLS:\n self.tokens.append(Token(\"symbol\", word))\n word = \"\"\n elif self.code[i + 1] in SKIPABLE + SYMBOLS:\n if word.isdigit():\n self.tokens.append(Token(\"integerConstant\", word))\n else:\n self.tokens.append(Token(\"identifier\", word))\n word = \"\"\n i = i + 1", "def tokenize_python(code):\n codeReader = BytesIO(code.encode('utf-8')).readline\n raw_tokens = tokenize.tokenize(codeReader)\n tokens = []\n last = None\n for token in raw_tokens:\n if token.type == 62: continue # this is an encoding token. Skip it.\n if last:\n # the python tokenizer doesn't always include whitespace\n # so when we detect whitespace is missing, we put it back in.\n # Uses the \"last\" token and checks for space between the end\n # and the start of the current token\n same_line = last.end[0] == token.start[0]\n same_pos = last.end[1] == token.start[1]\n is_start_of_line = token.start[1] == 0\n if not same_line and not is_start_of_line:\n whitespace = token.line[:token.start[1]]\n add_space(tokens, whitespace)\n elif same_line and not same_pos:\n whitespace = token.line[last.end[1]:token.start[1]]\n add_space(tokens, whitespace)\n tokens.append({\n 'text':token.string,\n 'type':get_token_type(token)\n })\n last = token\n return tokens", "def compile_class(self):\n self.tokenizer.advance() # class\n self.class_name = self.tokenizer.advance()[TOKEN_NAME]\n self.tokenizer.advance() # {\n # compile the variables declaration part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods o functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine_dec()\n self.tokenizer.advance() # }", "def tokenize(self):", "def get_tokens(code: str) -> List[Tuple[str, Union[str, int, float]]]:\n tokens.clear()\n parser = Lark(tokens_grammar, parser=\"lalr\", transformer=TestTransformer())\n try:\n parser.parse(code)\n except:\n tokens.append(('UNDEFINED_TOKEN',))\n return tokens", "def __handle_end_fenced_code_block_token(\n cls, output_html, next_token, transform_state\n ):\n fenced_token = transform_state.actual_token_index - 1\n while not transform_state.actual_tokens[fenced_token].is_fenced_code_block:\n fenced_token -= 1\n\n # TODO can we store this in the begin so we don't have to compute it again?\n inner_tag_parts = [\"<code\"]\n if transform_state.actual_tokens[fenced_token].extracted_text:\n inner_tag_parts.extend(\n [\n ' class=\"language-',\n transform_state.actual_tokens[fenced_token].extracted_text,\n '\"',\n ]\n )\n inner_tag_parts.append(\">\")\n inner_tag = \"\".join(inner_tag_parts)\n\n POGGER.debug(f\"inner_tag>>:{inner_tag}:<<\")\n POGGER.debug(f\"output_html>>:{output_html}:<<\")\n POGGER.debug(\n f\"last_token>>:{transform_state.actual_tokens[transform_state.actual_token_index - 1]}:<<\"\n )\n\n token_parts = [output_html]\n if (\n not output_html.endswith(inner_tag)\n and output_html[-1] != ParserHelper.newline_character\n ):\n token_parts.append(ParserHelper.newline_character)\n POGGER.debug(\"#1\")\n elif (\n output_html[-1] == ParserHelper.newline_character\n and transform_state.last_token.is_text\n ):\n POGGER.debug(\"#2:$\", transform_state.last_token)\n if not (\n next_token.was_forced\n and 
transform_state.last_token.token_text.endswith(\"\\n\\x03\")\n ):\n token_parts.append(ParserHelper.newline_character)\n transform_state.is_in_code_block, transform_state.is_in_fenced_code_block = (\n False,\n False,\n )\n token_parts.extend([\"</code></pre>\", ParserHelper.newline_character])\n return \"\".join(token_parts)", "def parse_code(code: List[str]) -> List[Tuple[str, int]]:\n return [parse_line(line) for line in code]", "def lex(code, lexer):\n try:\n return lexer.get_tokens(code)\n except TypeError:\n # Heuristic to catch a common mistake.\n from pip._vendor.pygments.lexer import RegexLexer\n if isinstance(lexer, type) and issubclass(lexer, RegexLexer):\n raise TypeError('lex() argument must be a lexer instance, '\n 'not a class')\n raise", "def _create_tokenize_gen(self, a_starting_pos=-1):\n ordered_tokens = self._tok_c.get_ordered_tokens_list()\n tokens_re = self._tok_c.get_tokens_re()\n \n # position 0 in io stream\n if a_starting_pos != -1:\n self._io_prog.seek(a_starting_pos)\n \n for line in self._io_prog:\n #print(\"line to read=[%s].len(line)=%d\\n\"%(line,len(line)))\n \n self._line_num += 1\n \n self._file_pos = self._io_prog.tell()\n \n self._line_pos, max = 0, len(line)\n \n while self._line_pos < max:\n \n b_found = False\n # This code provides some short-circuit code for whitespace, tabs, and other ignored characters\n if line[self._line_pos] in IGNORED_LITERALS:\n self._line_pos += 1\n continue\n \n #print(\"Try to match from [%s]\\n\"%(line[pos:]))\n \n for key in ordered_tokens:\n regexp = tokens_re[key]\n match = regexp.match(line, self._line_pos)\n if match:\n \n val = match.group()\n start, end = self._line_pos, (self._line_pos+len(val)-1)\n \n # when it is an ID check if this is a WCID\n if key == TokenCreator.TokenNames.ID:\n type = self._get_ID_type(val)\n else:\n type = key\n \n self._tok = Token(type, val, start, end, self._line_num, line, self._file_pos)\n \n #update pos\n self._line_pos = end +1\n \n #print(\"Token = %s\\n\"%(self._tok))\n b_found = True\n \n #return token using yield and generator\n yield self._tok\n \n #found on so quit for loop\n break\n \n \n if not b_found:\n raise IllegalCharacterError(self._line_num, line, self._line_pos) \n \n # All lines have been read return ENDMARKER Token\n self._tok = ENDMARKERToken(self._line_num)\n yield self._tok", "def parse(code, path=None):\n\n\tparser = Parser(code, path)\n\tast = parser.run()\n\treturn ast", "def __handle_inline_code_span_token(cls, output_html, next_token, transform_state):\n _ = transform_state\n\n return \"\".join(\n [\n output_html,\n \"<code>\",\n ParserHelper.resolve_all_from_text(next_token.span_text),\n \"</code>\",\n ]\n )", "def parse(self, token_list):\n\n classname = \"\"\n tlist = None\n\n bracket_stack = []\n\n # consume everything till the first TokenSymbol\n p1 = 0\n p2 = 0\n\n while p1 < len(token_list) and not isinstance(token_list[p1], t.TokenSymbol):\n p1 += 1\n\n if not isinstance(token_list[p1+1], t.TokenFunctionBracket):\n return None\n\n if isinstance(token_list[p1+2], t.TokenString):\n classname = token_list[p1+2].value\n\n if not isinstance(token_list[p1+3], t.TokenComma):\n return None\n\n if not isinstance(token_list[p1+4], t.TokenBlockBracket):\n return None\n\n p1 = p1 + 4\n p2 = p1 + 1\n bracket_stack.append(token_list[p1].value)\n\n\n print 'entering loop', bracket_stack, p2\n\n while p2 < len(token_list) and bracket_stack:\n tok = token_list[p2]\n if isinstance(tok, t.TokenBlockBracket) and tok.value == '{':\n bracket_stack.append('{')\n elif 
isinstance(tok, t.TokenBlockBracket) and tok.value == '}':\n bracket_stack.pop()\n\n p2 += 1\n\n tlist = token_list[p1:p2]\n\n return Eclass(classname, tlist)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }