query: string (lengths 9–3.4k)
document: string (lengths 9–87.4k)
metadata: dict
negatives: sequence (lengths 4–101)
negative_scores: sequence (lengths 4–101)
document_score: string (lengths 3–10)
document_rank: string (102 classes)
Validate that the children values, passed as either a file or a JSON string, are correct.
def validate_children(self, source, **kwargs):
    # TODO cache this loaded data keyed on a hashed version of kwargs
    children = self._load_json("children", source, **kwargs)
    self._validate_against_schema("children", children)

    strand = getattr(self, "children", [])

    # Loop the children and accumulate values so we have an O(1) check
    children_keys = {}
    for child in children:
        children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1

    # Check there is at least one child for each item described in the strand
    # TODO add max, min num specs to the strand schema and check here
    for item in strand:
        strand_key = item["key"]
        if children_keys.get(strand_key, 0) <= 0:
            raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")

    # Loop the strand and add unique keys to dict so we have an O(1) check
    strand_keys = {}
    for item in strand:
        strand_keys[item["key"]] = True

    # Check that each child has a key which is described in the strand
    for child in children:
        child_key = child["key"]
        if not strand_keys.get(child_key, False):
            raise exceptions.InvalidValuesContents(
                f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
            )

    # TODO Additional validation that the children match what is set as required in the Twine
    return children
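A minimal, runnable sketch of the key-matching logic the function above relies on. The strand, the children payload, and the exception class here are illustrative stand-ins (the real ones come from the twine definition and the package's exceptions module), but the two O(1) dictionary checks mirror the implementation.

# Hypothetical stand-ins for the twine's data and exception; illustrative only.
class InvalidValuesContents(Exception):
    pass

strand = [{"key": "gis_data"}, {"key": "met_mast_data"}]    # what the twine expects
children = [{"key": "gis_data"}, {"key": "met_mast_data"}]  # what the user supplied

# Accumulate child keys once so each strand lookup below is O(1).
children_keys = {}
for child in children:
    children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1

# Every strand item must be matched by at least one child...
for item in strand:
    if children_keys.get(item["key"], 0) <= 0:
        raise InvalidValuesContents(f"No children found matching the key {item['key']}")

# ...and every child key must be declared in the strand.
strand_keys = {item["key"] for item in strand}
for child in children:
    if child["key"] not in strand_keys:
        raise InvalidValuesContents(f"Child with key '{child['key']}' not in the 'children' strand.")

print("Children validated against the strand.")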
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_json(self):\n pass", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def _validate(self, path, obj):\r\n if isinstance(obj, str):\r\n if path[-1] != \"pattern\":\r\n self._validate_string(path, obj)\r\n elif isinstance(obj, dict):\r\n for key, value in obj.items():\r\n new_path = path.copy()\r\n new_path.append('%s' % key)\r\n self._validate_string(new_path, key, True)\r\n self._validate(new_path, value)\r\n elif isinstance(obj, list):\r\n for index, value in enumerate(obj):\r\n new_path = path.copy()\r\n new_path.append('%d' % index)\r\n self._validate(new_path, value)\r\n elif isinstance(obj, bool):\r\n pass\r\n elif isinstance(obj, int):\r\n pass\r\n elif isinstance(obj, float):\r\n pass\r\n elif isinstance(obj, type(None)):\r\n pass\r\n else:\r\n print(type(obj))\r\n pass\r\n # raise Exception()\r", "def check_children_attributes(self, branch):\n attributes = branch.get_attributes()\n for attr in attributes:\n if not isinstance(attributes[attr], str) and not isinstance(attributes[attr], list) :\n print('Attribute '+str(attr)+' of '+ branch.__class__.__name__ + ' should be str or list')\n self.assertTrue(False)\n children = branch.get_children()\n for child in children:\n self.check_children_attributes(child)", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. 
%s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def assert_correct_json_response(self, json_response):\r\n self.assertIsNotNone(json_response['display_name'])\r\n self.assertIsNotNone(json_response['id'])\r\n self.assertIsNotNone(json_response['category'])\r\n self.assertIsNotNone(json_response['is_draft'])\r\n self.assertIsNotNone(json_response['is_container'])\r\n if json_response['is_container']:\r\n for child_response in json_response['children']:\r\n self.assert_correct_json_response(child_response)\r\n else:\r\n self.assertFalse('children' in json_response)", "def validate(self, config_json):\n pass", "def validate(self, root):\n if not isinstance(root, list):\n parser.error('Root object is not a list')\n if root:\n expected_type = type(root[0])\n if expected_type not in [list, dict]:\n parser.error('First row is a {}, not a list or dictionary'.format(expected_type))\n for item in root:\n if type(item) != expected_type:\n parser.error('Row type does not match first row')\n for col in item:\n if type(col) not in [str, unicode, int, float, bool]:\n parser.error('Row column is unexpected type')", "def _assert_valid_deep(value):\n if isinstance(value, dict):\n for v in value.itervalues():\n _assert_valid_deep(v)\n elif isinstance(value, list):\n for v in value:\n _assert_valid_deep(v)\n else:\n if hasattr(value, \"assert_valid\"):\n value.assert_valid()", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def _check_allowed_values(self, key: str, value: Any):\n allowedValues = 
from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedValues\", None)\n if allowedValues is not None and value not in allowedValues:\n raise Exception(\n f\"Value '{value}' is not an allowed value for '{key}'. Allowed values are: {', '.join(allowedValues)}\"\n )", "def _check_children(self):\n def froze_list(l):\n return frozenset(frozenset(child) for child in l)\n children, values = self._get_children()\n if froze_list(children) != froze_list(self.children) or frozenset(values) != frozenset(self.values):\n self._children_watcher()", "def test_load_json_value_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('\"two\"')\n\n assert load_json(fname) == \"two\"\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a dict\"\n ):\n load_json_object(fname)\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)", "def _check_data_type(self, key: str, value: Any):\n allowedDataType = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedDataType\", None)\n if allowedDataType is not None and not isinstance(value, allowedDataType):\n raise Exception(\n f\"Value '{value}' is not of the correct type. The allowed data type is: {allowedDataType.__name__}\"\n )", "def is_valid_child(self, child):\n return isinstance(child, baseobject.PBXBaseObject) \\\n and child.isa in self.allow_children_types()", "def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path", "def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)", "def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]\n return []", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def validate_fields(self, tree):\n # Check fields\n fields = list(tree.keys())\n for k in self.fields:\n assert (k in fields)", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. 
Exiting.\"\n exit(-1)\n return False", "def test_json_reader_data_contents(process_data):\n json_data = process_data(file_name_or_type='scooter_data.json')\n for val in json_data:\n assert(isinstance(val['id'], int))\n assert(isinstance(val['name'], str))\n assert(isinstance(val['vin_number'], str))\n assert(isinstance(val['electric_scooter'], bool))\n assert(isinstance(val['city'], str))\n assert(isinstance(val['usage'], str))\n assert(isinstance(val['cost_usd'], float))\n assert(isinstance(val['total_years_of_use'], int))", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" \"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def assert_sanity(self):\n # Maybe in the future: Check whether commands can be found in path\n # For now, let the OS handle this\n\n # Check whether command dictionary has a correct structure. Namely,\n # that:\n #\n # 1. Toplevel children may only be called \"commands\" or \"paths\".\n if len(self.command_dict) > 2:\n raise CommandDictSanityError(\"Only two toplevel children allowed.\")\n for key in self.command_dict.keys():\n if key not in (\"commands\",\"paths\"):\n raise CommandDictSanityError(\n f\"Invalid toplevel child found: {key}.\")\n # 2. \"paths\" node must be a list, and must only contain string\n # children.\n if \"paths\" in self.command_dict:\n if type(self.command_dict[\"paths\"]) != list:\n raise CommandDictSanityError(\n \"The \\\"paths\\\" node must be a list.\")\n for path in self.command_dict[\"paths\"]:\n if type(path) != str:\n raise CommandDictSanityError(\"Defined paths must be strings.\")\n # 3. \"commands\" node chilren (henceforth command nodes) must be\n # dictionaries, \n # 4. and may contain only the following keys:\n # \"regex\", \"cmd\", \"help\", \"markdown_convert\", \"formatted\",\n # \"code\" and \"split\".\n # 5. The command node children may only be strings.\n # 6. 
Command node children with keys \"markdown_convert\",\n # \"formatted\" or \"code\" may only be defined as \"true\" or as\n # \"false\".\n if \"commands\" in self.command_dict.keys():\n for com in self.command_dict[\"commands\"]:\n # Implement rule 3\n if type(self.command_dict[\"commands\"][com]) != dict:\n raise CommandDictSanityError(\n \"Defined commands must be dictionaries.\")\n for opt in self.command_dict[\"commands\"][com].keys():\n # Implement rule 4\n if opt not in (\"regex\",\n \"cmd\",\n \"help\",\n \"markdown_convert\",\n \"formatted\",\n \"code\",\n \"split\"):\n raise CommandDictSanityError(\n f\"In command \\\"{com}\\\", invalid option found: \" \\\n f\"\\\"{opt}\\\".\")\n # Implement rule 6\n elif opt in (\"markdown_convert\", \"formatted\", \"code\"):\n if type(self.command_dict[\"commands\"][com][opt]) != bool:\n raise CommandDictSanityError(\n f\"In command \\\"{com}\\\", invalid value for option \"\n f\"\\\"{opt}\\\" found: \" \\\n f\"\\\"{self.command_dict['commands'][com][opt]}\\\"\")\n # Implement rule 5\n else:\n if type(self.command_dict[\"commands\"][com][opt]) != str:\n raise CommandDictSanityError(\n f\"In command \\\"{com}\\\", command option \" \\\n f\"\\\"{opt}\\\" must be a string.\")\n\n return", "def testCheck(self):\r\n from pydsl.Grammar.Definition import JsonSchema\r\n from pydsl.Check import JsonSchemaChecker\r\n schema = {\r\n \"type\" : \"string\",\r\n \"items\" : {\r\n \"type\" : [\"string\", \"object\"],\r\n \"properties\" : {\r\n \"foo\" : {\"enum\" : [1, 3]},\r\n #\"bar\" : { #See https://github.com/Julian/jsonschema/issues/89\r\n # \"type\" : \"array\",\r\n # \"properties\" : {\r\n # \"bar\" : {\"required\" : True},\r\n # \"baz\" : {\"minItems\" : 2},\r\n # }\r\n #}\r\n }\r\n }\r\n }\r\n grammardef = JsonSchema(schema)\r\n checker = JsonSchemaChecker(grammardef)\r\n self.assertTrue(checker.check(\"a\"))\r\n self.assertFalse(checker.check([1, {\"foo\" : 2, \"bar\" : {\"baz\" : [1]}}, \"quux\"]))", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def validate(self, value):\n if isinstance(value, dict):\n if set(value.keys()) == {\"type\", \"coordinates\"}:\n if value[\"type\"] != 
self._type:\n self.error(f'{self._name} type must be \"{self._type}\"')\n return self.validate(value[\"coordinates\"])\n else:\n self.error(\n \"%s can only accept a valid GeoJson dictionary\"\n \" or lists of (x, y)\" % self._name\n )\n return\n elif not isinstance(value, (list, tuple)):\n self.error(\"%s can only accept lists of [x, y]\" % self._name)\n return\n\n validate = getattr(self, \"_validate_%s\" % self._type.lower())\n error = validate(value)\n if error:\n self.error(error)", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def test_json(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(input_file_path), 'r') as input_file:\n with open(attach_path(answer_file_path), 'r') as answer_file:\n assert str(read_json(input_file.read().strip())) == answer_file.read().strip()", "def validate_input(json_object):\n try:\n if type(json_object) is not list:\n return False\n for machine_config in json_object:\n if (type(machine_config[\"ip\"]) is not str) or not validate_ip(machine_config[\"ip\"]):\n return False\n if type(machine_config[\"community\"]) is not str:\n return False\n if type(machine_config[\"config\"]) is not list:\n return False\n for actual_config in machine_config[\"config\"]:\n if (type(actual_config[\"segment\"]) is not int) or not validate_segment(actual_config[\"segment\"]):\n return False\n if type(actual_config[\"ports\"]) is not list:\n return False\n for actual_port in actual_config[\"ports\"]:\n if (type(actual_port) is not int) or not validate_port(actual_port):\n return False\n except KeyError as ke:\n # Formato incorrecto debido a que algun campo no existe\n return False\n # Todos los campos existen y estan bien\n return True", "def check_data_struct():\n if not os.path.exists(PROJECT_ROOT+'/data'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data')\n\n if not os.path.exists(PROJECT_ROOT+'/data/CUB_200_2011'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data/CUB_200_2011')\n\n if not os.path.exists(PROJECT_ROOT+'/data/segmentations'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data/segmentations')\n\n if not os.path.exists(PROJECT_ROOT+'/data/attributes.txt'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data/attributes.txt')", "def Validate(self, relative_file, contents):\n pass", "def validateProp(filename):\n\n # does the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Prop file (%s) does not exist' % (filename))\n return False\n\n # can I read it\n try:\n propFile = open(filename, 'r')\n prop = json.load(propFile)\n propFile.close()\n except (ValueError, OSError):\n LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))\n return False\n\n # does the prop have the correct value\n for key in ('name', 'md5', 'description', 'size', 'contact'):\n if (key not in prop):\n LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))\n return False\n\n return True", "def check_job_json(job_info):\n job_type_list = [job_type.value for _, job_type in JobType.__members__.items()]\n if 'source_id' 
not in job_info:\n raise ValueError(\"Json string Errors, key:source_id not found.\")\n if 'job_id' not in job_info:\n raise ValueError(\"Json string Errors, key:job_id not found.\")\n if 'job_type' not in job_info or not job_info['job_type']:\n raise ValueError(\"Json string Errors, key:job_type not found.\")\n if job_info['job_type'] not in job_type_list:\n raise ValueError(\"Invalid job type: {}.\".format(job_info['job_type']))\n if 'job_content' not in job_info:\n raise ValueError(\"Json string Errors, key:job_content not found.\")", "def _clean_child(self):\n if not self.parent_id:\n raise ValidationError(_(\"A child product needs a parent.\"))\n if self.parent_id and not self.parent.is_parent:\n raise ValidationError(\n _(\"You can only assign child products to parent products.\")\n )\n if self.product_class:\n raise ValidationError(_(\"A child product can't have a product class.\"))\n if self.pk and self.categories.exists():\n raise ValidationError(_(\"A child product can't have a category assigned.\"))\n # Note that we only forbid options on product level\n if self.pk and self.product_options.exists():\n raise ValidationError(_(\"A child product can't have options.\"))", "def validate(self, json_data):\n self._errors = None\n success = True\n for item in self._schema:\n if not item.validate(json_data):\n success = False\n\n return success", "def validate_json_schema(path, name, data, schema, full_schema=not is_extension):\n errors = 0\n\n # The standard repository has an example extension.\n if 'docs/examples/organizations/organizational_units/ocds_divisionCode_extension' in path:\n full_schema = False\n\n # Kingfisher Collect uses JSON Schema files to validate Scrapy items.\n code_repo = repo_name == 'kingfisher-collect'\n\n # Non-OCDS schema don't:\n # * pair \"enum\" and \"codelist\"\n # * disallow \"null\" in \"type\" of \"items\"\n # * UpperCamelCase definitions and lowerCamelCase properties\n # * allow \"null\" in the \"type\" of optional fields\n # * include \"id\" fields in objects within arrays\n # * require \"title\", \"description\" and \"type\" properties\n json_schema_exceptions = {\n 'json-schema-draft-4.json',\n 'meta-schema.json',\n 'meta-schema-patch.json',\n }\n ocds_schema_exceptions = {\n 'dereferenced-release-schema.json',\n # standard-maintenance-scripts\n 'codelist-schema.json',\n 'extension-schema.json',\n # extension_registry\n 'extensions-schema.json',\n 'extension_versions-schema.json',\n # spoonbill\n 'ocds-simplified-schema.json',\n }\n schema_exceptions = json_schema_exceptions | ocds_schema_exceptions\n\n validate_items_type_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n '/definitions/AmendmentUnversioned/properties/changes/items', # deprecated\n '/definitions/record/properties/releases/oneOf/0/items', # 1.1\n },\n }\n\n def validate_codelist_enum_allow_missing(codelist):\n return is_extension and codelist in external_codelists\n\n validate_codelist_enum_kwargs = {\n 'fallback': {\n '/definitions/Metric/properties/id': ['string'],\n '/definitions/Milestone/properties/code': ['string', 'null'],\n },\n 'allow_missing': validate_codelist_enum_allow_missing,\n }\n\n validate_letter_case_kwargs = {\n 'property_exceptions': {'former_value'}, # deprecated\n 'definition_exceptions': {'record'}, # 1.1\n }\n\n def validate_metadata_presence_allow_missing(pointer):\n return 'links' in pointer.split('/') or code_repo # ocds_pagination_extension\n\n validate_metadata_presence_kwargs = {\n 'allow_missing': 
validate_metadata_presence_allow_missing,\n }\n\n def validate_object_id_allow_missing(pointer):\n parts = pointer.split('/')\n return 'versionedRelease' in parts or parts[-1] in {\n 'changes', # deprecated\n 'records', # uses `ocid` not `id`\n '0', # linked releases\n }\n\n validate_object_id_kwargs = {\n 'allow_missing': validate_object_id_allow_missing,\n 'allow_optional': {\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Amendment',\n '/definitions/Organization',\n '/definitions/OrganizationReference',\n '/definitions/RelatedProcess',\n },\n }\n if repo_name == 'infrastructure':\n validate_object_id_kwargs['allow_optional'].add('/definitions/Classification')\n\n validate_null_type_kwargs = {\n # OCDS allows null. OC4IDS disallows null.\n 'no_null': repo_name == 'infrastructure' or code_repo,\n 'allow_object_null': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n # See https://github.com/open-contracting/standard/pull/738#issuecomment-440727233\n '/definitions/Organization/properties/details',\n },\n 'allow_no_null': {\n '/definitions/Amendment/properties/changes/items/properties/property', # deprecated\n\n # Children of fields with omitWhenMerged.\n '/definitions/Link/properties/rel',\n '/definitions/Link/properties/href',\n\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Organization/properties/id',\n '/definitions/OrganizationReference/properties/id',\n '/definitions/RelatedProcess/properties/id',\n },\n }\n\n validate_array_items_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n '/definitions/Location/properties/geometry/properties/coordinates/items', # recursion\n },\n }\n\n validate_deep_properties_kwargs = {\n 'allow_deep': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n },\n }\n if is_extension: # avoid repetition in extensions\n validate_deep_properties_kwargs['allow_deep'].add('/definitions/Item/properties/unit')\n\n validator = Draft4Validator(schema, format_checker=FormatChecker())\n\n errors += validate_schema(path, data, validator)\n if errors:\n warnings.warn(f'{path} is not valid JSON Schema ({errors} errors)')\n\n if name not in schema_exceptions:\n if 'versioned-release-validation-schema.json' in path:\n validate_items_type_kwargs['additional_valid_types'] = ['object']\n errors += validate_array_items(path, data, **validate_array_items_kwargs)\n errors += validate_items_type(path, data, **validate_items_type_kwargs)\n if not code_repo:\n errors += validate_codelist_enum(path, data, **validate_codelist_enum_kwargs)\n errors += validate_letter_case(path, data, **validate_letter_case_kwargs)\n errors += validate_merge_properties(path, data)\n\n # `full_schema` is set to not expect extensions to repeat information from core.\n if full_schema:\n exceptions_plus_versioned = schema_exceptions | {\n 'versioned-release-validation-schema.json',\n }\n\n exceptions_plus_versioned_and_packages = exceptions_plus_versioned | {\n 'project-package-schema.json',\n 'record-package-schema.json',\n 'release-package-schema.json',\n }\n\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_ref(path, data)\n\n if name not in exceptions_plus_versioned:\n # Extensions aren't expected to repeat `title`, `description`, `type`.\n errors += validate_metadata_presence(path, data, 
**validate_metadata_presence_kwargs)\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_object_id(path, jsonref.replace_refs(data), **validate_object_id_kwargs)\n\n if name not in exceptions_plus_versioned_and_packages:\n # Extensions aren't expected to repeat `required`. Packages don't have merge rules.\n errors += validate_null_type(path, data, **validate_null_type_kwargs)\n # Extensions aren't expected to repeat referenced codelist CSV files\n # TODO: This code assumes each schema uses all codelists. So, for now, skip package schema.\n errors += validate_schema_codelists_match(path, data, cwd, is_extension, is_profile, external_codelists)\n\n else:\n # Don't count these as errors.\n validate_deep_properties(path, data, **validate_deep_properties_kwargs)\n\n assert not errors, 'One or more JSON Schema files are invalid. See warnings below.'", "def test_invalid_data(self):\n\n json_data = {\n \"input\" : {\n 'version': 'BAD',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def assertStructure(self, root, dirDict):\n children = [x.basename() for x in root.children()]\n for x in dirDict:\n child = root.child(x)\n if isinstance(dirDict[x], dict):\n self.assertTrue(child.isdir(), \"%s is not a dir!\"\n % (child.path,))\n self.assertStructure(child, dirDict[x])\n else:\n a = child.getContent().replace(os.linesep, '\\n')\n self.assertEquals(a, dirDict[x], child.path)\n children.remove(x)\n if children:\n self.fail(\"There were extra children in %s: %s\"\n % (root.path, children))", "def check_attachment_fields(self):\n for field_name, field in self.fields.items():\n if isinstance(field, serializers.ListSerializer):\n if hasattr(field.child, \"field\"):\n for child_name, child in field.child.field.items():\n self.handle_attachment_field(child, child_name)\n else:\n self.handle_attachment_field(field, field_name)", "def test_json_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n # empty - should parse\n spec = {}\n read_wrapper(spec, ps)\n\n # empty array - should parse\n spec = {'constraints': []}\n read_wrapper(spec, ps)\n\n # empty element - should fail\n spec = {'constraints': [{}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching block - should fail\n spec = {'constraints': [{'block': 'a'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching variable - should fail\n spec = {'constraints': [{'variable': 'c'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner option - should fail\n spec = {'constraints': [{'option': 'a1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner block - should parse\n spec = {'constraints': [{'block': 'A', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # block and option - should parse\n spec = {'constraints': [{'block': 'A', 'option': 'a1', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variable and option - should parse\n spec = {'constraints': [{'variable': 'a', 'option': '2.5', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # weird option - should parse\n # fixme: {'option': '[1,2]'} will fail\n spec = {'constraints': 
[{'variable': 'c', 'option': '[1, 2]', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H==b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H.index==1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)", "def _check_fields(self, content: JsonDict) -> None:\n self.assertIn(\"id\", content)\n self.assertIn(\"received_ts\", content)\n self.assertIn(\"room_id\", content)\n self.assertIn(\"event_id\", content)\n self.assertIn(\"user_id\", content)\n self.assertIn(\"sender\", content)\n self.assertIn(\"canonical_alias\", content)\n self.assertIn(\"name\", content)\n self.assertIn(\"event_json\", content)\n self.assertIn(\"score\", content)\n self.assertIn(\"reason\", content)\n self.assertIn(\"auth_events\", content[\"event_json\"])\n self.assertIn(\"type\", content[\"event_json\"])\n self.assertIn(\"room_id\", content[\"event_json\"])\n self.assertIn(\"sender\", content[\"event_json\"])\n self.assertIn(\"content\", content[\"event_json\"])", "def _check_for_children(self):\n if len(self.node.get_children()) > 0:\n raise ValueError('This Node is not a leaf node. Children of this node '\n 'are {}'.format(self.client.get_children()))", "def validate_json() -> bool:\n with Path(ROOT_DIR, \"seals\", \"seals.json\").open() as f:\n seals = json.load(f)\n\n seals_in_json = [k for k, v in seals.items() if v[\"has_seal\"]]\n\n seals = [\n x.split(\"/\")[-1][:-4] for x in glob.glob(f\"{ROOT_DIR}/seals/orig/*\")\n ]\n missing_seals = sorted(list(set(seals_in_json) ^ set(seals)))\n if not missing_seals:\n return True\n\n raise Exception(f\"Missing entry for: {' '.join(missing_seals)}\")", "def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')", "def json_attribs_check(func):\n @wraps(func)\n def inner_func(jsonStr):\n gslvtsSchema = {\"type\":\"object\",\n \"properties\":{\n \"tagID\": {\"type\":\"number\"}, \n \"UTC\": {\"type\":\"string\",\n \"format\":\"date-time\"}\n\t\t\t},\n\t\t\t\"required\":[\"tagID\",\"UTC\"]\n }\n try:\n jsonGslvts=json.loads(jsonStr)\n for elem in jsonGslvts:\n try: \n validate(elem, gslvtsSchema, format_checker=FormatChecker())\n except ValidationError, e:\n print \"[-] Invalid json post data. 
Check it, brah.\"\n print e\n raise AttributeError \n except (AttributeError, ValueError):\n print \"[-] IDk what that was, but it wasn't JSON.\"\n raise AttributeError\n\n return(func(jsonStr)) \n return inner_func", "def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))", "def test_correct_upload_item(upload_items: List[JSONDict]) -> None:\n validated = UploadItem(**upload_items[0])\n assert validated.dict() == upload_items[0]", "def test_build_children_mismatch_is_dir(self):\n r_a = SystemFile(\"a\", 0, True)\n r_aa = SystemFile(\"aa\", 0, True)\n r_a.add_child(r_aa)\n l_a = SystemFile(\"a\", 0, True)\n l_aa = SystemFile(\"aa\", 0, False)\n l_a.add_child(l_aa)\n self.model_builder.set_remote_files([r_a])\n self.model_builder.set_local_files([l_a])\n with self.assertRaises(ModelError) as context:\n self.model_builder.build_model()\n self.assertTrue(str(context.exception).startswith(\"Mismatch in is_dir between child\"))", "def check_attributes(self):\n for key in self.json_parsed_file.keys():\n if key not in self.HARDCODED_REQUIRED_JSON_FIELDS:\n print(key)\n self.output_message += \"All JSON attribute key are not correct\\n\"\n self.is_parsed_pdf_valid = False\n\n for key in self.HARDCODED_REQUIRED_JSON_FIELDS:\n if key not in self.json_parsed_file.keys():\n self.output_message += \"All required attribute keys are not in the parsed information\\n\"\n self.is_parsed_pdf_valid = False", "def test_load_unsupported_type(self):\n expected = {\n \"name\": \"Kevin\",\n \"age\": 21,\n \"pet\": {\n \"name\": \"Trippy Jack\",\n \"age\": 20762,\n \"__type__\": \"hyperdimensional.hamster\"\n }\n }\n with open('tests/unsupported_type.json', 'r') as json_file:\n self.assertEqual(expected, morejson.load(json_file))", "def assertValidJSON(self, data):\r\n # Just try the load. 
If it throws an exception, the test case will fail.\r\n self.serializer.from_json(data)", "def validator(data_json):\n fields = spec[\"fields\"]\n data = json.loads(data_json, object_pairs_hook=collections.OrderedDict)\n for k, v in fields.items():\n if v.get(\"required\"):\n found = False\n if k in data:\n found = True\n elif \".\" in k:\n # Dotted keys could be nested, like ecs.version\n subkeys = k.split(\".\")\n subval = data\n for subkey in subkeys:\n subval = subval.get(subkey, {})\n if subval:\n found = True\n if not found:\n raise ValidationError(\"Missing required key {}\".format(k))\n if k in data:\n if v[\"type\"] == \"string\" and not (\n isinstance(data[k], str) or isinstance(data[k], basestring)\n ):\n raise ValidationError(\n \"Value {0} for key {1} should be string, is {2}\".format(\n data[k], k, type(data[k])\n )\n )\n if v[\"type\"] == \"datetime\":\n try:\n datetime.datetime.strptime(data[k], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n raise ValidationError(\n \"Value {0} for key {1} doesn't parse as an ISO datetime\".format(\n data[k], k\n )\n )\n if v.get(\"index\") and list(data.keys())[v.get(\"index\")] != k:\n raise ValidationError(\"Key {0} is not at index {1}\".format(k, index))\n\n return data_json", "def _check_fields(self, content: List[JsonDict]) -> None:\n for c in content:\n self.assertIn(\"id\", c)\n self.assertIn(\"received_ts\", c)\n self.assertIn(\"room_id\", c)\n self.assertIn(\"event_id\", c)\n self.assertIn(\"user_id\", c)\n self.assertIn(\"sender\", c)\n self.assertIn(\"canonical_alias\", c)\n self.assertIn(\"name\", c)\n self.assertIn(\"score\", c)\n self.assertIn(\"reason\", c)", "def validate_required_ref(dictionary, yaml_file):\n\n validate_dict_contains_value(dictionary, 'defaults', 'ref', yaml_file)\n validate_type(dictionary['ref'], 'ref', str, 'str', yaml_file)\n validate_ref_type(dictionary, yaml_file)\n del dictionary['ref']", "def validate_yaml_values(yaml_values, multicar):\n # Verify if all the yaml keys required for launching models have same number of values\n same_len_values = [MODEL_S3_BUCKET_YAML_KEY, MODEL_S3_PREFIX_YAML_KEY, MODEL_METADATA_FILE_S3_YAML_KEY,\n CAR_COLOR_YAML_KEY]\n LOG.info(yaml_values)\n if not all(map(lambda param: len(yaml_values[param]) == len(yaml_values[same_len_values[0]]), same_len_values)):\n raise Exception('Incorrect number of values for these yaml parameters {}'.format(same_len_values))\n\n # Verify if all yaml keys have 2 values for multi car racing\n if multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 2:\n raise Exception('Incorrect number of values for multicar racing yaml parameters {}'.format(same_len_values))\n\n # Verify if all yaml keys have 1 value for single car racing\n if not multicar and len(yaml_values[MODEL_S3_PREFIX_YAML_KEY]) != 1:\n raise Exception('Incorrect number of values for single car racing yaml parameters {}'.format(same_len_values))", "def _validate(self):\n if not isinstance(self.parameter_schema, dict):\n raise TypeError(\"parameter_schema must be a dictionary\")\n # TODO: Settle on an input file schema and validation library\n self._parameter_names = list(self.parameter_schema.keys())\n # List, sets, and tuples are the supported PyYAML iterables that will support expected behavior\n for name in self._parameter_names:\n if not isinstance(self.parameter_schema[name], (list, set, tuple)):\n raise TypeError(f\"Parameter '{name}' is not one of list, set, or tuple\")", "def validate(self, json_data):\n try:\n self.process_json(json_data)\n except ValueError as 
e:\n # self.process_errors.append(e.args[0])\n self.process_errors = [e.args[0]]\n\n self.errors = list(self.process_errors)\n\n # Run validators\n if not self.errors:\n chain = itertools.chain(self.validators)\n self._run_validation_chain(chain)\n\n return len(self.errors) == 0", "def check_if_nested(data):\n if isinstance(data, dict):\n for k in data:\n if isinstance(data[k], (list, dict)):\n return True\n elif isinstance(data, list):\n for i in data:\n if isinstance(i, (list, dict)):\n return True\n return False", "def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst", "def test_case_3(self):\n with open(f'{TEST_DATA_DIR}/r1.json') as file:\n data = json.load(file)\n self.assertIsInstance(data, dict)\n\n task_1 = Task.new(data=data)\n self.assertTrue(task_1.validate())\n\n with self.assertRaises(GCGValidationError):\n task_2 = Task.new(data={'data': 'bad_data'})", "def _check_yaml(self, yaml):\n if type(yaml['datasets']) == dict:\n logging.error(\n \"[ERROR] \\\"datasets\\\" section of config file must be a list, not a dictionary...\" \n )\n sys.exit()", "def validate(self, data):\n # calling subserializer validate method (fields, and presets)\n data = super(FormidableSerializer, self).validate(data)\n # we check every field define in presets are define inside the form.\n if 'fields' in data and 'presets' in data:\n data = self.check_presets_cohesion(data)\n return data", "def assertStructure(self, root, dirDict):\n children = [each.basename() for each in root.children()]\n for pathSegment, expectation in dirDict.items():\n child = root.child(pathSegment)\n if callable(expectation):\n self.assertTrue(expectation(child))\n elif isinstance(expectation, dict):\n self.assertTrue(child.isdir(), \"{} is not a dir!\".format(child.path))\n self.assertStructure(child, expectation)\n else:\n actual = child.getContent().decode().replace(os.linesep, \"\\n\")\n self.assertEqual(actual, expectation)\n children.remove(pathSegment)\n if children:\n self.fail(\"There were extra children in {}: {}\".format(root.path, children))", "def test_json(self):\n\n cases_dir = pathlib.Path(__file__).parent / 'cases'\n\n asn_strs = {\n asn_path.stem: asn_path.read_text()\n for asn_path in cases_dir.glob('*.asn')\n }\n json_strs = {\n json_path.stem: json_path.read_text()\n for json_path in cases_dir.glob('*.json')\n }\n\n assert set(asn_strs.keys()) == set(json_strs.keys())\n assert len(asn_strs) > 0\n\n for key in asn_strs:\n with self.subTest(key=key):\n res_json = asn1vnparser.parse_asn1_value_assignment(\n asn_strs[key], as_json=True)\n res_py = json.loads(res_json)\n self.maxDiff = None\n self.assertEqual(res_py, json.loads(json_strs[key]))", "def _ConstructParseAndCheckJSON(\n self, inputfiles, logfiles, graphs):\n logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs)\n index = 0\n for filename in logfiles:\n graph_name = graphs[index]\n actual = logs[graph_name]\n path = os.path.join(self.data_directory, filename)\n expected = json.load(open(path))\n self.assertEqual(expected, actual, 'JSON data in %s did not match '\n 'expectations.' 
% filename)\n\n index += 1", "def test_parent_does_not_exist(self):\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')", "def _perform_validation(self, path, value, results):\n super(ObjectSchema, self)._perform_validation(path, value, results)\n\n if value == None:\n return\n\n name = path if path != None else \"value\"\n properties = ObjectReader.get_properties(value)\n\n # Process defined properties\n if self.properties != None:\n for property_schema in self.properties:\n processed_name = None\n\n for (key, value) in properties.items():\n # Find properties case insensitive\n if property_schema.name != None and key.lower() == property_schema.name.lower():\n property_schema._perform_validation(path, value, results)\n processed_name = key\n break\n\n if processed_name == None:\n property_schema._perform_validation(path, None, results)\n else:\n del properties[processed_name]\n\n # Process unexpected properties\n for (key, value) in properties.items():\n property_path = key if path == None or len(path) == 0 else path + \".\" + key\n\n results.append(\n ValidationResult(\n property_path,\n ValidationResultType.Warning,\n \"UNEXPECTED_PROPERTY\",\n name + \" contains unexpected property \" + str(key),\n None,\n key\n )\n )", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def validate_json(d):\n if d['type'] != 'FeatureCollection':\n raise Exception('JSON file is not a \\\"FeatureColleciton\\\".')\n\n if len(d['features']) != 1:\n raise Exception('JSON file should contain excactly one feature.')\n\n f = d['features'][0]\n\n if 'reference' not in f['properties'].keys():\n raise Exception('Feature property dictionary should contain '\n '\\\"referencey\\\" key.')\n\n if f['type'] != 'Feature':\n raise Exception('Feature type should be \\\"Feature\\\".')\n\n geom = f['geometry']\n\n if geom['type'] != 'MultiPolygon':\n raise Exception('Geometry type should be \\\"MultiPolygon\\\".')\n\n if 'coordinates' not in geom.keys():\n raise Exception('Geometry dictionary should contain \\\"coordinates\\\" '\n 'key.')\n\n polygons = geom['coordinates'][0]\n\n n_polygons = len(polygons)\n for i in range(n_polygons):\n p = polygons[i]\n n_points = len(p)\n if n_points % 2 == 0:\n raise Exception('Number of points in polyon must be odd.')\n\n if p[0] != p[-1]:\n raise Exception('First and last points in polygon must be '\n 'identical.')\n\n n_pairs = int((n_points - 1) / 2)\n for j in range(n_pairs):\n #------------------------------------------------------------------\n # Points are paired and in each pair the top is first, as in:\n #\n # _.-P1-._\n # P0' 'P2---P3\n # | \\\n # P7---P6----P5-------P4\n #\n # Pairs: P0-P7, P1-P6, P2-P5, P3-P4\n #------------------------------------------------------------------\n top_depth = p[j][2]\n bot_depth = p[-(j + 2)][2]\n if top_depth > bot_depth:\n raise Exception(\n 'Top points must be ordered before bottom points.')", "def _validate_parameter(value):\n if isinstance(value, (dict)):\n if any([not isinstance(key, string_types) for key in value.keys()]):\n raise TypeError(\"Invalid parameter. 
Dictionary keys must be strings.\")\n [_validate_parameter(item) for item in value.values()]\n elif isinstance(value, (list, tuple)):\n [_validate_parameter(item) for item in value]\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool))\n ):\n pass\n else:\n raise TypeError(\"Invalid parameter type. Got '%s'.\" % type(value))", "def __verify_values(self, tmpl_key, tmpl_value, parent):\n output = \"\"\n if tmpl_key not in parent:\n output = tmpl_value\n elif parent[tmpl_key] is None:\n output = tmpl_value\n else:\n if isinstance(parent[tmpl_key], list):\n for i in range(0, len(parent[tmpl_key])):\n for k, v in tmpl_value.items():\n parent[tmpl_key][i][k] = self.__verify_values(k, v, parent[tmpl_key][i])\n output = parent[tmpl_key]\n elif isinstance(tmpl_value, OrderedDict):\n for k, v in list(tmpl_value.items()):\n parent[tmpl_key][k] = self.__verify_values(k, v, parent[tmpl_key])\n output = parent[tmpl_key]\n else:\n output = parent[tmpl_key] if parent[tmpl_key].strip() != \"\" else tmpl_value\n return output", "def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")", "def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")", "def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(2, len(sub_ch))\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual('v1', c.get_content())\n else:\n self.assertEqual('v2', c.get_content())", "def _validate_nested_list_type(self, name, obj, nested_level, *args):\n if nested_level <= 1:\n self._validate_list_type(name, obj, *args)\n else:\n if obj is None:\n return\n if not isinstance(obj, list):\n raise TypeError(self.__class__.__name__ + '.' 
+ name + ' contains value of type ' +\n type(obj).__name__ + ' where a list is expected')\n for sub_obj in obj:\n self._validate_nested_list_type(name, sub_obj, nested_level - 1, *args)", "def validate_optional_ref(dictionary, yaml_file):\n\n if 'ref' in dictionary:\n validate_type(dictionary['ref'], 'ref', str, 'str', yaml_file)\n validate_ref_type(dictionary, yaml_file)\n del dictionary['ref']", "def validate_multipart_records():\n\n def validate_multipart_relation(multipart, volumes):\n relations = multipart.relations.get().get('multipart_monograph', [])\n titles = [volume['title'] for volume in volumes if 'title' in volume]\n count = len(set(v['volume'] for v in volumes))\n if count != len(relations):\n click.echo(\n '[Multipart {}] Incorrect number of volumes: {} '\n '(expected {})'.format(multipart['pid'], len(relations), count)\n )\n for relation in relations:\n child = Document.get_record_by_pid(\n relation['pid'],\n pid_type=relation['pid_type']\n )\n if child['title'] not in titles:\n click.echo(\n '[Multipart {}] Title \"{}\" does not exist in '\n 'migration data'.format(\n multipart['pid'],\n child['title']\n )\n )\n\n search = SeriesSearch().filter(\n 'term',\n mode_of_issuance='MULTIPART_MONOGRAPH'\n )\n for multipart_hit in search.scan():\n # Check if any child is missing\n if 'volumes' in multipart_hit._migration:\n volumes = multipart_hit._migration.volumes\n multipart = Series.get_record_by_pid(multipart_hit.pid)\n validate_multipart_relation(multipart, volumes)\n\n click.echo('Multipart validation check done!')", "def test_input_loadjson(self, fname, groups, hosts):\n with open(fname,'r') as fd:\n fcon = json.loads(fd.read())\n inventory = Inventory()\n inventory.load_inventoryjson(fcon)\n assert inventory.count_groups() == len(groups)\n assert inventory.count_hosts() == len(hosts)", "def validateDictionaries(self):\n self.logger.info(\"Validating Dictionaries\")\n\n message = ''\n shader_dict = {}\n disp_dict = {}\n attr_dict = {}\n layers_dict = {}\n namespace_str = ''\n\n shader_attr = self.getAttr(\"shadersAssignation\")\n disp_attr = self.getAttr(\"displacementsAssignation\")\n attr_attr = self.getAttr(\"attributes\")\n layers_attr = self.getAttr(\"layersOverride\")\n namespace_attr = self.getAttr(\"shadersNamespace\")\n\n shaders = None\n disp = None\n attr = None\n layers = None\n namespace = None\n\n fail = False\n\n if shader_attr:\n try:\n shader_dict = json.loads(shader_attr)\n if shader_dict.has_key('shaders'):\n fail = True\n shaders = 'please remove the shaders key'\n except ValueError as e:\n shaders = e\n fail = True\n\n if disp_attr:\n try:\n disp_dict = json.loads(disp_attr)\n if disp_dict.has_key('displacement'):\n fail = True\n disp = 'please remove the displacement key'\n except ValueError as e:\n disp = e\n fail = True\n\n if attr_attr:\n try:\n attr_dict = json.loads(attr_attr)\n if attr_dict.has_key('attributes'):\n fail = True\n attr = 'please remove the attributes key'\n except ValueError as e:\n attr = e\n fail = True\n\n if layers_attr:\n try:\n layers_dict = json.loads(layers_attr)\n if layers_dict.has_key('layers'):\n fail = True\n layers = 'please remove the layers key'\n except ValueError as e:\n layers = e\n fail = True\n\n if namespace_attr:\n try:\n namespace_str = ast.literal_eval(namespace_attr)\n if type(namespace_attr) == dict:\n if namespace_attr.has_key('namespace'):\n fail = True\n namespace = 'please remove the namespace key'\n\n except ValueError as e:\n namespace = e\n fail = True\n\n if not fail:\n 
self.logger.info(\"Valid\")\n return True\n else:\n if shaders:\n self.logger.error(\"%s.shadersAssignation : %s\" % (self.data['shapeNode'], shaders))\n if disp:\n self.logger.error(\"%s.displacementsAssignation : %s\" % (self.data['shapeNode'], disp))\n if attr:\n self.logger.error(\"%s.attributes : %s\" % (self.data['shapeNode'], attr))\n if layers:\n self.logger.error(\"%s.layersOverride : %s\" % (self.data['shapeNode'], layers))\n if namespace:\n self.logger.error(\"%s.shadersNamespace : %s\" % (self.data['shapeNode'], namespace))\n self.logger.info(\"Invalid\")\n return False", "def validate(self, data):\n age = data.get(\"age\", None)\n age = age.split(\",\")\n size = data.get(\"size\", None)\n size = size.split(\",\")\n gender = data.get(\"gender\", None)\n gender = gender.split(\",\")\n for i in age:\n if i not in ['b', 'y', 'a', 's']:\n raise serializers.ValidationError(\n \"Age must be either 'b' for baby, 'y' for young,\"\n \" 'a' for adult, or 's' for senior. Can do multiple with\"\n \" commas, ex: a,y,e\")\n for i in size:\n if i not in ['s', 'm', 'l', 'xl']:\n raise serializers.ValidationError(\n \"Size must be either 's' for small, 'm' for medium, 'l' \"\n \"for large, or 'xl' for extra large. Can do multiple with\"\n \" commas, ex: s,l,xl\")\n for i in gender:\n if i not in ['m', 'f']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, or 'f' for female. Can\"\n \" have both using commas, ex: m,f\")\n return data", "def test(cls, pathHolder, parentCrawler):\n if not super(AsciiCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in ['json']", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(len(sub_ch), 2)\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual(c.get_content(), 'v1')\n else:\n self.assertEqual(c.get_content(), 'v2')", "def validate_payload(payload):\n\n if not isinstance(payload, dict):\n raise Exception(\"payload is a %s, not a dictionary\" % type(payload))\n\n if \"nmo\" not in payload:\n raise Exception(\"No nmo in payload\")\n\n if \"job\" not in payload[\"nmo\"]:\n raise Exception(\"No job in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n if \"task\" not in payload[\"nmo\"]:\n raise Exception(\"No task in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n try:\n isGroup = payload['nmo']['source']['misc']['isGroup']\n except:\n isGroup = False\n\n if \"jsonld\" not in payload and not isGroup:\n raise Exception(\"No jsonld in payload \\nPayload is:- %s\" % payload)", "def test_json_loads_array() -> None:\n assert json_loads_array('[{\"c\":1.2}]') == [{\"c\": 1.2}]\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'dict'>\"\n ):\n json_loads_array(\"{}\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'bool'>\"\n ):\n json_loads_array(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'NoneType'>\"\n ):\n 
json_loads_array(\"null\")", "def test_json_loads_array() -> None:\n assert json_loads_array('[{\"c\":1.2}]') == [{\"c\": 1.2}]\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'dict'>\"\n ):\n json_loads_array(\"{}\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'bool'>\"\n ):\n json_loads_array(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'NoneType'>\"\n ):\n json_loads_array(\"null\")", "def check_add_child_node(data):\n\n # check nodeDisplay\n \"\"\"\n\n :rtype :\n \"\"\"\n if 'nodeDisplay' not in data:\n raise ValueError(\"No nodeDisplay in given node.\")\n\n # check nodeDescription\n if 'nodeDescription' not in data:\n raise ValueError(\"No nodeDescription in given node.\")\n\n # check nodeTags\n if 'nodeTags' not in data:\n data[\"nodeTags\"] = []\n\n # check nodeParents\n if 'nodeParents' not in data or len(data[\"nodeParents\"]) == 0:\n raise ValueError(\"No nodeParents in given node.\")\n else:\n parent = data[\"nodeParents\"][0]\n if '_id' not in parent:\n raise ValueError(\"Malformed node parent array: lack of parent node id \\\"_id\\\"\")\n else:\n parent_node = Nodes().retrieveById(parent[\"_id\"])\n if parent_node.status_code == 404:\n raise ValueError(\n \"Parent node information does not exist in database: parent _id=%s\" % parent[\"_id\"])\n else:\n return parent_node", "def _check_nested(self, key, self_val, nested):\n nested_val = getattr(nested, key)\n assert self_val == nested_val, \\\n \"selector['{}']='{}' in '{}' doesn't match header['{}']='{}' in nested file '{}'.\".format(\n key, self_val, self.filename, key, nested_val, nested.filename)", "def test_cli_validate_with_invalid_check_relations():\n result = CliRunner().invoke(\n cli.cli,\n [\n \"validate\",\n \"tests/data/invalid-relations/datapackage.json\",\n \"--check-relations\",\n ],\n )\n assert result.exit_code == 0\n assert '\"valid\": true' in result.output\n assert \"Foreign key\" in result.output", "def check_data(dataname):\n oname = data_path(dataname, \"org\")\n data = load_data(data_path(dataname, \"py\"))\n root = load(oname)\n\n for (i, (node, kwds)) in enumerate(zip(root[1:], data)):\n for key in kwds:\n val = value_from_data_key(node, key)\n eq_(kwds[key], val,\n msg=('check value of {0}-th node of key \"{1}\" from \"{2}\".'\n '\\n\\nParsed:\\n{3}\\n\\nReal:\\n{4}'\n ).format(i, key, dataname, val, kwds[key]))\n\n eq_(root.env.filename, oname)", "def validate(self, obj):\n if 'tags' in obj and not isinstance(obj['tags'], list):\n raise aomi.exceptions.Validation('tags must be a list')\n\n if self.present:\n check_obj(self.required_fields, self.name(), obj)" ]
[ "0.6158009", "0.6144193", "0.5932691", "0.5760641", "0.56944185", "0.56927955", "0.5685668", "0.56744194", "0.56555223", "0.5653026", "0.5635682", "0.559688", "0.55957067", "0.5592181", "0.5564809", "0.5561077", "0.55481446", "0.55238324", "0.551551", "0.5502962", "0.5483416", "0.54796106", "0.5474002", "0.5427207", "0.5421077", "0.541044", "0.539442", "0.5382479", "0.5380251", "0.53705496", "0.53701514", "0.53581035", "0.5346261", "0.53433555", "0.5342292", "0.5329069", "0.53288", "0.53213733", "0.53213733", "0.5309387", "0.5295073", "0.52812946", "0.52781576", "0.5278148", "0.52745014", "0.5271311", "0.5258945", "0.52508134", "0.5241873", "0.5240219", "0.52186847", "0.5216055", "0.52154535", "0.5204639", "0.52042097", "0.5201654", "0.5196283", "0.5193557", "0.5187263", "0.5185595", "0.5181486", "0.51779383", "0.5174569", "0.5155624", "0.51496255", "0.5145836", "0.51436144", "0.5141012", "0.5128034", "0.51244444", "0.51167786", "0.5113234", "0.51042175", "0.5100454", "0.5099832", "0.50995183", "0.5097125", "0.50949615", "0.50843644", "0.50789547", "0.50784993", "0.50784993", "0.50763655", "0.50763124", "0.50749946", "0.5074138", "0.5073505", "0.5073447", "0.50662404", "0.5052796", "0.5052364", "0.50522834", "0.5044534", "0.50356895", "0.50356895", "0.50352925", "0.50314313", "0.50189215", "0.50017655", "0.49976388" ]
0.6725077
0
Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory), although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system.
def validate_credentials(self, *args, dotenv_path=None, **kwargs):
    if not hasattr(self, "credentials"):
        return set()

    # Load any variables from the .env file into the environment.
    dotenv_path = dotenv_path or os.path.join(".", ".env")
    load_dotenv(dotenv_path)

    for credential in self.credentials:
        if credential["name"] not in os.environ:
            raise exceptions.CredentialNotFound(
                f"Credential {credential['name']!r} missing from environment or .env file."
            )

    return self.credentials
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_credentials(self):\n\n # There should be a client_id and client secret\n return \"client_id\" in self.credentials.keys() and \"client_secret\" in self.credentials.keys() \\\n and self.credentials[\"client_id\"] and self.credentials[\"client_secret\"]", "def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok", "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")", "def check_for_credential_file(self):\r\n if 'AWS_CREDENTIAL_FILE' in os.environ:\r\n path = os.environ['AWS_CREDENTIAL_FILE']\r\n path = os.path.expanduser(path)\r\n path = os.path.expandvars(path)\r\n if os.path.isfile(path):\r\n fp = open(path)\r\n lines = fp.readlines()\r\n fp.close()\r\n for line in lines:\r\n if line[0] != '#':\r\n if '=' in line:\r\n name, value = line.split('=', 1)\r\n if name.strip() == 'AWSAccessKeyId':\r\n if 'aws_access_key_id' not in self.args:\r\n value = value.strip()\r\n self.args['aws_access_key_id'] = value\r\n elif name.strip() == 'AWSSecretKey':\r\n if 'aws_secret_access_key' not in self.args:\r\n value = value.strip()\r\n self.args['aws_secret_access_key'] = value\r\n else:\r\n print 'Warning: unable to read AWS_CREDENTIAL_FILE'", "def test_missing_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with self.assertRaises(exceptions.CredentialNotFound):\n twine.validate_credentials()", "def test_validate_credentials(self):\n pass", "def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"", "def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def get_credentials():\n credential_dir = os.getcwd()\n credential_path = os.path.join(credential_dir,\n 'smarking_error_check.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def test_credentials(self):\r\n data = self._deep_clean('zekebarge@gmail.com')\r\n error = data.get(ERROR_CODE, None)\r\n if error in (1,2):\r\n raise InvalidCredentialsError(\"Credentials are invalid for user '{}'\".format(self._username))\r\n return True", "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n 
flask.session['credentials'])\n\n if (credentials.invalid or\n credentials.access_token_expired):\n return None\n return credentials", "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or\n credentials.access_token_expired):\n return None\n return credentials", "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or credentials.access_token_expired):\n return None\n return credentials", "def cfg_credentials(context):\n arguments = {\n '--config': context.config_file,\n 'authorize': False,\n 'account_summary': False\n }\n pychex_cli = PychexCli(arguments)\n pychex_cli.read_config()\n # Check that the values pulled from the read_config method match what we\n # know\n print(pychex_cli.username)\n assert pychex_cli.username == context.username\n assert pychex_cli.security_image_path == context.security_image_path\n assert pychex_cli.password == context.password\n # Check that the unencrypted values are not present\n with open(arguments['--config']) as cfg:\n cfg_txt = cfg.read()\n assert cfg_txt.find(context.username) == -1\n assert cfg_txt.find(context.security_image_path) == -1\n assert cfg_txt.find(context.password) == -1", "def __validate_google_credentials(self):\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n self.service = build('sheets', 'v4', credentials=creds)", "def check_credentials(self) -> None:\n # Checks the GitHub token is defined\n configuration.get_value(ConfigurationVariable.GIT_TOKEN)", "def check_credentials_validation(credentials):\n spec = {'_id': credentials['username'], 'password': credentials['password']}\n if not current_app.mongo.observer.users.find_one(spec):\n raise Unauthorized('invalid credentials')", "def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. 
canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def verify_credentials(self):\n try:\n self.api.VerifyCredentials()\n logging.info('Successfully verified')\n return True\n except TwitterError as e:\n logging.error('Error verifying credentials: %s', e.message[0]['message'])\n return False", "def has_credentials(credentials_file=CREDENTIALS_FILE):\n return os.path.exists(credentials_file)", "def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials", "def validate_env(self) -> None:\n errors = []\n\n self.user_name = env.str('USER_NAME')\n if not self.user_name:\n errors.append('USER_NAME environment variable needs to be set to your MyQ user name')\n\n self.password = env.str('PASSWORD')\n if not self.password:\n errors.append('PASSWORD environment variable needs to be set to your MyQ password')\n\n self.left_door = env.int('EDGEWOOD', 0)\n self.right_door = 1 - self.left_door\n\n self.only_close = env.bool('ONLY_CLOSE', True)\n\n if errors:\n raise Exception(','.join(errors))", "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "def _make_sure_credentials_are_set(self):\n if self.backend_options:\n if not os.environ.get('APCA_API_KEY_ID') and \\\n self.backend_options['key_id']:\n os.environ['APCA_API_KEY_ID'] = self.backend_options['key_id']\n if not os.environ.get('APCA_API_SECRET_KEY') and \\\n self.backend_options['secret']:\n os.environ['APCA_API_SECRET_KEY'] = self.backend_options[\n 'secret']\n if not os.environ.get('APCA_API_BASE_URL') and \\\n self.backend_options['base_url']:\n os.environ['APCA_API_BASE_URL'] = self.backend_options[\n 'base_url']", "def resolve_credentials():\n path = os.getenv('GOOGLE_APPLICATION_CREDENTIALS', '')\n\n if os.path.exists(path):\n return True\n\n credentials = os.getenv('GOOGLE_SERVICE_KEY', None)\n if credentials:\n with open(path, 'w') as credentials_file:\n credentials_file.write(credentials)", "def test_no_credentials(self):\n twine = Twine(source=VALID_SCHEMA_TWINE)\n twine.validate_credentials()", "def authenticate(self):\r\n\r\n config_data = {}\r\n\r\n # Step 1: try getting username/password from environment\r\n config_data = self.read_config_environment(config_data)\r\n\r\n # Step 2: if credentials were not in env read in configuration file\r\n if self.CONFIG_NAME_USER not in config_data \\\r\n or self.CONFIG_NAME_KEY not in config_data:\r\n if os.path.exists(self.config):\r\n config_data = self.read_config_file(config_data)\r\n else:\r\n raise IOError('Could not find {}. Make sure it\\'s located in'\r\n ' {}. 
Or use the environment method.'.format(\r\n self.config_file, self.config_dir))\r\n\r\n # Step 3: load into configuration!\r\n self._load_config(config_data)", "def authenticate(self, credentials=None):\n if credentials is None: # pragma: no cover\n credentials['AWS_ACCESS_KEY_ID'] = getpass.getpass(prompt=\"Enter AWS_ACCESS_KEY_ID: \")\n credentials['AWS_SECRET_KEY'] = getpass.getpass(prompt=\"Enter AWS_SECRET_KEY: \")\n\n self._credentials = credentials", "def command_check_credentials():\n \n # now calling STS service with the credentials retrieved for verification\n if not aws.check_credentials():\n print(\"credential check failed. exiting program with exit code 1\")\n sys.exit(1)", "def check_credentials(self, cli_credentials, default_prompt, enable_prompt, logger):\n raise NotImplementedError(\"Class {} must implement method 'check_credentials'\".format(type(self)))", "def credentials_given(self):\n return self.key and self.secret", "def config_validate(ctx, **kwargs):\n # Validates pf9-express config file and obtains Auth Token\n #Load Active Config into ctx\n GetConfig(ctx).GetActiveConfig()\n #Get Token\n token = GetToken().get_token_v3(\n ctx.params[\"du_url\"],\n ctx.params[\"du_username\"],\n ctx.params[\"du_password\"],\n ctx.params[\"du_tenant\"] )\n if token is not None:\n click.echo('Config Validated!')\n click.echo('Token: %s' % token)\n else:\n click.echo('Config Validation Failed!')", "def validate(self, credentials):\n user = authenticate(**credentials)\n if user and user.is_active:\n return user\n raise serializers.ValidationError('Incorrect Credentials')", "def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials", "def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials", "def get_credentials():\n credential_dir = os.path.realpath('.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path) # stores the users credentials --> TODO: put in database\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n\n credentials = tools.run_flow(flow, store, flags)\n\n print('Storing credentials to ' + credential_path)\n return credentials", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def check_credentials_typo(credentials):\n regex_username = r'^[\\w\\.\\-]{2,}$'\n 
regex_password = r'[^.]{4,10}$'\n\n if not match(regex_username, credentials['username']):\n raise ValueError('invalid username typo')\n\n if not match(regex_password, credentials['password']):\n raise ValueError('invalid password typo')", "def credentials(self):\n return True", "def credentials_work(self):\n\n good = True\n try:\n self.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def validate_and_init() -> bool:\n env_vars_absent = [\n env\n for env in REQUIRED_ENVS\n if env not in os.environ or len(os.environ[env]) == 0\n ]\n if env_vars_absent:\n print(f\"Please define {env_vars_absent} in your github secrets. Aborting...\")\n return False\n\n if not (\n ENV_VAR_STATS_TYPE in os.environ\n and len(os.environ[ENV_VAR_STATS_TYPE]) > 0\n and os.environ[ENV_VAR_STATS_TYPE] in ALLOWED_STATS_TYPES\n ):\n print(f\"Using default stats type: {DEFAULT_STATS_TYPE}\")\n os.environ[ENV_VAR_STATS_TYPE] = DEFAULT_STATS_TYPE\n\n return True", "def has_credentials(self):\n return self.username and self.password and self.url and self.xml_rpc", "def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")", "def verify_creds(self):\n if getattr(self, \"_verified\", None):\n return\n\n if self.assume_role is not NotSpecified:\n self.assume()\n self._verified = True\n return\n\n log.info(\"Verifying amazon credentials\")\n try:\n self.session = boto3.session.Session(region_name=self.region)\n amazon_account_id = self.session.client('sts').get_caller_identity().get('Account')\n if int(self.account_id) != int(amazon_account_id):\n raise BespinError(\"Please use credentials for the right account\", expect=self.account_id, got=amazon_account_id)\n self._verified = True\n except botocore.exceptions.NoCredentialsError:\n raise BespinError(\"Export AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY before running this script (your aws credentials)\")\n except botocore.exceptions.ClientError as error:\n raise BespinError(\"Couldn't determine what account your credentials are from\", error=error.message)\n\n if self.session is None or self.session.region_name != self.region:\n raise ProgrammerError(\"botocore.session created in incorrect region\")", "def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials", "def _set_credentials():\n # Override 
credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False", "def load_credentials(secrets: Secrets = None): # noqa: E501\n secrets = secrets or {}\n service_account_file = secrets.get(\"service_account_file\")\n service_account_info = secrets.get(\"service_account_info\")\n\n if not service_account_file:\n google_app_creds = os.getenv(\n \"GOOGLE_APPLICATION_CREDENTIALS\",\n os.getenv(\"GCP_APPLICATION_CREDENTIALS\"),\n )\n if google_app_creds:\n service_account_file = google_app_creds\n\n credentials = None\n if service_account_file:\n service_account_file = os.path.expanduser(service_account_file)\n if not os.path.exists(service_account_file):\n raise FailedActivity(\n \"GCP account settings not found at {}\".format(\n service_account_file\n )\n )\n\n logger.debug(\n \"Using GCP credentials from file: {}\".format(service_account_file)\n )\n credentials = Credentials.from_service_account_file(\n service_account_file\n )\n elif service_account_info and isinstance(service_account_info, dict):\n logger.debug(\"Using GCP credentials embedded into secrets\")\n credentials = Credentials.from_service_account_info(\n service_account_info\n )\n else:\n raise FailedActivity(\n \"missing GCP credentials settings in secrets of this activity\"\n )\n\n if credentials is not None and credentials.expired:\n logger.debug(\"GCP credentials need to be refreshed as they expired\")\n credentials.refresh(httplib2.Http())\n\n if not credentials:\n raise FailedActivity(\n \"missing a service account to authenticate with the \"\n \"Google Cloud Platform\"\n )\n\n return credentials", "def validate_credentials(self, data):\n try:\n boolean_param_list = []\n get_service_data = app.config.get('JWT_CONFIG').get('CREDENTIAL')\n token_identity_param = app.config.get('JWT_CONFIG').get('TOKEN_IDENTITY_PARAM')\n expires_delta = app.config.get('JWT_CONFIG').get('TOKEN_EXPIRY')\n expires_delta = eval(expires_delta) if isinstance(expires_delta, str) else expires_delta\n credentials = data.get('credentials')\n identity_credentials_keys = list(get_service_data.keys())\n for key in identity_credentials_keys:\n if get_service_data[key] != credentials[key]:\n boolean_param_list.append(False)\n else:\n boolean_param_list.append(True)\n\n if False in boolean_param_list:\n return {'msg': \"Incorrect Credentials\"}, 401\n else:\n access_token = self.auth_token_generate(\n identity_param_val=credentials[token_identity_param], expires_delta=expires_delta)\n return {'access_token': access_token}, 200\n except Exception as e:\n print(e)\n return {'msg': \"Incorrect Credentials\"}, 401", "def get_credentials(self):\n #\n # Why is this not read from the yaml file?\n path = Path(path_expand(self.credential_file)).resolve()\n if not os.path.exists(path):\n os.makedirs(path)\n\n credentials_path = (path / 'google-drive-credentials.json').resolve()\n print(credentials_path)\n\n store = Storage(credentials_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(self.client_secret_file,\n self.scopes)\n flow.user_agent = self.application_name\n #\n # SHOUDL THE FLAGS NOT BE SET IN THE YAML FILE OR DOCOPTS OFTHE COMMAND?\n #\n if self.flags:\n credentials = tools.run_flow(flow, store, self.flags)\n\n return credentials", "def validate_credential_flags(host, username, password, client_ca, client_ca_password,\n client_pk, 
client_pk_password, credentials_required: bool = True):\n using_cert_auth = not (client_ca is None and\n client_ca_password is None and\n client_pk is None and\n client_pk_password is None)\n\n if using_cert_auth:\n return validate_certificate_flags(\n host,\n username,\n password,\n client_ca,\n client_ca_password,\n client_pk,\n client_pk_password)\n\n if (username is None and password is None):\n if credentials_required is False:\n return None\n\n return [\"cluster credentials required, expected --username/--password or --client-cert/--client-key\"]\n\n if (username is None or password is None):\n return [\"the --username/--password flags must be supplied together\"]\n\n return None", "def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:\n return pulumi.get(self, \"credentials\")", "def authenticate_credentials(self, **credentials):\n return None", "def validate(self):\n if not self.credentials['password']:\n raise ValueError(\n 'An empty z/VM guest password is trying to be used. '\n 'Please set the correct password.')", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(env=\"development\") -> dict:\n load_dotenv()\n credentials = {}\n\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"DEV_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"DEV_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"DEV_AWS_REGION\")\n\n if env == \"production\":\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"PROD_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"PROD_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"PROD_AWS_REGION\")\n\n return credentials", "def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def check_credendtials(account):\n return Credentials.if_credential_exist(account)", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n 
credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _is_valid(self):\n # TODO: Query Google to validate credentials\n return True", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def test_getcredentials_failed_netrc(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert not server._username\n assert not server._password", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'thejam_calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing 
credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _load_credentials(creds_file=None):\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. 
' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds", "def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return credentials", "def _parse_creds(filename='.divvy'):\n\n creds = None\n\n try:\n file_path = os.path.expanduser('~') + '/' + filename\n with open(file_path, 'r') as credfile:\n for line in credfile:\n if line.strip()[0] == '#':\n pass\n elif ':' in line:\n username = line.strip().split(':')[0]\n password = line.strip().split(':')[1]\n creds = username, password\n break\n return creds\n\n # Fail silently as most people will not have creds file\n except IOError:\n return None\n\n except (UnboundLocalError, IndexError):\n print('Attempted to use a credentials dotfile ({}) but '\n 'it is either empty or malformed. 
Credentials should be in '\n 'the form <USERNAME>:<API_TOKEN>.'.format(file_path))\n raise", "def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n 
flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials() -> client.Credentials:\n\n credential_path = os.path.join(HOME_DIR, \"google-credentials.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(os.path.join(HOME_DIR, CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n # This attempts to open an authorization page in the default web browser, and asks the user\n # to grant the bot access to their data. If the user grants permission, the run_flow()\n # function returns new credentials.\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print(\"Storing credentials to \" + credential_path)", "def validate_account_credentials(self, deployment=None):\n if deployment is None:\n deployment = {}\n boto_args = {'region_name': self.env_vars['AWS_DEFAULT_REGION']}\n for i in ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']:\n if self.env_vars.get(i.upper()):\n boto_args[i] = self.env_vars[i.upper()]\n if isinstance(deployment.get('account-id'), (int, six.string_types)):\n account_id = str(deployment['account-id'])\n elif deployment.get('account-id', {}).get(self.environment_name):\n account_id = str(deployment['account-id'][self.environment_name])\n else:\n account_id = None\n if account_id:\n self.validate_account_id(boto3.client('sts', **boto_args),\n account_id)\n if isinstance(deployment.get('account-alias'), six.string_types):\n account_alias = deployment['account-alias']\n elif deployment.get('account-alias', {}).get(self.environment_name):\n account_alias = deployment['account-alias'][self.environment_name]\n else:\n account_alias = None\n if account_alias:\n self.validate_account_alias(boto3.client('iam', **boto_args),\n account_alias)", "def credentials_work(self):\n good = True\n try:\n self.session.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def get_credentials():\n credential_dir = os.path.dirname(os.path.realpath(CLIENT_SECRET_FILE))\n credential_path = os.path.join(\n credential_dir, 'sheets.googleapis.com-endosys-events.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + 
credential_path)\n return credentials", "def app_env():\n cred = tk.config_from_environment()\n if any(i is None for i in cred):\n skip_or_fail(KeyError, \"No application credentials!\")\n\n return cred", "def get_credentials_from_file(credentials_file):\n # Change the scope username and password variables to global\n global username\n global password\n try:\n # Open and reads the credentials.pwd file and save the lines in the username and password\n with open(os.path.dirname(__file__) + credentials_file) as credential_file:\n credentials = credential_file.readlines()\n username = credentials[0].strip()\n password = credentials[1].strip()\n\n credential_file.close()\n except FileNotFoundError as error:\n print(error)\n sys.exit(1)", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'client_secret_OCR.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n print(\"Current folder: \" + os.getcwd())\n flow = client.flow_from_clientsecrets(\n \"../../\" + CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def test_credential_default_values():\n creds = Credentials()\n assert creds.url is None\n assert creds.token is None\n assert creds.org_key is None\n assert creds.ssl_verify\n assert creds.ssl_verify_hostname\n assert creds.ssl_cert_file is None\n assert not creds.ssl_force_tls_1_2\n assert creds.proxy is None\n assert not creds.ignore_system_proxy\n assert creds.integration is None\n with pytest.raises(AttributeError):\n assert creds.notexist is None", "def _check_env():\n\tif os.getenv(_DATA_DIRECTORY_ENV_KEY) is 
None:\n\t\texit_everything(ERROR_DATA_DIRECTORY_NOT_SET, f'{_DATA_DIRECTORY_ENV_KEY} env var not set')\n\t\n\tif os.getenv(_FRONTEND_URL_ENV_KEY) is None:\n\t\texit_everything(ERROR_FRONTEND_NOT_SET, f'{_FRONTEND_URL_ENV_KEY} env var not set')", "def test_getcredentials_from_netrc(netrc):\n netrc.return_value.authenticators.return_value = (USERNAME, \"\", PASSWORD)\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def _load_credentials(self, datasource):\n\n self.credentials = datasource.credentials # Access the credentials\n\n # If there are credentials then make the api call\n if self.credentials:\n self.credentials = yaml.load(self.credentials)\n if self._validate_credentials():\n return self.credentials[\"client_id\"], self.credentials[\"client_secret\"]\n\n raise InvalidOrMissingCredentials(\"client_id and client_secret are missing or invalid\")", "def get_credentials(args, my_dirname):\n\n credential_dir = os.path.join(my_dirname, '.credentials')\n if not os.path.exists(credential_dir):\n os.mkdir(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-cotus-checker.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n try:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, args)\n print('Storing credentials to ' + credential_path)\n except (oauth2client.clientsecrets.InvalidClientSecretsError, json.decoder.JSONDecodeError):\n pass\n return credentials", "def _authenticate_from_file(self, credentials):\n self._gauth.LoadCredentialsFile(credentials)", "def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }", "def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or 
credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self, **kwargs):\n creds_file = os.path.join(kwargs['user_dir'], 'credentials.json')\n\n # Getting credentials from Storage\n store = file.Storage(creds_file)\n creds = store.get()\n\n # Validating or refreshing credentials, if necessary\n if creds is None or creds.invalid:\n flow = client.flow_from_clientsecrets(self.client_secret_file,\n self.scopes)\n creds = tools.run_flow(flow, store)\n elif creds.access_token_expired:\n creds.refresh(httplib2.Http())\n else:\n pass\n\n return creds", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def load_credentials(cred=\"credentials_prod.json\"):\n if isinstance(cred, dict):\n # Easy way to handle if a function was handed valid credentials\n pass\n elif isinstance(cred, str):\n with open(cred, 'r') as f:\n cred = json.load(f)\n else:\n raise ValueError(\"Invalid input cred={0}\".format(cred))\n\n # Check for correct entries\n cred_keys = [ \"access_token\", \"expires_in\", \"refresh_token\", \"scope\", \"token_type\"]\n for k in cred_keys:\n if k not in cred:\n raise ValueError(\"Credentials missing key {0}\".format(k))\n return cred", "def aws_credentials() -> None:\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def get_credentials():\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n except ImportError:\n flags = None\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(GoogleGsuiteAPI.CLIENT_SECRET_FILE, GoogleGsuiteAPI.SCOPES)\n flow.user_agent = GoogleGsuiteAPI.APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _credentials_are_valid(self, username, password):\n LDAP_SERVER = 'ldap://xxx.xxx.xxx' # EDIT THIS\n LDAP_USERNAME = '%s@xxx.com' % username # EDIT THIS\n LDAP_PASSWORD = password\n\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n # perform a synchronous bind\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n 
ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)\n except ldap.INVALID_CREDENTIALS:\n ldap_client.unbind()\n # Wrong username or password\n return False\n except ldap.SERVER_DOWN:\n # AD server not available\n return False\n # all is well\n ldap_client.unbind()\n # Login successful\n return True" ]
[ "0.661287", "0.64161855", "0.6251266", "0.61858857", "0.61782354", "0.6148377", "0.61406416", "0.6052758", "0.6034112", "0.6033556", "0.60107875", "0.60107875", "0.60083914", "0.6002498", "0.5968834", "0.59106755", "0.5909878", "0.5885369", "0.585459", "0.5849183", "0.58435297", "0.57935625", "0.57888234", "0.5787467", "0.5766613", "0.575164", "0.57392514", "0.571363", "0.56959903", "0.5687062", "0.56752586", "0.56717896", "0.56611705", "0.563774", "0.56363904", "0.5631642", "0.561847", "0.56122315", "0.56055987", "0.55850697", "0.5576465", "0.5547414", "0.55447406", "0.55410403", "0.5534489", "0.5523607", "0.55219984", "0.55194706", "0.5513108", "0.5502152", "0.5501527", "0.54902697", "0.5490065", "0.5483884", "0.5469192", "0.5463268", "0.5462135", "0.5455258", "0.5453272", "0.5449717", "0.5449717", "0.54451805", "0.5442831", "0.54338646", "0.54320973", "0.54207855", "0.54065365", "0.53941214", "0.5392162", "0.5391208", "0.5379208", "0.53727806", "0.53727806", "0.53727806", "0.53727806", "0.53727806", "0.5365985", "0.53596056", "0.53588593", "0.53464085", "0.5338455", "0.53338796", "0.53310233", "0.5318035", "0.53095865", "0.530856", "0.5301657", "0.52998954", "0.5288449", "0.5273099", "0.52655804", "0.52543694", "0.52512974", "0.5249519", "0.52464753", "0.5245665", "0.5240109", "0.5236275", "0.5235913", "0.5229351" ]
0.7446492
0
Validate that the configuration values, passed as either a file or a json string, are correct.
def validate_configuration_values(self, source, **kwargs):
    return self._validate_values("configuration_values", source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, config_json):\n pass", "def validate_config(\n json_schema: JsonDict, config: Any, config_path: StrSequence\n) -> None:\n try:\n jsonschema.validate(config, json_schema)\n except jsonschema.ValidationError as e:\n raise json_error_to_config_error(e, config_path)", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def validate_config(self, config: Dict) -> bool:\n raise NotImplementedError", "def test_invalid_configuration(self):\n\n config = copy.deepcopy(self.configuration)\n config['version'] = 'BAD'\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : config\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_tap_config_json_validation_retry_with_invalid_config_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.invalid_json_file,\n properties=self.valid_json_file,\n state=self.valid_json_file)", "def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")", "def validate_config(self):\n pass", "def validate_config(self):\n pass", "def _validate_config(self):\n pass", "def validate_config(config: Dict) -> None:\n\n # For validating with pydantic\n config_flattened = format_config_for_validation(config)\n user_defined_sets = get_all_sets(config)\n\n errors = []\n for input_data in config_flattened:\n try:\n if \"type\" not in input_data:\n UserDefinedValue(**input_data)\n elif input_data[\"type\"] == \"param\":\n input_data[\"defined_sets\"] = user_defined_sets\n UserDefinedParameter(**input_data)\n elif input_data[\"type\"] == \"result\":\n input_data[\"defined_sets\"] = user_defined_sets\n UserDefinedResult(**input_data)\n elif input_data[\"type\"] == \"set\":\n UserDefinedSet(**input_data)\n else:\n # have pydantic raise an error\n UserDefinedValue(\n name=input_data[\"name\"],\n type=input_data[\"type\"],\n dtype=input_data[\"dtype\"],\n )\n except ValidationError as ex:\n errors_caught = [x[\"msg\"] for x in ex.errors()]\n errors.extend(errors_caught)\n\n if errors:\n error_message = \"\\n\".join(errors)\n raise 
OtooleConfigFileError(message=f\"\\n{error_message}\")", "def is_valid_task_config(config):\n try:\n config_json = json.loads(config)\n except ValueError:\n return False\n return validate_config(config_json)", "def validate_config_dict(self):\n config_options = [\"pipeline_name\",\n \"num_processors\",\n \"num_sessions_at_once\",\n \"available_memory\",\n \"cluster_system\",\n \"output_directory\",\n \"working_directory\",\n \"template_head_for_anat\",\n \"exclude_zeros\",\n \"start_idx\",\n \"stop_idx\",\n \"write_report\",\n \"write_graph\",\n \"write_all_outputs\",\n \"upload_to_s3\",\n \"bucket_prefix\",\n \"bucket_out_prefix\",\n \"local_prefix\",\n \"bucket_name\",\n \"creds_path\"]\n invalid = []\n for param in self._config.keys():\n if param not in config_options:\n invalid.append(param)\n if len(invalid) > 0:\n err = \"\\n[!] The following parameters in your configuration \" \\\n \"file are not recognized. Double-check the pipeline \" \\\n \"configuration template.\\n\"\n err += \"\\n\".join([x for x in invalid])\n raise Exception(err)\n else:\n return 0", "def settings_validate(ctx):\n path = ctx.obj['load_path']\n if not path:\n _raise_settings_not_found()\n with open(path) as handle:\n config_dict = json.load(handle)\n try:\n config.validate_config(config_dict)\n except exceptions.ConfigValidationError as err:\n raise click.ClickException(\n '{} is invalid: '.format(path) + err.message\n ) from err", "def _check_loaded_conf(self, config_dict):\n expected_fields = set(self.data_types.keys())\n\n expected_missing = []\n for name in expected_fields:\n if name not in self.defaults and name not in config_dict:\n expected_missing.append(name)\n if expected_missing:\n raise Exception(\"Missing mandatory fileds: {}\"\n .format(\", \".join(expected_missing)))\n\n unexpected = []\n for name in config_dict:\n if name not in expected_fields:\n unexpected.append(name)\n if unexpected:\n raise Exception(\"Unexpected fields: {}\"\n .format(\", \".join(unexpected)))\n\n \"\"\" Check data types method \"\"\"\n for name in config_dict:\n if not isinstance(config_dict[name], self.data_types[name]):\n raise Exception(\"Bad data type: param {}, expected {}\"\n . 
format(name, self.data_types[name]))\n\n self.inmodule.validcheck(config_dict)", "def validate_config(config):\n # check if paths are valid\n check_paths = {\n 'data_path': r'data$',\n 'master_list_path': r'master_list\\.csv$',\n 'duplicate_list_path': r'duplicate_list\\.csv$',\n 'log_path': r'data[\\\\\\/]jobfunnel.log$',\n 'filter_list_path': r'data[\\\\\\/]filter_list\\.json$',\n }\n\n for path, pattern in check_paths.items():\n if not re.search(pattern, config[path]):\n raise ConfigError(path)\n # check if the provider list only consists of supported providers\n if not set(config['providers']).issubset(PROVIDERS):\n raise ConfigError('providers')\n\n # check validity of region settings\n validate_region(config['search_terms']['region'])\n\n # check validity of delay settings\n validate_delay(config['delay_config'])\n\n # check the validity of max_listing_days settings\n if(config['max_listing_days'] is not None and config['max_listing_days'] < 0):\n raise ConfigError('max_listing_days')", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def validate_json(self):\n pass", "def validate(self, config=None):\n for k, v in config.items():\n assert (\n k in self.arguments\n ), f\"\"\"{k} is not a valid argument. Support arguments are {self.format_arguments()}.\"\"\"\n\n if self.arguments[k].type is not None:\n try:\n self.arguments[k].val = self.arguments[k].type(v)\n except ValueError:\n raise ValueError(f\"{k} is not a valid {self.arguments[k].type}.\")\n\n if self.arguments[k].choices is not None:\n assert (\n v in self.arguments[k].choices\n ), f\"\"\"{k} must be one of {self.arguments[k].choices}.\"\"\"\n\n return config", "def test_tap_config_json_validation_retry_with_invalid_properties_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.valid_json_file,\n properties=self.invalid_json_file,\n state=self.valid_json_file)", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. 
Exiting.\"\n exit(-1)\n return False", "def __check_config(config):\n\n for key, value in defaults.CONFIG_REQUIRED_ENTRIES.items():\n if (key not in config.keys()) or (config[key] == \"\" and not value):\n raise ValueError(\"configuration file is missing required key \" + key +\n \" or invalid value was provided\")", "def test_tap_config_json_raise_exception_on_invalid_content_for_state_file(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='state',\n invalid_file_contents=(' ', 'foo', '{\"foo\": 1')\n )", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def test_tap_config_json_validation_retry_with_invalid_state_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.valid_json_file,\n properties=self.valid_json_file,\n state=self.invalid_json_file)", "def validate_config(config):\n pywikibot.log('Config:')\n for key, value in config.items():\n pywikibot.log('-{} = {}'.format(key, value))\n if key in ('ISBN', 'PMID', 'RFC', 'summary'):\n if not isinstance(value, str):\n return False\n config[key] = value.strip() or None\n else:\n return False\n return True", "def validate_config(config: dict):\n if config.get(\"defaults\") is not None:\n if type(config[\"defaults\"]) is not str:\n raise ValueError(\n \"Defaults in the CLI may only be specified using a string. \"\n \"The string used may specify either a path to a configuration \"\n \"yaml or one of the named default configurations.\"\n )\n\n if config[\"defaults\"] in SEASON_CONFIGS.keys():\n return\n else:\n raise ValueError(\"Default configuration string not recognized.\")\n\n freq_params = config.get(\"freq\", {})\n time_params = config.get(\"time\", {})\n array_params = config.get(\"telescope\", {}).get(\"array_layout\", {})\n if {} in (freq_params, time_params, array_params):\n raise ValueError(\"Insufficient information for initializing simulation.\")\n\n freqs_ok = _validate_freq_params(freq_params)\n times_ok = _validate_time_params(time_params)\n array_ok = _validate_array_params(array_params)\n if not all([freqs_ok, times_ok, array_ok]):\n raise ValueError(\"Insufficient information for initializing simulation.\")", "def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")", "def config_bonus_validator(config: Dict[str, Any]) -> None:\n\n if len(config['infiles']) != 2:\n abort(\"Error: Two file names must be provided, what was found: %s\" % config['infiles'])\n elif not exists(config['infiles'][0]):\n abort(\"Error: The first file does not exist: %s\" % config['infiles'][0])\n elif not exists(config['infiles'][1]):\n abort(\"Error: The second file does not exist: %s\" % config['infiles'][1])\n\n if config['compare_cols'] and config['ignore_cols']:\n abort(\"Error: Provide only one of compare_cols or ignore_cols, not both\")\n\n if len(list(set(config['ignore_cols']) & set(config['key_cols']))) > 0:\n config['ignore_cols'] = 
[x for x in config['ignore_cols'] if x not in config['key_cols']]\n print(\"Warning: some key-cols removed from ignore-cols\")\n print(\"Revised config['ignore_cols']: %s\" % config.get('ignore_cols', None))\n elif len(list(set(config['compare_cols']) & set(config['key_cols']))) > 0:\n config['compare_cols'] = [x for x in config['compare_cols'] if x not in config['key_cols']]\n print(\"Warning: some key-cols removed from compare-cols\")\n print(\"Revised config['compare_cols']: %s\" % config.get('compare_cols', None))\n\n for kv_pair in config['variables']:\n if ':' not in kv_pair:\n abort('Invalid variable: must be name:value. Was: %s' % kv_pair)\n\n if 'assignments' in config:\n for assign in config['assignments']:\n if isinstance(assign['src_field'], list):\n abort('Assignment src_field must be a string (refers to col_name) '\n 'or an integer - it is a list')\n if isinstance(assign['dest_field'], list):\n abort('Assignment dest_field must be a string (refers to col_name)'\n 'or an integer - it is a list')", "def _validate_config(self, conf: Dict[str, Any]) -> Dict[str, Any]:\n try:\n validate(conf, constant.CONF_SCHEMA, Draft4Validator)\n return conf\n except ValidationError as exception:\n logger.critical(\n 'Invalid configuration. See config.json.example. Reason: %s',\n exception\n )\n raise ValidationError(\n best_match(Draft4Validator(constant.CONF_SCHEMA).iter_errors(conf)).message\n )", "def _check_scenario_sections_valid(self):\n # 0. check correct number of keys\n assert len(self.yaml_dict) >= len(VALID_CONFIG_KEYS), \\\n (f\"Too few config file keys: {len(self.yaml_dict)} \"\n f\"< {len(VALID_CONFIG_KEYS)}\")\n\n # 1. check keys are valid and values are correct type\n for k, v in self.yaml_dict.items():\n assert k in VALID_CONFIG_KEYS or k in OPTIONAL_CONFIG_KEYS, \\\n f\"{k} not a valid config file key\"\n\n if k in VALID_CONFIG_KEYS:\n expected_type = VALID_CONFIG_KEYS[k]\n else:\n expected_type = OPTIONAL_CONFIG_KEYS[k]\n\n assert isinstance(v, expected_type), \\\n (f\"{v} invalid type for config file key '{k}': {type(v)}\"\n f\" != {expected_type}\")", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. 
%s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def test_file_data_arguments():\n filename = 'wso_args.json'\n assert AUTH.check_file_exists(filename) is True\n\n assert AUTH.verify_config(filename, 'authorization',\n AUTH.encode(RANDOM_USERNAME,\n RANDOM_PASSWORD)) is True\n assert AUTH.verify_config(filename, 'url', RANDOM_URL) is True\n assert AUTH.verify_config(filename, 'aw-tenant-code',\n RANDOM_TENANTCODE) is True", "def validate(config):\n runner = ScenarioRunner._get_cls(config.get(\"type\", \"continuous\"))\n jsonschema.validate(config, runner.CONFIG_SCHEMA)", "def _validate_config(dataset_cfg: DictConfig):\n if dataset_cfg.get('hf_name') is not None:\n # Using the HuggingFace dataset codepath\n illegal_keys = ['local', 'remote']\n discovered_illegal_keys = []\n for key in illegal_keys:\n if dataset_cfg.get(key) is not None:\n discovered_illegal_keys.append('`' + key + '`')\n if discovered_illegal_keys:\n raise ValueError(\n 'The dataset config sets a value for `hf_name` as well as the ' +\\\n f'following keys: {\", \".join(discovered_illegal_keys)}.\\n' +\\\n 'Those keys are used when building from a streaming dataset, but ' +\\\n 'setting `hf_name` instructs the dataset to build from a HuggingFace dataset.'\n )\n elif dataset_cfg.get('remote') is not None:\n # Using the streaming dataset codepath\n illegal_keys = ['hf_name', 'hf_kwargs', 'preprocessing_fn']\n discovered_illegal_keys = []\n for key in illegal_keys:\n if dataset_cfg.get(key) is not None:\n discovered_illegal_keys.append('`' + key + '`')\n if discovered_illegal_keys:\n raise ValueError(\n 'The dataset config sets a value for `remote` as well as the ' +\\\n f'following keys: {\", \".join(discovered_illegal_keys)}.\\n' +\\\n 'Those keys are used when building from a HuggingFace dataset, but ' +\\\n 'setting `remote` instructs the dataset to build from a streaming dataset.'\n )\n if dataset_cfg.get('local') is None:\n raise ValueError(\n 'Using a streaming dataset requires setting both `remote` and `local`, ' +\\\n 'but dataset.local is None.'\n )\n else:\n raise 
ValueError(\n 'In the dataset config, you must set either `hf_name` to use a ' +\\\n 'HuggingFace dataset or set `remote` to use a streaming ' +\\\n 'dataset, but both were None.'\n )", "def test_tap_config_raise_exception_if_invalid_config_yet_after_retries(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='config',\n invalid_file_contents=('', ' ', 'foo', '{\"foo\": 1')\n )", "def get_valid_config_data():\n\t# Valid JSON config for testing\n\tJSON_config = '''{\n\t\t\"1\": {\n\t\t\t\"type\": \"custom\",\n\t\t\t\"faces\": \"right\"\n\t\t},\n\t\t\"3\": {\n\t\t\t\"type\": \"custom2\"\n\t\t},\n\t\t\"5\": {\n\t\t\t\"x\": 32,\n\t\t\t\"y\": 128\n\t\t},\n\t\t\"7\": {\n\t\t\t\"is_collidable\": false\n\t\t}\n\t}'''\n\n\t# Expected values for the parsed tileset config\n\texpected_config = {\n\t\t1: {\n\t\t\t'type': 'custom',\n\t\t\t'faces': 'right',\n\t\t},\n\t\t2: {},\n\t\t3: {\n\t\t\t'type': 'custom2',\n\t\t},\n\t\t4: {},\n\t\t5: {\n\t\t\t'x': 32,\n\t\t\t'y': 128,\n\t\t},\n\t\t6: {},\n\t\t7: {\n\t\t\t'is_collidable': False,\n\t\t},\n\t\t8: {},\n\t}\n\n\treturn {\n\t\t'JSON': JSON_config,\n\t\t'expected': expected_config\n\t}", "def validate_settings(_cfg, _ctx):\n pass", "def test_validate_config_invalid_config(self):\n\n sample_config = {\n 'syncs': [\n {\n 'name': 'all'\n }\n ]\n }\n\n result = syncme.validate_config(sample_config)\n # FIXME: it's better to raise exception when something goes wrong\n self.assertFalse(result, 'syncs with name \"all\" are not allowed')\n\n sample_config = {\n 'hosts': [\n {\n # global host without address field is invalid\n 'name': 'global_host'\n }\n ]\n }\n is_valid = syncme.validate_config(sample_config)\n self.assertFalse(is_valid)\n\n sample_config = {\n 'hosts': [\n {\n # a global host at least need to define a name and address\n 'name': 'global_host',\n 'address': 'example.com'\n }\n ]\n }\n is_valid = syncme.validate_config(sample_config)\n self.assertTrue(is_valid)\n\n sample_config = {\n 'hosts': [\n {\n # global host with paths field is invalid\n 'name': 'global_host',\n 'address': 'example.com',\n 'paths': [\n '/some/path',\n ]\n }\n ]\n }\n is_valid = syncme.validate_config(sample_config)\n self.assertFalse(is_valid)\n\n sample_config = {\n 'hosts': [\n {\n 'name': 'global_host',\n 'address': 'example.com',\n 'user': 'user1',\n 'password': '123'\n }\n ],\n 'syncs': [\n {\n # sync without a name is invalid\n 'paths': [\n '/some/path',\n '/another/path'\n ]\n }\n ]\n }\n is_valid = syncme.validate_config(sample_config)\n self.assertFalse(is_valid)\n\n # host is defined without address\n sample_config = {\n 'hosts': [\n {\n 'name': 'backup_server',\n }\n ],\n 'syncs': [\n {\n 'name': 'backups',\n 'paths': [\n '/some/path',\n '/another/path'\n ],\n 'hosts': [\n {\n 'name': 'backup_server',\n }\n ]\n\n }\n ]\n }\n is_valid = syncme.validate_config(sample_config)\n self.assertFalse(is_valid)", "def valid_cfg(cfg):\n\t\tif not isinstance(cfg, dict):\n\t\t\traise TypeError('Config should be a python dictionary')\n\t\treturn cfg", "def _validate_config(self):\n # Simulation ID\n empty_string_check(self._config_dict['@id'])\n \n # Output\n empty_string_check(self._config_dict['output']['@baseDirectory'])\n self._config_dict['output']['@saveInteractionLog'] = parse_boolean(self._config_dict['output']['@saveInteractionLog'])\n self._config_dict['output']['@saveRelevanceJudgments'] = parse_boolean(self._config_dict['output']['@saveRelevanceJudgments'])\n self._config_dict['output']['@trec_eval'] = 
parse_boolean(self._config_dict['output']['@trec_eval'])\n \n # Topics\n def check_topic(t):\n \"\"\"\n Checks a given topic, t. Looks for a topic ID and a valid topic description file.\n \"\"\"\n empty_string_check(t['@id'])\n filesystem_exists_check(t['@filename'])\n filesystem_exists_check(t['@qrelsFilename'])\n \n if '@backgroundFilename' in t: # A background file was specified.\n filesystem_exists_check(t['@backgroundFilename'])\n else:\n t['@backgroundFilename'] = None # No background file was specified.\n \n topics = self._config_dict['topics']['topic']\n \n if type(topics) == list:\n for topic in topics:\n check_topic(topic)\n else:\n check_topic(topics)\n \n # Users\n users = self._config_dict['users']['user']\n \n if type(users) == list:\n for user in users:\n filesystem_exists_check(user['@configurationFile'])\n else:\n filesystem_exists_check(users['@configurationFile'])\n \n # Search Interface\n empty_string_check(self._config_dict['searchInterface']['@class'])\n check_attributes(self._config_dict['searchInterface'])", "def _validate_main_config(self):\n # check for required top-level parameters in main config\n required_params = {\"name\": str, \"version\": str, \"datasets\": list}\n\n for param, expected_type in required_params.items():\n if param not in self.config:\n msg = (\n \"[ERROR] Config error: missing required configuration parameter in {}: '{}'\"\n )\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param))\n elif not isinstance(self.config[param], expected_type):\n msg = \"[ERROR] Config error: parameter is of unexpected type {}: '{}' (expected: '{}')\"\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param, expected_type))", "def validate_config(self):\r\n c = self.config\r\n \r\n # Make sure that we have a database_path, and an image_path...\r\n assert 'database_path' in c\r\n assert 'image_path' in c\r\n # We should probably check if these paths exist and make them as well...\r\n \r\n # Set the default values.\r\n graph_draw_frequency = c['graph_draw_frequency']\r\n for period, interval in self.default_config['graph_draw_frequency'].iteritems():\r\n graph_draw_frequency.setdefault(period, interval)\r\n \r\n # A quick check to make sure that our port is an integer.\r\n c['httpd_port'] = int(c['httpd_port'])\r\n \r\n # Make sure that no duplicate IDs exist, and that the template exists as well.\r\n ids = set()\r\n for graph in c['graphs']:\r\n graph.setdefault('config', {})\r\n graph['config'].setdefault('periods', [])\r\n assert graph['id'] not in ids\r\n ids.add(graph['id'])\r\n assert(template_exists(graph['template']))", "def validateProp(filename):\n\n # does the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Prop file (%s) does not exist' % (filename))\n return False\n\n # can I read it\n try:\n propFile = open(filename, 'r')\n prop = json.load(propFile)\n propFile.close()\n except (ValueError, OSError):\n LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))\n return False\n\n # does the prop have the correct value\n for key in ('name', 'md5', 'description', 'size', 'contact'):\n if (key not in prop):\n LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))\n return False\n\n return True", "def validate_configuration_file(self):\n\n with open(self.config_path, \"r+\") as f_config:\n return bool(re.search(get_configuration_file_re(),\n f_config.read()))", "def 
test_tap_config_json_valid_if_state_file_is_empty(self):\n self._assert_tap_config(\n config=self.valid_json_file,\n properties=self.valid_json_file,\n state=self.empty_file\n )", "def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)", "def test_load_json_good_to_dictionary(self):\n self.assertIsInstance(LoadJsonConfig.read_config_file(LoadJsonConfig(),'data/json/conf_ok.json'),OrderedDict)", "def test_target_config_raises_exception_if_not_valid_json_after_retries(self):\n invalid_file_contents = ['', 'foo', '{\"foo\": 1']\n\n for invalid_content in invalid_file_contents:\n with open(self.invalid_json_file, 'w', encoding='utf-8') as invalid_file:\n invalid_file.write(invalid_content)\n\n with self.assertRaises(commands.RunCommandException) as command_exception:\n self._assert_target_config(config=self.invalid_json_file)\n\n self.assertEqual(f'Invalid json file for config: {self.invalid_json_file}',\n str(command_exception.exception))", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def validate_input(json_object):\n try:\n if type(json_object) is not list:\n return False\n for machine_config in json_object:\n if (type(machine_config[\"ip\"]) is not str) or not validate_ip(machine_config[\"ip\"]):\n return False\n if type(machine_config[\"community\"]) is not str:\n return False\n if type(machine_config[\"config\"]) is not list:\n return False\n for actual_config in machine_config[\"config\"]:\n if (type(actual_config[\"segment\"]) is not int) or not validate_segment(actual_config[\"segment\"]):\n return False\n if type(actual_config[\"ports\"]) is not list:\n return False\n for actual_port in actual_config[\"ports\"]:\n if (type(actual_port) is not int) or not validate_port(actual_port):\n return False\n except KeyError as ke:\n # Formato incorrecto debido a que algun campo no existe\n return False\n # Todos los campos existen y estan bien\n return True", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def test_invalid_json_config(self):\n invalid_config_data = 'This is not JSON!!!'\n with mock_open(LAMBDA_FILE, invalid_config_data):\n with mock_open(GLOBAL_FILE, invalid_config_data):\n _load_config()", "def test_json_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n # empty - should parse\n spec = {}\n read_wrapper(spec, ps)\n\n # empty array - should parse\n spec = {'constraints': []}\n read_wrapper(spec, ps)\n\n # empty element - should fail\n spec = {'constraints': [{}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching block - should fail\n spec = {'constraints': [{'block': 'a'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching variable - should fail\n spec = {'constraints': [{'variable': 'c'}]}\n with self.assertRaises(ParseError):\n 
read_wrapper(spec, ps)\n\n # loner option - should fail\n spec = {'constraints': [{'option': 'a1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner block - should parse\n spec = {'constraints': [{'block': 'A', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # block and option - should parse\n spec = {'constraints': [{'block': 'A', 'option': 'a1', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variable and option - should parse\n spec = {'constraints': [{'variable': 'a', 'option': '2.5', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # weird option - should parse\n # fixme: {'option': '[1,2]'} will fail\n spec = {'constraints': [{'variable': 'c', 'option': '[1, 2]', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H==b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H.index==1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)", "def validate_file(inp, name=''):\n validate_string(inp, name)\n assert (os.path.exists(inp)), name + ' settings with value ' + inp + ' should exist.'", "def _verify_ini(self, config_file=None):\n\n imgur_values = ['ClientID', 'ClientSecret', 'AccessToken', 'RefreshToken']\n mysql_values = ['Host', 'User', 'Password', 'Database']\n missing_values = []\n\n if not config_file:\n print(\"No Config Filed Supplied. Aborting\")\n sys.exit(1)\n\n for val in imgur_values:\n if val not in config_file['IMGURAPI']:\n missing_values.append('IMGURAPI: ' + val)\n\n for val in mysql_values:\n if val not in config_file['MYSQL']:\n missing_values.append('MYSQL: ' + val)\n\n if missing_values:\n print('ERROR: ini file is missing required values. 
\\n Missing Values:')\n for val in missing_values:\n print(val)\n sys.exit(1)", "def test_configurations_create_invalid_values(self):\n values = '{\"this_is_invalid\": 123}'\n try:\n instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC)\n except exceptions.UnprocessableEntity:\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 422)", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def validate_config_file(self, config_file):\n config_dict = ConfigObj(config_file, configspec=CFG_SPEC.split('\\n'))\n result = config_dict.validate(Validator(), copy=True, preserve_errors=True)\n if result != True:\n msg = 'Config file validation failed: %s' % result\n raise Exception(msg)\n return config_dict", "def validate_config(params, error_callback):\n local_params = dict(params)\n _validate_value_formats(local_params, error_callback)\n _validate_in_cidr(local_params, error_callback)\n _validate_dhcp_range(local_params, error_callback)\n _validate_inspection_range(local_params, error_callback)\n _validate_no_overlap(local_params, error_callback)\n _validate_ips(local_params, error_callback)\n _validate_interface_exists(local_params, error_callback)", "def check_config(config):\n rq = {\"name\", \"description\", \"region\", \"user\", \"instance_type\",\n \"base_image\", \"uploads\", \"commands\"}\n diff = rq - set(config.keys())\n if diff:\n raise(BadConfigFile(\"Missing keys {} in config\".format(diff)))", "def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration]\n ) -> None:\n\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def validate_config(config: Settings):\n validators = [\n Validator(\"logging.console\", is_type_of=bool, required=True, eq=True)\n | Validator(\"logging.file\", is_type_of=bool, required=True, eq=True),\n Validator(\n \"logging.console_verbosity\",\n is_in=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n required=True,\n when=Validator(\"logging.console\", eq=True),\n ),\n Validator(\n \"logging.file_verbosity\",\n is_in=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n required=True,\n when=Validator(\"logging.file\", eq=True),\n ),\n Validator(\n \"logging.filename\", required=True, when=Validator(\"logging.file\", eq=True)\n ),\n Validator(\n \"vast\", \"vast_binary\", \"threatbus\", \"metrics.filename\", required=True\n ),\n Validator(\"live_match\", \"retro_match\", is_type_of=bool, required=True),\n Validator(\n \"snapshot\",\n \"retro_match_max_events\",\n \"max_background_tasks\",\n \"metrics.interval\",\n is_type_of=int,\n required=True,\n ),\n Validator(\"retro_match_timeout\", is_type_of=float, required=True),\n Validator(\"transform_context\", \"sink\", default=None),\n Validator(\"metrics.interval\"),\n ]\n\n 
config.validators.register(*validators)\n config.validators.validate()", "def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))", "def test_valid_variables(self):\n with open(DEFINITIONS_SETTINGS_FILE, 'r') as openDefFile:\n definitionsContents = json.loads(openDefFile.read())\n with open(VARIABLES_SETTINGS_FILE, 'r') as openVarFile:\n variablesContents = json.loads(openVarFile.read())\n\n variablePattern = re.compile(r'{[^{^}]*}')\n for dictKey, dictContents in definitionsContents.items():\n variables = variablePattern.findall(str(dictContents))\n if len(variables) > 0:\n for variable in variables:\n valid = False\n for variableKey, variableItem in variablesContents.items():\n if variable == variableItem:\n valid = True\n break\n self.assertTrue(valid)", "def properties_validation(config_data: Dict = None) -> bool:\n\n if config_data is None:\n config_file = os.path.join(\n os.path.dirname(__file__), 'server-config.json')\n with open(config_file) as config:\n config_data = json.load(config)\n platform_properties, err = PlatformPropertiesSchema().load(config_data)\n\n # Raise error if required property is not provided\n if err:\n raise MissingRequiredParameterError(err)\n\n # Raise error if unsupported protocol or module\n for protocol in platform_properties.supported_transfer_protocols:\n if protocol not in SUPPORTED_PROTOCOLS:\n err = str.format(\"Unsupported protocol {}\", protocol)\n raise ValueError(err)\n for module in platform_properties.supported_modules:\n if module not in SUPPORTED_MODULES:\n err = str.format(\"Unsupported module {}\", module)\n raise ValueError(err)\n\n # Raise error if https not in supported protocols\n if \"https\" not in platform_properties.supported_transfer_protocols:\n raise MissingRequiredParameterError(\n 'CARMIN 0.3 requires https support')\n\n # Raise error if minTimeout is greater than maxTimeout\n if (platform_properties.max_authorized_execution_timeout != 0\n and platform_properties.min_authorized_execution_timeout >\n platform_properties.max_authorized_execution_timeout):\n raise ValueError('maxTimeout must be greater than minTimeout')\n return True", "def config_validation(configuration):\n tube_num = configuration.get('tube_number')\n q_dof = configuration.get('q_dof')\n radius = configuration.get('tube_radius')\n delta_x = configuration.get('delta_x')\n tube_lengths = configuration.get('tube_lengths')\n\n if isinstance(q_dof, int):\n configuration['q_dof'] = [q_dof] * tube_num\n print(f\"Using {q_dof} as q_dof for every tube.\\n\")\n elif isinstance(q_dof, list) and len(q_dof) == tube_num:\n pass\n else:\n raise ValueError(f\"Input for q_dof of {q_dof} is not suitable.\\n\")\n\n if isinstance(radius, list) and len(radius) == tube_num:\n inner = [rad - 0.1 for rad in radius]\n configuration['tube_radius'] = {'outer': radius, 'inner': inner}\n elif isinstance(radius, dict) and 'outer' in radius.keys() and len(radius.get('outer')) == tube_num:\n if 'inner' in radius.keys() and len(radius.get('inner')) == tube_num:\n pass\n else:\n radius['inner'] = [rad - 0.1 for rad in radius.get('outer')]\n configuration['tube_radius'] = radius\n else:\n 
raise ValueError(f\"Input for radius of {radius} is not suitable.\\n\")\n\n if isinstance(tube_lengths, (int, float)):\n configuration['tube_lengths'] = [tube_lengths] * tube_num\n print(f\"Using {tube_lengths} as length for every tube.\\n\")\n elif isinstance(tube_lengths, list) and len(tube_lengths) == tube_num:\n pass\n else:\n raise ValueError(f\"Input for tube_lengths of {tube_lengths} is not suitable.\\n\")\n\n new_lengths = configuration.get('tube_lengths')\n for this_length in new_lengths:\n if this_length % delta_x != 0:\n raise ValueError(f\"Length input {this_length} not divisible by delta_x: {delta_x}\\n\")", "def valid_configuration(self):\n valid = True\n\n if (not self.__config.suffix()) and (self.__config.output_dir() == self.__config.input_dir()):\n print(\"ERROR: output_dir directory cannot be the same as input_dir with an empty suffix!\")\n valid = False\n if not self.__config.public_key():\n print(\"ERROR: public_key not set! Set it through 'pdfworkshop config public_key <your_key>'. \"\n \"A free API key can be obtained from https://developer.ilovepdf.com/\")\n valid = False\n return valid", "def test_tap_config_raise_exception_if_invalid_properties_yet_after_retries(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='properties',\n invalid_file_contents=('', ' ', 'foo', '{\"foo\": 1')\n )", "def validate_settings(self, settings):\n pass", "def _verify(self, data):\n sections = {\n \"schema\": dict,\n \"defaults\": dict,\n \"projects\": list\n }\n\n for key, type_ in sections.items():\n if key not in data:\n self._report_error(\"Missing section: {}\".format(key))\n return False\n if not isinstance(data[key], type_):\n err = \"Wrong data type for section {}, should be {}\"\n self._report_error(err.format(key, type_))\n return False\n\n for setting in data[\"defaults\"]:\n if setting not in data[\"schema\"]:\n err = \"Invalid setting {0} in default configuration\"\n self._report_error(err.format(setting))\n return False\n\n for i, project in enumerate(data[\"projects\"]):\n if not isinstance(project, dict):\n err = \"Wrong data type for project at index {}\"\n self._report_error(err.format(i))\n return False\n if \"name\" not in project:\n err = \"Missing name for project at index {}\"\n self._report_error(err.format(i))\n return False\n for setting in project:\n if setting not in data[\"schema\"]:\n err = \"Invalid setting {} for project {}\"\n self._report_error(err.format(setting, project[\"name\"]))\n return False\n\n return True", "def check_config(config):\n pass", "def test_config_from_json(self):\n\n # Make json-file\n path = self.write_temp_file(\"\"\"\n{\n \"section1\": {\n \"string1\": \"\",\n \"string2\": \"string2\",\n \"int1\": 0,\n \"int2\": 1,\n \"float1\": 0.0,\n \"float2\": 1.1,\n \"boolean1\": false,\n \"boolean2\": true\n },\n \"section2\": {\n \"string2\": \"string2\",\n \"int2\": 2,\n \"float2\": 2.2,\n \"boolean2\": false\n }\n}\n\"\"\")\n\n for namespace in [None, 'namespace']:\n config = Config()\n config.load_from_json(path, namespace=namespace)\n\n namespace_prefix = '%s.' 
% namespace if namespace is not None else ''\n\n # Test section 1\n self.assert_equal_deep(8, len(config('%ssection1' % namespace_prefix)))\n self.assert_equal_deep('', config('%ssection1.string1' % namespace_prefix))\n self.assert_equal_deep('string2', config('%ssection1.string2' % namespace_prefix))\n self.assert_equal_deep(0, config('%ssection1.int1' % namespace_prefix))\n self.assert_equal_deep(1, config('%ssection1.int2' % namespace_prefix))\n self.assert_equal_deep(0.0, config('%ssection1.float1' % namespace_prefix))\n self.assert_equal_deep(1.1, config('%ssection1.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection1.boolean1' % namespace_prefix))\n self.assert_equal_deep(True, config('%ssection1.boolean2' % namespace_prefix))\n\n # Test section 2\n self.assert_equal_deep(4, len(config('%ssection2' % namespace_prefix)))\n self.assert_equal_deep('string2', config('%ssection2.string2' % namespace_prefix))\n self.assert_equal_deep(2, config('%ssection2.int2' % namespace_prefix))\n self.assert_equal_deep(2.2, config('%ssection2.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection2.boolean2' % namespace_prefix))\n\n # Test section 3\n self.assert_equal(None, config('%ssection3' % namespace_prefix))", "def test_ssh_config1(self):\n self.assertEqual(\n parse(self.f_in['ssh_config1'], quiet=True),\n self.f_json['ssh_config1']\n )", "def test_parse_prefs_file(self):\r\n # Test good input\r\n ps1 = \"\"\"{'bgcolor':'white','colors':\r\n {'id':'blue','name':'green'},'list':[1,2,3]}\"\"\"\r\n exp1 = {'bgcolor': 'white', 'colors': {'id': 'blue', 'name': 'green'},\r\n 'list': [1, 2, 3]}\r\n self.assertEqual(parse_prefs_file(ps1), exp1)\r\n\r\n # Test bad input\r\n # list of valid input rather than multiline string should fail.\r\n ps_bad_1 = [\"{'bgcolor':'white',\",\r\n \"'colors':{'id':'blue','name':'green'}\",\r\n \",'list':[1,2,3]}\"]\r\n self.assertRaises(QiimeParseError, parse_prefs_file, ps_bad_1)\r\n\r\n # bad data. Can be evaluated but not a dict.\r\n ps_bad_2 = \"[1,2,3]\"\r\n self.assertRaises(QiimeParseError, parse_prefs_file, ps_bad_2)", "def test_config_must_exist(cls, values):\n configs = [c.config for c in values.get('configs')]\n for test in values.get('tests'):\n if test.config not in configs:\n raise ValueError(\n f\"Test '{test.test}' gave the config '{test.config}', but \"\n \"this config does not exist in the file \"\n f\"'{values.get('yaml')}'. 
Configs detected : {configs} \\n\")\n return values", "def test_validate_config_file(self):\n ingest_mgmr = IngestManager()\n ingest_mgmr.validate_config_file(self.example_config_data)\n assert(ingest_mgmr.config is not None)\n assert (ingest_mgmr.config.config_data is not None)", "def validate_var(path,scheme,var,val):\n if 'type' not in scheme:\n raise InvalidConfigTemplate(var+'.type: missing')\n typ=scheme.type\n if not isinstance(typ,str):\n raise InvalidConfigTemplate(var+'.type: must be a string')\n allowed=scheme.get('allowed',[])\n if not isinstance(allowed,list) and not isinstance(allowed,list_eval):\n raise InvalidConfigTemplate(var+'.allowed: must be a list')\n validate_type(path,var,typ,val,allowed)", "def validate_settings(self):\n\t\t# Check all attributes exist\n\t\tfor key, value in vars(self).items():\n\t\t if hasattr(self, key) == False:\n\t\t\t\tUtility.report_error(1, '%s: Missing attribute \"%s\"' % (self._file_path, key))\n\n\t\t# Check mandatory attributes\n\t\tif self.is_valid_status(self.status) == False:\n\t\t\tUtility.report_error(1, '%s: Status \"%s\" is not valid' % (self._file_path, self.status))\n\n\t\tif self.definition == '' or self.definition == None:\n\t\t\tUtility.report_error(1, '%s: Definition field is empty or missing' % (self._file_path))\n\t\t\n\t\tif self.term == '' or self.term == None:\n\t\t\tUtility.report_error(1, '%s: Term field is empty or missing' % (self._file_path))\n\n\t\t# If status is neither approved or elaboration reject reason must be stated\n\t\tif (self.status == 'rejected' or self.status == 'replaced') and (self.status_reason == '' or self.status_reason == None):\n\t\t\tUtility.report_error(1, '%s: \"Status reason\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is rejected a rejected by user must be specified\n\t\tif self.status == 'rejected' and (self.rejected_by == '' or self.rejected_by == None):\n\t\t\tUtility.report_error(1, '%s: \"Rejected by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is replaced then Replaced by must be specified\n\t\tif self.status == 'replaced' and (self.replaced_by == None or self.replaced == ''):\n\t\t\tUtility.report_error(1, '%s: \"Replaced by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\tself.created_by = self.make_link_list('stakeholders', 'Created by', self.created_by, False)\n\t\tself.rejected_by = self.make_link_list('stakeholders', 'Rejected by', self.rejected_by, False)\n\t\tself.replaced_by = self.make_link_list('glossary', 'Replaced by', self.replaced_by)\n\n\t\tif self.is_string_date(self.created_on) == False:\n\t\t\tUtility.report_error(1, '%s: Created on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.created_on))\n\n\t\tif self.is_string_date(self.rejected_on) == False:\n\t\t\tUtility.report_error(1, '%s: Rejected on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.rejected_on))", "def validate_config():\n\n # diff/sync settings, not including templates (see below)\n nori.setting_check_list('action', ['diff', 'sync'])\n nori.setting_check_type('reverse', bool)\n nori.setting_check_type('bidir', bool)\n nori.setting_check_callbacks('pre_action_callbacks')\n nori.setting_check_callbacks('post_action_callbacks', 1, 1)\n for i, cb_t in enumerate(nori.core.cfg['post_action_callbacks']):\n 
nori.setting_check_type(('post_action_callbacks', i, 3), bool)\n nori.setting_check_list('source_type', ['generic', 'drupal'])\n nori.setting_check_callable('source_query_func', may_be_none=False)\n nori.setting_check_callable('source_query_defaulter', may_be_none=True)\n nori.setting_check_callable('source_query_validator', may_be_none=False)\n nori.setting_check_callbacks('source_template_change_callbacks')\n nori.setting_check_callbacks('source_global_change_callbacks')\n nori.setting_check_list('dest_type', ['generic', 'drupal'])\n nori.setting_check_callable('dest_query_func', may_be_none=False)\n nori.setting_check_callable('dest_query_defaulter', may_be_none=True)\n nori.setting_check_callable('dest_query_validator', may_be_none=False)\n nori.setting_check_callbacks('dest_template_change_callbacks')\n nori.setting_check_callbacks('dest_global_change_callbacks')\n nori.setting_check_list('template_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['template_mode'] != 'all':\n nori.setting_check_not_empty('template_list')\n for i, t_name in enumerate(nori.core.cfg['template_list']):\n nori.setting_check_type(('template_list', i),\n nori.core.STRING_TYPES)\n nori.setting_check_list('key_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['key_mode'] != 'all':\n nori.setting_check_not_empty('key_list')\n\n # templates: general\n nori.setting_check_not_empty(\n 'templates', types=nori.core.MAIN_SEQUENCE_TYPES\n )\n for i, template in enumerate(nori.core.cfg['templates']):\n nori.setting_check_type(('templates', i), nori.core.MAPPING_TYPES)\n # bogus elements\n for k in template:\n if k not in T_KEYS:\n nori.err_exit(\n \"Warning: cfg['templates'][{0}][{1}] is set\\n\"\n \"(to {2}), but there is no such setting.\" .\n format(i, *map(nori.pps, [k, template[k]])),\n nori.core.exitvals['startup']['num']\n )\n # template name\n nori.setting_check_type(('templates', i, T_NAME_KEY),\n nori.core.STRING_TYPES)\n # multiple-valued value columns?\n nori.setting_check_type(('templates', i, T_MULTIPLE_KEY), bool)\n # source-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_S_QUERY_ARGS_KEY))\n # to-dest transform function\n nori.setting_check_callable(('templates', i, T_TO_D_FUNC_KEY),\n may_be_none=True)\n # source-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_S_NO_REPL_KEY), bool)\n # source-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_S_CHANGE_CB_KEY))\n # dest-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_D_QUERY_ARGS_KEY))\n # to-source transform function\n nori.setting_check_callable(('templates', i, T_TO_S_FUNC_KEY),\n may_be_none=True)\n # dest-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_D_NO_REPL_KEY), bool)\n # dest-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_D_CHANGE_CB_KEY))\n # key mode\n nori.setting_check_list(('templates', i, T_KEY_MODE_KEY),\n ['all', 'include', 'exclude'])\n if template[T_KEY_MODE_KEY] != 'all':\n # key list\n nori.setting_check_not_empty(('templates', i, T_KEY_LIST_KEY))\n\n # templates: query-function arguments\n for (sd, t_key, validator_key) in [\n ('s', T_S_QUERY_ARGS_KEY, 'source_query_validator'),\n ('d', T_D_QUERY_ARGS_KEY, 'dest_query_validator')\n ]:\n # args tuple\n args_idx = ('templates', i, t_key)\n args_t = template[t_key]\n # key_cv, value_cv (somewhat)\n for cv_str in ['key_cv', 'value_cv']:\n cv_idx = args_idx + (1, cv_str)\n nori.setting_check_not_empty(\n cv_idx, 
types=nori.core.MAIN_SEQUENCE_TYPES\n )\n cv_seq = args_t[1][cv_str]\n for j, cv in enumerate(cv_seq):\n nori.setting_check_length(cv_idx + (j, ), 2, 3,\n types=tuple)\n # the rest of the arguments\n nori.core.cfg[validator_key](sd, args_idx, args_t, i)\n\n # reporting settings\n nori.setting_check_list('report_order', ['template', 'keys'])\n # the rest are handled by nori.validate_email_config()", "def validate_config_data(self, sentry_unit, config_file, section,\n expected):\n config = self._get_config(sentry_unit, config_file)\n\n if section != 'DEFAULT' and not config.has_section(section):\n return \"section [{}] does not exist\".format(section)\n\n for k in expected.keys():\n if not config.has_option(section, k):\n return \"section [{}] is missing option {}\".format(section, k)\n if config.get(section, k) != expected[k]:\n return \"section [{}] {}:{} != expected {}:{}\".format(\n section, k, config.get(section, k), k, expected[k])\n return None", "def check_config(self):\n cfgs = self.__get() \n \n for option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n \n if not cfgs.has_key(option):\n self.log.warn(\"Parameter '%s' is missing in '%s', using default('%s')\" % \\\n (option, self.config_file, _default))\n _file = _default\n else:\n _file = cfgs[option]\n Config.FILE_OPTIONS[option] = _file\n\n if not os.path.exists(_file) and not os.path.isfile(_file):\n self.log.error(\"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n\n\n for option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n \n if not cfgs.has_key(option):\n self.log.warn(\"Parameter '%s' is missing in '%s', using default('%s')\" % \\\n (option, self.config_file, _default))\n _dir = _default\n else:\n _dir = cfgs[option]\n Config.PATH_OPTIONS[option] = _dir\n\n if not os.path.exists(_dir) and not os.path.isdir(_dir):\n self.log.error(\"Paramenter '%s' points to non-existing directory '%s')\" % \\\n (option, _dir))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing directory '%s')\" % \\\n (option, _dir))\n\n \n Config.DB_SFT_OPTIONS['sqlalchemy_sft.url'] = cfgs['sqlalchemy_sft.url']\n Config.DB_NAGIOS_OPTIONS['sqlalchemy_nagios.url'] = cfgs['sqlalchemy_nagios.url']\n\n self.log.debug(\"Configuration successfully checked\")", "def validate(self, config):\n if not isinstance(config, list):\n config = [config]\n\n for conf in config:\n if not conf.get('path'):\n raise ConfigError('Camera needs a `path` to save files to.')\n \n return config", "def _validate(self, config):\n assert isinstance(config, BaseConfig), \\\n \"Configuration should be instance of `BaseConfig`, but given {}\".format(type(config))", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def verify_configuration_types(config):\n if not isinstance(config[\"count\"], int):\n return False\n return True", "def validate_rate_config(rate_config: dict):\n if not isinstance(rate_config, dict):\n raise ValueError(f'Rate config must be dict object. 
Recieved a rate config of type {type(rate_config)}')\n\n # TODO: add further config validation checks", "def _validity_check(settings, valid):\n\n Settings._dict_validity_check(settings, valid)", "def _check_yaml(self, yaml):\n if type(yaml['datasets']) == dict:\n logging.error(\n \"[ERROR] \\\"datasets\\\" section of config file must be a list, not a dictionary...\" \n )\n sys.exit()", "def validate(self, data):\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data", "def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n super().validate_configuration(configuration)\n self.validate_metric_value_between_configuration(configuration=configuration)", "def _verify_options(config: configuration.Config) -> None:\n\n if not config.config['species']:\n log._logger.error('You must specify a species (-s/--species)')\n exit(1)\n\n if config.config['hpc'] and config.config['local']:\n log._logger.error('You can only use one of the config options (hpc/local)')\n exit(1)\n\n if config.config['hpc'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (hpc/custom)')\n exit(1)\n\n if config.config['local'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (local/custom)')\n exit(1)\n\n if (not config.config['hpc']) and\\\n (not config.config['local']) and\\\n (not config.config['custom']):\n log._logger.error(\n 'You must specify a compute cluster environment (hpc/local/custom)'\n )\n exit(1)\n\n if config.config['custom'] and (not config.config['scheduler']):\n log._logger.error(\n 'The custom compute environment requires a scheduler address to be set'\n )\n exit(1)", "def check_config(cfg):", "def _validate_configurations(self) -> None:\n if self.__exception:\n raise self.__exception", "def test_json(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(input_file_path), 'r') as input_file:\n with open(attach_path(answer_file_path), 'r') as answer_file:\n assert str(read_json(input_file.read().strip())) == answer_file.read().strip()", "def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)", "def raiser(string):\n raise Exception(f'Please check your config.json file, {string} is missed or wrong.')", "def 
validate_config_values(func):\n\n @wraps(func)\n def decorator(self_, *args, **kwargs):\n for arg in args:\n if isinstance(arg, Mapping):\n try:\n # parse date strings by the pendulum library. It will raise the exception ParserError if it is some format mistakes.\n pendulum.parse(arg[\"start_date\"])\n # try to check an end_date value. It can be ussed for different CI tests\n end_date = arg.get(\"end_date\")\n if end_date:\n pendulum.parse(end_date)\n except ParserError as e:\n raise Exception(f\"{str(e)}. Example: 2021-01-01T00:00:00Z\")\n break\n\n return func(self_, *args, **kwargs)\n\n return decorator" ]
[ "0.7649231", "0.68019813", "0.6770367", "0.6684895", "0.6678061", "0.66503954", "0.66342115", "0.6629598", "0.6629598", "0.66133463", "0.6548915", "0.6533521", "0.6527717", "0.6519201", "0.65179867", "0.6504512", "0.64998895", "0.64800966", "0.6476303", "0.64425457", "0.64190865", "0.6415855", "0.63789624", "0.6367426", "0.6365211", "0.6345887", "0.63440377", "0.6327295", "0.63242537", "0.6294106", "0.6287575", "0.6267252", "0.62563217", "0.62500554", "0.62413585", "0.62187296", "0.61869264", "0.61812115", "0.61801577", "0.6176202", "0.6154198", "0.61512446", "0.61309457", "0.6128716", "0.6124844", "0.61171216", "0.6110576", "0.6109818", "0.60830724", "0.60738677", "0.60631025", "0.60390854", "0.6037309", "0.60227937", "0.6015197", "0.60111284", "0.6000288", "0.59983134", "0.5990737", "0.5987339", "0.5978492", "0.597747", "0.59772915", "0.5959973", "0.59540385", "0.5949336", "0.5947621", "0.5945074", "0.59426117", "0.5938623", "0.59352577", "0.59215844", "0.59172577", "0.59122", "0.5907871", "0.5907012", "0.590688", "0.59064406", "0.58998644", "0.58985853", "0.5895415", "0.58768463", "0.5874538", "0.58741957", "0.5869317", "0.5866509", "0.5864606", "0.58522296", "0.5848414", "0.5833962", "0.58259475", "0.5823663", "0.58206916", "0.58136034", "0.58120984", "0.58033335", "0.5791859", "0.5791816", "0.57917595", "0.57912946" ]
0.6064642
50
Validate that the input values, passed as either a file or a json string, are correct.
def validate_input_values(self, source, **kwargs): return self._validate_values("input_values", source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def validate_json(self):\n pass", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. 
Exiting\"\n sys.exit(1)", "def test_json(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(input_file_path), 'r') as input_file:\n with open(attach_path(answer_file_path), 'r') as answer_file:\n assert str(read_json(input_file.read().strip())) == answer_file.read().strip()", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. 
%s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def test_load_json_value_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('\"two\"')\n\n assert load_json(fname) == \"two\"\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a dict\"\n ):\n load_json_object(fname)\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)", "def check_job_json(job_info):\n job_type_list = [job_type.value for _, job_type in JobType.__members__.items()]\n if 'source_id' not in job_info:\n raise ValueError(\"Json string Errors, key:source_id not found.\")\n if 'job_id' not in job_info:\n raise ValueError(\"Json string Errors, key:job_id not found.\")\n if 'job_type' not in job_info or not job_info['job_type']:\n raise ValueError(\"Json string Errors, key:job_type not found.\")\n if job_info['job_type'] not in job_type_list:\n raise ValueError(\"Invalid job type: {}.\".format(job_info['job_type']))\n if 'job_content' not in job_info:\n raise ValueError(\"Json string Errors, key:job_content not found.\")", "def is_valid_json(json_str):\n assert json_str is not None\n try:\n json.loads(json_str)\n return True\n except (ValueError, TypeError):\n return False", "def validate(self, config_json):\n pass", "def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" \"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)", "def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst", "def test_invalid_data(self):\n\n json_data = {\n \"input\" : {\n 'version': 'BAD',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def 
validate_input(json_object):\n try:\n if type(json_object) is not list:\n return False\n for machine_config in json_object:\n if (type(machine_config[\"ip\"]) is not str) or not validate_ip(machine_config[\"ip\"]):\n return False\n if type(machine_config[\"community\"]) is not str:\n return False\n if type(machine_config[\"config\"]) is not list:\n return False\n for actual_config in machine_config[\"config\"]:\n if (type(actual_config[\"segment\"]) is not int) or not validate_segment(actual_config[\"segment\"]):\n return False\n if type(actual_config[\"ports\"]) is not list:\n return False\n for actual_port in actual_config[\"ports\"]:\n if (type(actual_port) is not int) or not validate_port(actual_port):\n return False\n except KeyError as ke:\n # Formato incorrecto debido a que algun campo no existe\n return False\n # Todos los campos existen y estan bien\n return True", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]\n return []", "def _validate(self, path, obj):\r\n if isinstance(obj, str):\r\n if path[-1] != \"pattern\":\r\n self._validate_string(path, obj)\r\n elif isinstance(obj, dict):\r\n for key, value in obj.items():\r\n new_path = path.copy()\r\n new_path.append('%s' % key)\r\n self._validate_string(new_path, key, True)\r\n self._validate(new_path, value)\r\n elif isinstance(obj, list):\r\n for index, value in enumerate(obj):\r\n new_path = path.copy()\r\n new_path.append('%d' % index)\r\n self._validate(new_path, value)\r\n elif isinstance(obj, bool):\r\n pass\r\n elif isinstance(obj, int):\r\n pass\r\n elif isinstance(obj, float):\r\n pass\r\n elif isinstance(obj, type(None)):\r\n pass\r\n else:\r\n print(type(obj))\r\n pass\r\n # raise Exception()\r", "def validate(self, value):\n if isinstance(value, dict):\n if set(value.keys()) == {\"type\", \"coordinates\"}:\n if value[\"type\"] != self._type:\n self.error(f'{self._name} type must be \"{self._type}\"')\n return self.validate(value[\"coordinates\"])\n else:\n self.error(\n \"%s can only accept a valid GeoJson dictionary\"\n \" or lists of (x, y)\" % self._name\n )\n return\n elif not isinstance(value, (list, tuple)):\n self.error(\"%s can only accept lists of [x, y]\" % self._name)\n return\n\n validate = getattr(self, \"_validate_%s\" % self._type.lower())\n error = validate(value)\n if error:\n self.error(error)", "def test_load_json_file_not_found_error() -> None:\n fname = \"invalid_file.json\"\n\n assert load_json(fname) == {}\n assert load_json(fname, default=\"\") == \"\"\n assert load_json_object(fname) == {}\n assert load_json_object(fname, default={\"Hi\": \"Peter\"}) == {\"Hi\": \"Peter\"}\n assert load_json_array(fname) == []\n assert load_json_array(fname, default=[\"Hi\"]) == [\"Hi\"]", "def test_json_reader_data_contents(process_data):\n json_data = process_data(file_name_or_type='scooter_data.json')\n for val in json_data:\n assert(isinstance(val['id'], int))\n assert(isinstance(val['name'], str))\n assert(isinstance(val['vin_number'], str))\n 
assert(isinstance(val['electric_scooter'], bool))\n assert(isinstance(val['city'], str))\n assert(isinstance(val['usage'], str))\n assert(isinstance(val['cost_usd'], float))\n assert(isinstance(val['total_years_of_use'], int))", "def test_file_data_arguments():\n filename = 'wso_args.json'\n assert AUTH.check_file_exists(filename) is True\n\n assert AUTH.verify_config(filename, 'authorization',\n AUTH.encode(RANDOM_USERNAME,\n RANDOM_PASSWORD)) is True\n assert AUTH.verify_config(filename, 'url', RANDOM_URL) is True\n assert AUTH.verify_config(filename, 'aw-tenant-code',\n RANDOM_TENANTCODE) is True", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)", "def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")", "def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")", "def _validate_parameter(value):\n if isinstance(value, (dict)):\n if any([not isinstance(key, string_types) for key in value.keys()]):\n raise TypeError(\"Invalid parameter. Dictionary keys must be strings.\")\n [_validate_parameter(item) for item in value.values()]\n elif isinstance(value, (list, tuple)):\n [_validate_parameter(item) for item in value]\n elif (\n value is None or\n isinstance(value, string_types) or\n isinstance(value, (int, float, bool))\n ):\n pass\n else:\n raise TypeError(\"Invalid parameter type. 
Got '%s'.\" % type(value))", "def is_correct_json(string):\n if len(string) == 0:\n return False\n\n if string[0] is not '{' and string[0] is not '[':\n return False\n\n try:\n json.loads(string)\n except ValueError:\n return False\n\n return True", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def validateProp(filename):\n\n # does the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Prop file (%s) does not exist' % (filename))\n return False\n\n # can I read it\n try:\n propFile = open(filename, 'r')\n prop = json.load(propFile)\n propFile.close()\n except (ValueError, OSError):\n LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))\n return False\n\n # does the prop have the correct value\n for key in ('name', 'md5', 'description', 'size', 'contact'):\n if (key not in prop):\n LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))\n return False\n\n return True", "def test_json_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n # empty - should parse\n spec = {}\n read_wrapper(spec, ps)\n\n # empty array - should parse\n spec = {'constraints': []}\n read_wrapper(spec, ps)\n\n # empty element - should fail\n spec = {'constraints': [{}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching block - should fail\n spec = {'constraints': [{'block': 'a'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching variable - should fail\n spec = {'constraints': [{'variable': 'c'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner option - should fail\n spec = {'constraints': [{'option': 'a1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner block - should parse\n spec = {'constraints': [{'block': 'A', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # block and option - should parse\n spec = {'constraints': [{'block': 'A', 'option': 'a1', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variable and option - should parse\n spec = {'constraints': [{'variable': 'a', 'option': '2.5', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # weird option - should parse\n # fixme: {'option': '[1,2]'} will fail\n spec = {'constraints': [{'variable': 'c', 'option': '[1, 2]', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H==b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H.index==1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)", "def test_search_validator_good_data():\n sval = helpers.search_validator()\n good = '{\"fields\": {\"country\": \"DK\", \"plate\": \"BC69432\"}}'\n assert sval.validate(loads(good))", "def verify_rpc_value ( user_dict ):\n for key in 
user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def test_tap_config_json_raise_exception_on_invalid_content_for_state_file(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='state',\n invalid_file_contents=(' ', 'foo', '{\"foo\": 1')\n )", "def params_is_valid(data):\n if isinstance(data['title'], str) and isinstance(data['description'], str) and isinstance(data['params'], dict):\n return True\n else:\n return False", "def _reject_invalid_json(val: Any) -> None:\n raise ValueError(f\"Invalid JSON value: {val!r}\")", "def is_valid(data):\n return isinstance(data, dict) \\\n and \"u_id\" in data \\\n and \"token\" in data \\\n and isinstance(data[\"u_id\"], int) \\\n and isinstance(data[\"token\"], str)", "def validate(self, data):\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def validate_file(inp, name=''):\n validate_string(inp, name)\n assert (os.path.exists(inp)), name + ' settings with value ' + inp + ' should exist.'", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def _validate_parameters(parameters):\n if not isinstance(parameters, dict):\n raise ValueError(\"Please enter a dictionary for parameters\")\n for key, val in parameters.items():\n if isinstance(val, list):\n for params in val:\n if not isinstance(params, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n else:\n if not isinstance(val, u.unyt_array):\n raise ValueError(\n \"Parameter value {} lacks a unyt\".format(val)\n )\n if not isinstance(key, str):\n raise ValueError(\"Parameter key {} is not a str\".format(key))\n\n return parameters", "def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')", "def _reject_invalid_json(val):\n raise ValueError(f\"Invalid JSON value: {val!r}\")", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def validate(self):\n if self.params.get(\"format\"):\n if self.params[\"format\"] not in formats:\n raise ValueError(f\"format must be one of {formats}: {self.dt}\")\n for p in self.required:\n if not self.params.get(p):\n raise ValueError(f\"{p} missing: {self.dt}\")", "def validator(data_json):\n fields = spec[\"fields\"]\n data = json.loads(data_json, object_pairs_hook=collections.OrderedDict)\n for k, v in fields.items():\n if v.get(\"required\"):\n found = False\n if k in data:\n found = True\n elif \".\" in k:\n # Dotted keys could be nested, like ecs.version\n subkeys = k.split(\".\")\n subval = data\n for subkey in subkeys:\n subval = subval.get(subkey, {})\n if subval:\n found = True\n if not found:\n raise ValidationError(\"Missing required key {}\".format(k))\n if k in 
data:\n if v[\"type\"] == \"string\" and not (\n isinstance(data[k], str) or isinstance(data[k], basestring)\n ):\n raise ValidationError(\n \"Value {0} for key {1} should be string, is {2}\".format(\n data[k], k, type(data[k])\n )\n )\n if v[\"type\"] == \"datetime\":\n try:\n datetime.datetime.strptime(data[k], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n raise ValidationError(\n \"Value {0} for key {1} doesn't parse as an ISO datetime\".format(\n data[k], k\n )\n )\n if v.get(\"index\") and list(data.keys())[v.get(\"index\")] != k:\n raise ValidationError(\"Key {0} is not at index {1}\".format(k, index))\n\n return data_json", "def isJson(data):\r\n try:\r\n json.loads(data)\r\n return True\r\n except ValueError:\r\n return False", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def test_read_file_invalid():\n\tfrom ..skySurvey import SkySurvey\n\tfile_list = 0\n\ttry:\n\t\tSkySurvey(file_list = file_list)\n\texcept TypeError:\n\t\tassert True\n\telse:\n\t\tassert False", "def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()", "def is_json(filename):\n try:\n with open(filename, 'r') as f:\n dstore = json.load(f)\n except JSONDecodeError:\n return False # In case the file is invalid json file\n return True # In case the file is a valid json file", "def test_non_string_values(self):\n valid_xml = '{\"foo\": \"<b>Bar</b>\", \"baz\": true}'\n eq_(validate_xml(valid_xml), valid_xml)", "def test_input_schema(self, data, errors):\n resp = self.client.post(self.url, json=data)\n\n if not errors:\n assert resp.status_code == 200\n assert resp.get_json() == {\n 'status': 'OK',\n 'message': 'Data published via Upload service',\n }\n else:\n assert resp.status_code == 400\n assert resp.get_json() == {\n 'status': 'Error',\n 'message': 'Input payload validation failed',\n 'errors': {\n k: ['Missing data for required field.'] for k in errors\n },\n }", "def test_correct_upload_item(upload_items: List[JSONDict]) -> None:\n validated = UploadItem(**upload_items[0])\n assert validated.dict() == upload_items[0]", "def __validate_info(title: str, artist: str, runtime: str, path_name: str):\n if (type(title) != str) or (type(artist) != str) or (type(runtime) != str) \\\n or (type(path_name) != str):\n raise ValueError", "def clean(self):\n # If JSON was passed in as a string, try to interpret it as JSON\n if isinstance(self.required_arguments, str):\n try:\n self.required_arguments = json.loads(self.required_arguments)\n except json.JSONDecodeError:\n raise ValidationError(\"'%s' is not valid JSON!\"\n % self.required_arguments)\n\n if isinstance(self.required_arguments_default_values, str):\n try:\n self.required_arguments_default_values = json.loads(\n self.required_arguments_default_values)\n except json.JSONDecodeError:\n raise ValidationError(\"'%s' is not valid JSON!\"\n % self.required_arguments_default_values)\n\n # Make sure arguments are valid\n is_valid, reason = task_type_args_are_valid(self)\n\n # Arguments are not valid!\n if not is_valid:\n raise ValidationError(reason)", "def test_read_json(self, magic_0, magic_1):\n expected = {\n 'key_1': [1, 2, 3, 4, 5],\n 'key_2': ['a', 'b', 'c', 'd', 'e']\n }\n result = 
helpers.read_json(r\"path\")\n self.assertEqual(expected, result)", "def test_tap_config_json_validation_retry_with_invalid_properties_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.valid_json_file,\n properties=self.invalid_json_file,\n state=self.valid_json_file)", "def check_knowledge_area_json_string_validity(knowledge_area_json):\n if knowledge_area_json is not None:\n try:\n dictionary = json.loads(knowledge_area_json)\n if dictionary is not None and isinstance(dictionary, dict):\n for key in dictionary.keys():\n if key is not None and key is not '':\n try:\n item = dictionary[key]\n if item is not None and item is not \"\" and not math.isnan(item):\n try:\n float_item = float(item)\n except ValueError:\n return errors.InvalidKnowledgeAreaJsonStringError(\n knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n except KeyError:\n return errors.InvalidKnowledgeAreaJsonStringError(\n knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n except ValueError:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)", "def test_case_3(self):\n with open(f'{TEST_DATA_DIR}/r1.json') as file:\n data = json.load(file)\n self.assertIsInstance(data, dict)\n\n task_1 = Task.new(data=data)\n self.assertTrue(task_1.validate())\n\n with self.assertRaises(GCGValidationError):\n task_2 = Task.new(data={'data': 'bad_data'})", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def test_data_parse_vanilla_json(self):\n lines = ['{\"a\": \"val\", \"b\": \"val2\"}']\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual({\"a\": \"val\", \"b\": \"val2\"}, dat)", "def check_parameters_valid(self) :\n for check_parameter in self.parameters :\n if (not self.parameters[check_parameter]['set']) :\n error_message = \"Missing key -> '\" + check_parameter + \"'\"\n if (Config.logger) :\n dtpo_log('fatal', error_message)\n raise ParseError(error_message)\n\n if self.parameters[check_parameter]['type'] == 'dir' :\n value = self.parameters[check_parameter]['value']\n return_string = check_directory_permissions(value)\n if return_string :\n error_message = \"{0} not accessible \" \\\n \"-> {1}\".format(\n check_parameter,\n return_string)\n raise ParseError(error_message)\n elif self.parameters[check_parameter]['type'] == 'file' :\n value = self.parameters[check_parameter]['value']\n try :\n file_pointer = open(value)\n file_pointer.close()\n except IOError as io_error :\n error_message = \"File {0} not accessible -> {2}\" \\\n .format(\n check_parameter,\n self.parameters[check_parameter]['value'],\n str(io_error))\n raise ParseError(error_message)", "def validate_file_desc(self):\n if 'name' not in self.file_desc.keys() or 'format' not in self.file_desc.keys():\n raise AirflowException('file_desc does not have required keys: name, format')\n elif self.file_desc['format'].lower() not in ['csv', 'parquet']:\n raise AirflowException('file_desc have incorrect format type: csv, parquet')\n else:\n return {\"name\": self.file_desc['name'], \"format\": self.file_desc['format']}", "def test_search_validator_bad_data():\n sval = helpers.search_validator()\n assert not 
sval.validate({})\n bad = dict(foo=\"bar\", baz=42)\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {}}')\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {\"vin\": \"\"}}')\n assert not sval.validate(bad)\n bad = loads('{\"fields\": {\"foo\": \"bar\"}}')\n assert not sval.validate(bad)", "def _verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))", "def validate(self, data):\n age = data.get(\"age\", None)\n age = age.split(\",\")\n size = data.get(\"size\", None)\n size = size.split(\",\")\n gender = data.get(\"gender\", None)\n gender = gender.split(\",\")\n for i in age:\n if i not in ['b', 'y', 'a', 's']:\n raise serializers.ValidationError(\n \"Age must be either 'b' for baby, 'y' for young,\"\n \" 'a' for adult, or 's' for senior. Can do multiple with\"\n \" commas, ex: a,y,e\")\n for i in size:\n if i not in ['s', 'm', 'l', 'xl']:\n raise serializers.ValidationError(\n \"Size must be either 's' for small, 'm' for medium, 'l' \"\n \"for large, or 'xl' for extra large. Can do multiple with\"\n \" commas, ex: s,l,xl\")\n for i in gender:\n if i not in ['m', 'f']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, or 'f' for female. Can\"\n \" have both using commas, ex: m,f\")\n return data", "def test_get_users_from_invalid_json():\n with pytest.raises(ValueError):\n Users.from_json(file_path='{0}/json_input/invalid.json'.format(os.path.dirname(os.path.abspath(__file__))))", "def test_metadata_schema_json_invalid(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(files=files)\n assert not metadata_validation_form.is_valid()", "def test_json_loads_array() -> None:\n assert json_loads_array('[{\"c\":1.2}]') == [{\"c\": 1.2}]\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'dict'>\"\n ):\n json_loads_array(\"{}\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'bool'>\"\n ):\n json_loads_array(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'NoneType'>\"\n ):\n json_loads_array(\"null\")", "def test_json_loads_array() -> None:\n assert json_loads_array('[{\"c\":1.2}]') == [{\"c\": 1.2}]\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'dict'>\"\n ):\n json_loads_array(\"{}\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'bool'>\"\n ):\n json_loads_array(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'NoneType'>\"\n ):\n json_loads_array(\"null\")", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with 
open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def test_tap_config_json_validation_retry_with_invalid_config_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.invalid_json_file,\n properties=self.valid_json_file,\n state=self.valid_json_file)", "def validate_arguments(arguments: dict) -> None:\n if not isinstance(arguments, dict):\n raise TypeError('Argument \"arguments\" should be a dict')\n for argument in arguments:\n if not isinstance(arguments[argument][0], arguments[argument][1]):\n raise TypeError(f'Argument {argument} should be a {arguments[argument][1]}')", "def test_metadata_schema_json_valid_file_upload(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert metadata_validation_form.is_valid()\n assert len(metadata_validation_form.cleaned_data['mi_json_schema_file']) > 0", "def check_file():\n #print('request=', request)\n #print('request.data=', request.data)\n #print('request.form=', request.form)\n #print('request.files=', request.files)\n #print('request.json=', request.json)\n qdata = None\n adata = None\n Q = None\n A = None\n if request.json:\n qdata = request.json.get('Q')\n adata = request.json.get('A')\n if 'Qfile' in request.files:\n qdata = request.files['Qfile'].read().decode('utf-8')\n if 'Afile' in request.files:\n adata = request.files['Afile'].read().decode('utf-8')\n\n #print('qdata\\n', qdata)\n #print('adata\\n', adata)\n try:\n if qdata:\n Q = adc2019.read_Q(qdata)\n if adata:\n A = adc2019.read_A(adata)\n if Q is None and A is None:\n return jsonify({'check_file': 'No data'})\n if Q is None:\n return jsonify({'check_file': 'A-ok'})\n if A is None:\n return jsonify({'check_file': 'Q-ok'})\n\n info = adc2019.check_data(Q, A)\n #print(info)\n info2 = info.copy()\n for k in ['count', 'corner', 'line_length', 'line_corner', 'ban_data_F']:\n info2[k] = str(info2[k])\n info2['check_file'] = 'ok'\n return jsonify(info2)\n except Exception as e:\n #traceback.print_exc()\n errinfo = ['ADC2019 rule violation'] + [str(i) for i in e.args]\n info = {'error': errinfo, 'stack_trace': traceback.format_exc()}\n return jsonify(info)\n\n return jsonify({'check_file': 'ok',\n 'value': 1234567,\n 'msg': '生麦生米生卵'})", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. 
Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def __valid_json(string):\n try:\n obj = json.loads(string)\n except ValueError:\n return False\n else:\n return json.dumps(obj)", "def test_parse_prefs_file(self):\r\n # Test good input\r\n ps1 = \"\"\"{'bgcolor':'white','colors':\r\n {'id':'blue','name':'green'},'list':[1,2,3]}\"\"\"\r\n exp1 = {'bgcolor': 'white', 'colors': {'id': 'blue', 'name': 'green'},\r\n 'list': [1, 2, 3]}\r\n self.assertEqual(parse_prefs_file(ps1), exp1)\r\n\r\n # Test bad input\r\n # list of valid input rather than multiline string should fail.\r\n ps_bad_1 = [\"{'bgcolor':'white',\",\r\n \"'colors':{'id':'blue','name':'green'}\",\r\n \",'list':[1,2,3]}\"]\r\n self.assertRaises(QiimeParseError, parse_prefs_file, ps_bad_1)\r\n\r\n # bad data. Can be evaluated but not a dict.\r\n ps_bad_2 = \"[1,2,3]\"\r\n self.assertRaises(QiimeParseError, parse_prefs_file, ps_bad_2)", "def is_json(json_str: str) -> bool:\n try:\n json.loads(json_str)\n except ValueError:\n return False\n return True", "def is_json(my_object):\n try:\n json.loads(my_object)\n except ValueError:\n return False\n\n return True", "def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data", "def test_wrong_upload_item(invalid_item: JSONDict) -> None:\n\n with pytest.raises(ValidationError) as e:\n invalid = UploadItem(**invalid_item) # noqa: F841\n assert e.value.errors() == [\n {\n \"ctx\": {\"limit_value\": 2},\n \"loc\": (\"language\",),\n \"msg\": \"ensure this value has at most 2 characters\",\n \"type\": \"value_error.any_str.max_length\",\n },\n {\n \"loc\": (\"date\",),\n \"msg\": \"Could not validate format '02-2031-01'. Must be YYYY-MM-DD or iso-formatted time stamp\",\n \"type\": \"value_error\",\n },\n {\n \"loc\": (\"url\",),\n \"msg\": \"invalid or missing URL scheme\",\n \"type\": \"value_error.url.scheme\",\n },\n ]", "def json_attribs_check(func):\n @wraps(func)\n def inner_func(jsonStr):\n gslvtsSchema = {\"type\":\"object\",\n \"properties\":{\n \"tagID\": {\"type\":\"number\"}, \n \"UTC\": {\"type\":\"string\",\n \"format\":\"date-time\"}\n\t\t\t},\n\t\t\t\"required\":[\"tagID\",\"UTC\"]\n }\n try:\n jsonGslvts=json.loads(jsonStr)\n for elem in jsonGslvts:\n try: \n validate(elem, gslvtsSchema, format_checker=FormatChecker())\n except ValidationError, e:\n print \"[-] Invalid json post data. Check it, brah.\"\n print e\n raise AttributeError \n except (AttributeError, ValueError):\n print \"[-] IDk what that was, but it wasn't JSON.\"\n raise AttributeError\n\n return(func(jsonStr)) \n return inner_func", "def validate_json(schema, doc):\n is_invalid = set(doc).difference(set(schema))\n if is_invalid:\n return False\n return True", "def verify_json(output, expected_keys):\n deser = json.loads(output)\n assert deser\n for expected_key in expected_keys:\n assert expected_key in deser", "def _ConstructParseAndCheckJSON(\n self, inputfiles, logfiles, graphs):\n logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs)\n index = 0\n for filename in logfiles:\n graph_name = graphs[index]\n actual = logs[graph_name]\n path = os.path.join(self.data_directory, filename)\n expected = json.load(open(path))\n self.assertEqual(expected, actual, 'JSON data in %s did not match '\n 'expectations.' 
% filename)\n\n index += 1", "def test_json_exceptions(self, error, input_file_path: str, msg: str):\n with pytest.raises(error) as context:\n with open(attach_path(input_file_path), 'r') as input_file:\n str(read_json(input_file.read()))\n assert str(context.value) == msg", "def test_tap_config_json_validation_retry_with_invalid_state_and_then_fix(self):\n self._assert_retry_validation_of_json_file(config=self.valid_json_file,\n properties=self.valid_json_file,\n state=self.invalid_json_file)", "def file_jsoncheck(filename):\n with open(filename, 'r') as jsontable:\n try:\n json_object = json.load(jsontable)\n except ValueError, e:\n return False\n\n # DQLL.json number of lines should be 35\n # Will change with table version\n nlines = 35\n \n with open(filename, 'r') as f:\n l = [x for x in f.readlines()]\n # Default number of lines should be 35\n if len(l) != nlines:\n print \"Number of lines in DQLL.json is not default {} but {}\".format(nlines, len(l))\n return False\n\n return True", "def validate_required_string(dictionary, dict_name, value, yaml_file):\n\n validate_dict_contains_value(dictionary, dict_name, value, yaml_file)\n validate_type(dictionary[value], value, str, 'str', yaml_file)\n del dictionary[value]", "def _validate_array_params(array_params):\n if isinstance(array_params, dict):\n # Shallow check; make sure each antenna position is a 3-vector.\n if all(len(pos) == 3 for pos in array_params.values()):\n return True\n elif isinstance(array_params, str):\n # Shallow check; just make sure the file exists.\n return os.path.exists(array_params)\n else:\n raise TypeError(\"Array layout must be a dictionary or path to a layout csv.\")", "def is_json_valid(json_data: dict, json_schema: dict) -> bool:\r\n try:\r\n validate(instance=json_data, schema=json_schema)\r\n except jsonschema.exceptions.ValidationError as err:\r\n return False\r\n return True", "def is_valid(self, value) -> 'True | str':\n err_str = super().is_valid()\n if isinstance(err_str, str):\n return err_str\n if self.must_exists and not os.path.isfile(value):\n return f'The file \"{value}\" does not exist.'\n return True" ]
[ "0.7035887", "0.7005028", "0.69904476", "0.6892422", "0.68914765", "0.66186786", "0.6573325", "0.65226805", "0.64924496", "0.6450398", "0.64263535", "0.63742745", "0.6305237", "0.62949204", "0.6228534", "0.6220705", "0.618344", "0.6172133", "0.61165553", "0.61000866", "0.60901415", "0.6050257", "0.60250664", "0.6018873", "0.6018642", "0.6013244", "0.60103846", "0.6000024", "0.5975821", "0.5963092", "0.5927843", "0.5927843", "0.59222966", "0.5921905", "0.59067374", "0.5901181", "0.5901172", "0.58841735", "0.58726186", "0.5868638", "0.58624685", "0.58477205", "0.5843965", "0.5826048", "0.58161855", "0.5815225", "0.5806027", "0.5804351", "0.5801439", "0.5795957", "0.57914555", "0.5791102", "0.5787909", "0.5784165", "0.5766023", "0.5751803", "0.5750204", "0.5747317", "0.5747128", "0.57451063", "0.5739381", "0.57374626", "0.5733758", "0.5732164", "0.57263505", "0.5725928", "0.57248396", "0.57100993", "0.5709159", "0.56965744", "0.5695737", "0.56922907", "0.5682434", "0.56823736", "0.56739575", "0.56648755", "0.56567127", "0.56567127", "0.56536466", "0.56525505", "0.56447744", "0.5639479", "0.5639392", "0.56383157", "0.56381065", "0.5637088", "0.5636842", "0.5623432", "0.5621766", "0.56176484", "0.56114906", "0.56087357", "0.5608544", "0.56067145", "0.56035435", "0.56021893", "0.5601708", "0.5591928", "0.5589369", "0.55752206", "0.5573981" ]
0.0
-1
Validate that the output values, passed as either a file or a json string, are correct.
def validate_output_values(self, source, **kwargs): return self._validate_values("output_values", source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_output(out: Union[str, bytes], fmt: str) -> None:\n if fmt in [\"png\", \"pdf\"]:\n assert isinstance(out, bytes)\n elif fmt in [\"vega\", \"vega-lite\"]:\n assert isinstance(out, str)\n dct = json.loads(out)\n assert len(dct) > 0\n else:\n assert isinstance(out, str)\n assert len(out) > 0", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def test_output_invalid(self):\n assert (\n self.route.output_invalid(hug_core.output_format.json).route[\"output_invalid\"]\n == hug_core.output_format.json\n )", "def test_json(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(input_file_path), 'r') as input_file:\n with open(attach_path(answer_file_path), 'r') as answer_file:\n assert str(read_json(input_file.read().strip())) == answer_file.read().strip()", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def verify_json(output, expected_keys):\n deser = json.loads(output)\n assert deser\n for expected_key in expected_keys:\n assert expected_key in deser", "def validate_json(self):\n pass", "def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' 
% filename)]\n return []", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def test_to_json_file_non_dict(self):\n\n output_file = \"this_file_is_a_ghost\"\n File(output_file).delete()\n\n self.assertRaises(TypeError, lambda: Dict(1).to_json_file(output_file))\n self.assertRaises(TypeError, lambda: Dict(\"100\").to_json_file(output_file))\n self.assertRaises(\n TypeError, lambda: Dict(\"{'hello': 'world'}\").to_json_file(output_file)\n )\n\n File(output_file).delete()", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. 
Exiting\"\n sys.exit(1)", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def is_valid_output(output) -> bool:\n log.info(f\"Output validation: {output}\")\n\n try:\n float(output)\n except ValueError as value_error:\n log.error(value_error)\n return False\n\n log.info(\"Output successfully validated\")\n return True", "def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)", "def validate(self, config_json):\n pass", "def _validate_results(self, task, result):\n assert isinstance(result, dict), \\\n f\"{task} returned a {type(result)} rather than a dict\"\n for k in result:\n assert k in self.provides, \\\n f\"{task} provided unwanted output {k}\"\n for k in self.provides:\n assert k in result, \\\n f\"{task} failed to provide needed output {k}\"", "def test_json_reader_data_contents(process_data):\n json_data = process_data(file_name_or_type='scooter_data.json')\n for val in json_data:\n assert(isinstance(val['id'], int))\n assert(isinstance(val['name'], str))\n assert(isinstance(val['vin_number'], str))\n assert(isinstance(val['electric_scooter'], bool))\n assert(isinstance(val['city'], str))\n assert(isinstance(val['usage'], str))\n assert(isinstance(val['cost_usd'], float))\n assert(isinstance(val['total_years_of_use'], int))", "def test_load_json_value_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('\"two\"')\n\n assert load_json(fname) == \"two\"\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a dict\"\n ):\n load_json_object(fname)\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)", "def test_arg_is_none(self):\n self.s0.save_to_file(None)\n with open('Square.json', mode='r', 
encoding='utf-8') as f:\n read = f.read()\n self.assertEqual(read, '[]')", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def test_invalid_data(self):\n\n json_data = {\n \"input\" : {\n 'version': 'BAD',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk,\n \"configuration\" : self.configuration\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def valid_and_export(template, dashname):\n\n if not json_validation(template):\n print('Bad json format for ' + dashname + ' grafana dashboard')\n else:\n if export_file(template, dashname + '.json'):\n print('Successfully generated dashboard: ' + dashname)\n else:\n print('Error during export dashboard: ' + dashname)", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def test_json(self):\n\n cases_dir = pathlib.Path(__file__).parent / 'cases'\n\n asn_strs = {\n asn_path.stem: asn_path.read_text()\n for asn_path in cases_dir.glob('*.asn')\n }\n json_strs = {\n json_path.stem: json_path.read_text()\n for json_path in cases_dir.glob('*.json')\n }\n\n assert set(asn_strs.keys()) == set(json_strs.keys())\n assert len(asn_strs) > 0\n\n for key in asn_strs:\n with self.subTest(key=key):\n res_json = asn1vnparser.parse_asn1_value_assignment(\n asn_strs[key], as_json=True)\n res_py = json.loads(res_json)\n self.maxDiff = None\n self.assertEqual(res_py, json.loads(json_strs[key]))", "def test_json(self):\n result = self._do_output(o.JsonOutput(), self._demo_msgs)\n self.assertEqual(result,\n '{\"errors\": ['\n '{\"id\": \"MOCK_MSG\", \"location\": {\"file\": \"mock.cmake\", \"line\": 1, \"package\": \"mock\"}, \"text\": \"short text\"}, '\n '{\"id\": \"MOCK_MSG\", \"location\": {\"package\": \"mock\"}, \"text\": \"short text\"}, '\n '{\"id\": \"MOCK_MSG\", \"location\": {\"file\": \"mock.cmake\", \"package\": \"mock\"}, \"text\": \"short text\"}'\n '], \"notices\": ['\n '{\"id\": \"MOCK_MSG\", \"location\": {\"file\": \"mock.cmake\", \"line\": 3, \"package\": \"mock\"}, \"text\": \"short text\"}'\n '], \"version\": \"%(version)s\", \"warnings\": ['\n '{\"id\": \"MOCK_MSG\", \"location\": {\"file\": \"mock.cmake\", \"line\": 2, \"package\": \"mock\"}, \"text\": \"short text\"}'\n ']}\\n' % {\"version\": catkin_lint_version}\n )", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def check_input_matches_expected_output(in_, out):\n ...", "def check_job_json(job_info):\n job_type_list = [job_type.value for _, job_type in JobType.__members__.items()]\n if 'source_id' not in job_info:\n raise ValueError(\"Json string 
Errors, key:source_id not found.\")\n if 'job_id' not in job_info:\n raise ValueError(\"Json string Errors, key:job_id not found.\")\n if 'job_type' not in job_info or not job_info['job_type']:\n raise ValueError(\"Json string Errors, key:job_type not found.\")\n if job_info['job_type'] not in job_type_list:\n raise ValueError(\"Invalid job type: {}.\".format(job_info['job_type']))\n if 'job_content' not in job_info:\n raise ValueError(\"Json string Errors, key:job_content not found.\")", "def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)", "def test_badFormat(self):\n with open(os.path.join(self.directory, \"service2.json\"), \"w\") as f:\n f.write(\"this is not json\")\n self.pump()\n self.assertNodesEqual(knownNodes(self.disco, \"service2\", \"staging\"), [])", "def check_file():\n #print('request=', request)\n #print('request.data=', request.data)\n #print('request.form=', request.form)\n #print('request.files=', request.files)\n #print('request.json=', request.json)\n qdata = None\n adata = None\n Q = None\n A = None\n if request.json:\n qdata = request.json.get('Q')\n adata = request.json.get('A')\n if 'Qfile' in request.files:\n qdata = request.files['Qfile'].read().decode('utf-8')\n if 'Afile' in request.files:\n adata = request.files['Afile'].read().decode('utf-8')\n\n #print('qdata\\n', qdata)\n #print('adata\\n', adata)\n try:\n if qdata:\n Q = adc2019.read_Q(qdata)\n if adata:\n A = adc2019.read_A(adata)\n if Q is None and A is None:\n return jsonify({'check_file': 'No data'})\n if Q is None:\n return jsonify({'check_file': 'A-ok'})\n if A is None:\n return jsonify({'check_file': 'Q-ok'})\n\n info = adc2019.check_data(Q, A)\n #print(info)\n info2 = info.copy()\n for k in ['count', 'corner', 'line_length', 'line_corner', 'ban_data_F']:\n info2[k] = str(info2[k])\n info2['check_file'] = 'ok'\n return jsonify(info2)\n except Exception as e:\n #traceback.print_exc()\n errinfo = ['ADC2019 rule violation'] + [str(i) for i in e.args]\n info = {'error': errinfo, 'stack_trace': traceback.format_exc()}\n return jsonify(info)\n\n return jsonify({'check_file': 'ok',\n 'value': 1234567,\n 'msg': '生麦生米生卵'})", "def test_cli_format_error_handler_broken_json():\n resp = MagicMock()\n resp.json.side_effect = ValueError(\"\")\n resp.text = \"Not JSON\"\n output = format_utils.cli_format_error_handler(resp)\n assert 'Error: Unable to decode response. 
Value: Not JSON' in output", "def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst", "def local_output_check(environment, output, value):\n outputs = environment.cfy.local.outputs()['cfy_outputs']\n # Make sure the comparisons are all string based, a different step should\n # be created that forces the parsed type to be of the correct type for\n # other types.\n assert str(outputs[output]) == value", "def test_file_data_arguments():\n filename = 'wso_args.json'\n assert AUTH.check_file_exists(filename) is True\n\n assert AUTH.verify_config(filename, 'authorization',\n AUTH.encode(RANDOM_USERNAME,\n RANDOM_PASSWORD)) is True\n assert AUTH.verify_config(filename, 'url', RANDOM_URL) is True\n assert AUTH.verify_config(filename, 'aw-tenant-code',\n RANDOM_TENANTCODE) is True", "def test_json_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n # empty - should parse\n spec = {}\n read_wrapper(spec, ps)\n\n # empty array - should parse\n spec = {'constraints': []}\n read_wrapper(spec, ps)\n\n # empty element - should fail\n spec = {'constraints': [{}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching block - should fail\n spec = {'constraints': [{'block': 'a'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching variable - should fail\n spec = {'constraints': [{'variable': 'c'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner option - should fail\n spec = {'constraints': [{'option': 'a1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner block - should parse\n spec = {'constraints': [{'block': 'A', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # block and option - should parse\n spec = {'constraints': [{'block': 'A', 'option': 'a1', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variable and option - should parse\n spec = {'constraints': [{'variable': 'a', 'option': '2.5', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # weird option - should parse\n # fixme: {'option': '[1,2]'} will fail\n spec = {'constraints': [{'variable': 'c', 'option': '[1, 2]', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H==b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H.index==1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)", "def test_make_json_simple(self):\n resources = get_test_resources()\n output, filename = make_json(**resources)\n output = json.loads(output)\n self.assertEqual(\n output[\"text\"][\"0\"][\"0\"][\"0\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Text passages should be parsed correctly\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"0\"][\"0\"], \"Qui tecum cupis esse meos ubicumque libellos \",\n \"Text passages should be parsed correctly\"\n )\n\n self.assertEqual(\n 
output[\"text\"][\"1\"][\"0\"][\"1\"], \"Et comites longae quaeris habere viae, Something\",\n \"Text passages should be parsed correctly and note kept\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"1\"][\"3\"], \"Crede slug. mihi, nimium Martia turba sapit. \",\n \"Text passages should be parsed correctly and abbr kept\"\n )\n self.assertEqual(\n filename, \"textgroup__work__lat.json\",\n \"Filename should be created in a stable and understandable manner\"\n )\n self.assertEqual(\n output[\"original-urn\"], \"urn:cts:latinLit:textgroup.work.version-lat1\",\n \"Original URN should be fed\"\n )\n self.assertEqual(\n output[\"urn\"], \"urn:cts:latinLit:textgroup.work.version-lat1-simple\",\n \"CLTK URN should be suffixed\"\n )\n self.assertEqual(\n output[\"credit\"], \"\",\n \"Credit should be empty by default\"\n )\n self.assertEqual(\n output[\"meta\"], \"book-poem-line\",\n \"meta should reflect the citation scheme\"\n )\n self.assertEqual(\n output[\"author\"], \"textgroup\",\n \"Author name should be the English textgroup name\"\n )\n self.assertEqual(\n output[\"work\"], \"work\",\n \"Work name should be the English work name\"\n )\n self.assertEqual(\n output[\"edition\"], \"description\",\n \"We should have the English description\"\n )", "def test_xyz_file_format_to_xyz(self):\n xyz1 = converter.xyz_file_format_to_xyz(xyz_file=self.xyz1['file'])\n xyz2 = converter.xyz_file_format_to_xyz(xyz_file=self.xyz2['file'])\n xyz6 = converter.xyz_file_format_to_xyz(xyz_file=self.xyz6['file'])\n self.assertEqual(xyz1, self.xyz1['dict'])\n self.assertEqual(xyz2, self.xyz2['dict'])\n self.assertEqual(xyz6, self.xyz6['dict'])", "def is_jsonable(self, input):\n try:\n json.dumps(input)\n return True\n except (TypeError, OverflowError):\n return False", "def test_simplef(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"resultd\": \"yes\"}')\n self.assertFalse(check_json_object(j, samp1))", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def test_outputs(self, monkeypatch, script_runner):\n monkeypatch.setattr(\"builtins.input\", lambda _: \"n\")\n _ = script_runner.run(\n \"spectrafit\",\n \"spectrafit/test/test_data.txt\",\n \"-i\",\n \"spectrafit/test/test_input_2.json\",\n )\n assert len(list(Path(\".\").glob(\"*.json\"))) == 1\n assert len(list(Path(\".\").glob(\"*.csv\"))) == 3", "def test_json_to_file(self):\n\n self.parser.parse()\n self.parser.json_to_file(JSON_FILE)\n json_string = self.parser.to_json()\n\n with open(JSON_FILE) as f:\n json_file_string = f.read()\n \n self.assertEqual(json_string, json_file_string)", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def validate_input(json_object):\n try:\n if type(json_object) is not list:\n return False\n for machine_config in json_object:\n if (type(machine_config[\"ip\"]) is not str) or not validate_ip(machine_config[\"ip\"]):\n return False\n if type(machine_config[\"community\"]) is not str:\n return False\n if type(machine_config[\"config\"]) is not list:\n return False\n for actual_config in machine_config[\"config\"]:\n if (type(actual_config[\"segment\"]) is not int) or not 
validate_segment(actual_config[\"segment\"]):\n return False\n if type(actual_config[\"ports\"]) is not list:\n return False\n for actual_port in actual_config[\"ports\"]:\n if (type(actual_port) is not int) or not validate_port(actual_port):\n return False\n except KeyError as ke:\n # Formato incorrecto debido a que algun campo no existe\n return False\n # Todos los campos existen y estan bien\n return True", "def expect_output(self, file, parse_json=False):\n contents = self._data_file(file)\n patcher = mock.patch('sys.stdout', new_callable=StringIO)\n output = patcher.start()\n yield\n patcher.stop()\n if parse_json:\n self.assertEqual(json.loads(output.getvalue()),\n json.loads(contents))\n else:\n self.assertEqual(output.getvalue().split('\\n'), contents.split('\\n'))", "def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()", "def verify_rpc_value ( user_dict ):\n for key in user_dict:\n if not isinstance ( user_dict[ key ], str ):\n # Error code 422\n raise ValueError ( 'Value of {0} is not a string'.format ( key ) )", "def _test_output_formatting_func(self, sample: Any):\n try:\n if not type(sample) == iter:\n self._formatting_func_return_types(format=sample)\n return True\n except Exception:\n raise ValueError(\n f\"formatting_func must return {self._formatting_func_return_types.__annotations__['format']}, not {type(sample)}\"\n )", "def test_lti20_good_json(self):\r\n for json_str, expected_comment in self.GOOD_JSON_INPUTS:\r\n score, comment = self.xmodule.parse_lti_2_0_result_json(json_str)\r\n self.assertEqual(score, 0.1)\r\n self.assertEqual(comment, expected_comment)", "def _ConstructParseAndCheckJSON(\n self, inputfiles, logfiles, graphs):\n logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs)\n index = 0\n for filename in logfiles:\n graph_name = graphs[index]\n actual = logs[graph_name]\n path = os.path.join(self.data_directory, filename)\n expected = json.load(open(path))\n self.assertEqual(expected, actual, 'JSON data in %s did not match '\n 'expectations.' % filename)\n\n index += 1", "def test_data_parse_vanilla_json(self):\n lines = ['{\"a\": \"val\", \"b\": \"val2\"}']\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual({\"a\": \"val\", \"b\": \"val2\"}, dat)", "def _type_check_output(\n output_def: \"OutputDefinition\", output: Any, context: \"BoundOpExecutionContext\"\n) -> Any:\n from ..execution.plan.execute_step import do_type_check\n\n op_label = context.describe_op()\n\n if isinstance(output, (Output, DynamicOutput)):\n dagster_type = output_def.dagster_type\n type_check = do_type_check(context.for_type(dagster_type), dagster_type, output.value)\n if not type_check.success:\n raise DagsterTypeCheckDidNotPass(\n description=(\n f'Type check failed for {op_label} output \"{output.output_name}\" - '\n f'expected type \"{dagster_type.display_name}\". 
'\n f\"Description: {type_check.description}\"\n ),\n metadata_entries=type_check.metadata_entries,\n dagster_type=dagster_type,\n )\n\n context.observe_output(\n output_def.name, output.mapping_key if isinstance(output, DynamicOutput) else None\n )\n return output\n else:\n dagster_type = output_def.dagster_type\n type_check = do_type_check(context.for_type(dagster_type), dagster_type, output)\n if not type_check.success:\n raise DagsterTypeCheckDidNotPass(\n description=(\n f'Type check failed for {op_label} output \"{output_def.name}\" - '\n f'expected type \"{dagster_type.display_name}\". '\n f\"Description: {type_check.description}\"\n ),\n metadata_entries=type_check.metadata_entries,\n dagster_type=dagster_type,\n )\n return output", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def check_output(self, args):\n if isinstance(args, str):\n args = [args]\n try:\n return check_output(args)\n\n except IOError as e:\n raise ScriptError(e)\n\n except OSError as e:\n raise ScriptError(e)\n\n except CalledProcessError as e:\n raise ScriptError(e)", "def test_tap_config_json_raise_exception_on_invalid_content_for_state_file(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='state',\n invalid_file_contents=(' ', 'foo', '{\"foo\": 1')\n )", "def test_read_json(self, magic_0, magic_1):\n expected = {\n 'key_1': [1, 2, 3, 4, 5],\n 'key_2': ['a', 'b', 'c', 'd', 'e']\n }\n result = helpers.read_json(r\"path\")\n self.assertEqual(expected, result)", "def test_cli_format_error_handler_bogus_json():\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads('{\"key\": \"value\"}'))\n output = format_utils.cli_format_error_handler(resp)\n assert 'Error: Not specified' in output\n assert 'Reason: Not specified' in output", "def test_output_writer_errors():\n with pytest.raises(TypeError):\n load_output_writer(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_output_writer({\"format\": \"invalid_driver\"})", "def output_from_json(self, output: Dict[str, Any]) -> OutputInfo:", "def test_write_to_json():\r\n tmp_dir = os.getcwd()\r\n json_content = '{ \"name\":\"John\", \"age\":30}'\r\n directory = os.path.join(tmp_dir, 'inputspec.json')\r\n write_to_json(directory, json_content) \r\n with open(directory) as json_file:\r\n data = json.load(json_file)\r\n json_string = json.dumps(data)\r\n if os.path.exists(directory):\r\n os.remove(directory)\r\n assert json_string.replace(' ', '') == json_content.replace(' ' , '')", "def verify_output(self, output):\n return output == self.output", "def parse_json_format(file_path=None):\n is_file_res = check_is_file(file_path)\n if is_file_res['result']:\n with open(file_path) as f:\n if f.readline().strip().startswith('['):\n return generate_response(result='jsonl')\n return generate_response(result='json')\n else:\n return is_file_res", "def _does_output_dict_contain_info(self):\n for species_output_dict in 
self.output.values():\n for key0, val0 in species_output_dict.items():\n if key0 in ['paths', 'job_types']:\n for key1, val1 in species_output_dict[key0].items():\n if val1 and key1 not in ['rotors', 'bde']:\n return True\n else:\n if val0:\n return True\n return False", "def test_successful_file(self):\n\n url = '/%s/jobs/%i/input_files/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])\n self.assertIn('file_name', result)\n self.assertIn('workspace', result)\n self.assertIn('media_type', result)\n self.assertIn('file_type', result)\n self.assertIn('file_size', result)\n self.assertIn('file_path', result)\n self.assertIn('is_deleted', result)\n self.assertIn('url', result)\n self.assertIn('created', result)\n self.assertIn('deleted', result)\n self.assertIn('data_started', result)\n self.assertIn('data_ended', result)\n self.assertIn('source_started', result)\n self.assertIn('source_ended', result)\n self.assertIn('last_modified', result)\n self.assertIn('geometry', result)\n self.assertIn('center_point', result)\n self.assertIn('countries', result)\n self.assertIn('job_type', result)\n self.assertIn('job', result)\n self.assertIn('job_exe', result)\n self.assertIn('job_output', result)\n self.assertIn('recipe_type', result)\n self.assertIn('recipe', result)\n self.assertIn('recipe_node', result)\n self.assertIn('batch', result)\n self.assertFalse(result['is_superseded'])\n self.assertIn('superseded', result)", "def _valid_output_type(self, output_type):\n # pylint: disable=W0613, R0201\n return True", "def test_to_json_string(self):\n self.assertEqual(Base.to_json_string(None), \"[]\")\n self.assertTrue(type(Base.to_json_string(None)) is str)\n self.assertEqual(Base.to_json_string([]), \"[]\")\n self.assertTrue(type(Base.to_json_string([])) is str)\n myDict = {'id': 4, 'width': 3, 'height': 4, 'x': 1, 'y': 3}\n myDict2 = {'id': 3, 'width': 6, 'height': 2, 'x': 1, 'y': 9}\n jsonized = Base.to_json_string([myDict, myDict2])\n self.assertTrue(type(jsonized) is str)\n myDict3 = json.loads(jsonized)\n self.assertEqual(myDict3, [myDict, myDict2])", "def test_parse(self, tmpdir):\n json_file = tmpdir.join(\"f.json\")\n obj = {\"ds\": [{\"file\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}]}\n with open(str(json_file), \"w\") as f:\n json.dump(obj, f)\n\n csv_file = tmpdir.join(\"f.csv\")\n csv_file.write(\"\\n\".join([\n \",\".join(HEADER_ROW),\n \"ds,1,url,title,yes,no,{}\".format(str(json_file))\n ]))\n\n expected = {\n \"ds\": {\n \"generate_aggregation\": True,\n \"include_in_wms\": False,\n \"tech_note_title\": \"title\",\n \"tech_note_url\": \"url\",\n \"files\": [\n {\"path\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}\n ]\n }\n }\n\n s = StringIO()\n sys.stdout = s\n parse_file(str(csv_file))\n sys.stdout = sys.__stdout__\n\n output_json = s.getvalue()\n try:\n parsed = json.loads(output_json)\n except ValueError:\n assert False, \"parse_file() produced invalid JSON\"\n\n assert parsed == expected", "def test_make_json_advanced(self):\n resources = get_test_resources()\n output, filename = make_json(commit=\"1245\", exclude=[\"tei:note\", \"tei:orig\"], credit=\"PerseusDL\", **resources)\n output = json.loads(output)\n self.assertEqual(\n 
output[\"text\"][\"0\"][\"0\"][\"0\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Text passages should be parsed correctly\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"0\"][\"1\"], \"Et comites longae quaeris habere viae, \",\n \"Text passages should be parsed correctly and note removed\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"1\"][\"3\"], \"Crede mihi, nimium Martia turba sapit. \",\n \"Text passages should be parsed correctly and note removed\"\n )\n self.assertEqual(\n output[\"text\"][\"1\"][\"0\"][\"0\"], \"Qui tecum cupis esse meos ubicumque libellos \",\n \"Text passages should be parsed correctly\"\n )\n self.assertEqual(\n filename, \"textgroup__work__lat.json\",\n \"Filename should be created in a stable and understandable manner\"\n )\n self.assertEqual(\n output[\"original-urn\"], \"urn:cts:latinLit:textgroup.work.version-lat1\",\n \"Original URN should be fed\"\n )\n self.assertEqual(\n output[\"urn\"], \"urn:cts:latinLit:textgroup.work.version-lat1-simple\",\n \"CLTK URN should be suffixed\"\n )\n self.assertEqual(\n output[\"credit\"], \"PerseusDL\",\n \"Credit should be empty by default\"\n )\n self.assertEqual(\n output[\"meta\"], \"book-poem-line\",\n \"meta should reflect the citation scheme\"\n )\n self.assertEqual(\n output[\"author\"], \"textgroup\",\n \"Author name should be the English textgroup name\"\n )\n self.assertEqual(\n output[\"work\"], \"work\",\n \"Work name should be the English work name\"\n )\n self.assertEqual(\n output[\"edition\"], \"description\",\n \"We should have the English description\"\n )\n self.assertEqual(\n output[\"commit\"], \"1245\",\n \"We should have the commit information\"\n )", "def is_valid(self, value) -> 'True | str':\n err_str = super().is_valid()\n if isinstance(err_str, str):\n return err_str\n if self.must_exists and not os.path.isfile(value):\n return f'The file \"{value}\" does not exist.'\n return True", "def _is_valid_result(result):\n return result.get(\"version\", \"\") != \"\"", "def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))", "def compare_json(test_name): # , json_object, filters):\n baseline_json = None\n result_json = None\n try:\n with open(robot_dir + \"/output/baseline/{}.json\".format(test_name.replace(' ', ''))) as baseline_file:\n try:\n with open(robot_dir + \"/output/results/{}.json\".format(test_name.replace(' ', ''))) as result_file:\n baseline_json = json.load(baseline_file)\n result_json = json.load(result_file)\n except:\n print(\"Failed to open the results json\")\n except:\n print(\"Failed to open the baseline json\")\n return False\n return ordered(baseline_json) == ordered(result_json)", "def _validate_analysis(self):\n if \"analysis\" not in self.params.keys():\n raise SpecificationError(f\"analysis is a required field in parameters\")\n elif not isinstance(self.params[\"analysis\"], dict):\n raise SpecificationError(\"Value of key 'analysis' must be a dictionaries\")\n else:\n analysis_script = self.params[\"analysis\"].get(\"script\", \"\")\n if analysis_script:\n analysis_script = self.params[\"scripts\"] / analysis_script\n if not analysis_script.is_file():\n raise FileNotFoundError(\n \"Script from analysis does not exist: {}\".format(\n analysis_script\n )\n )\n else:\n self.params[\"analysis\"][\"script\"] = analysis_script\n else:\n self.params[\"analysis\"][\"script\"] = \"\"\n analysis_command = self.params[\"analysis\"].get(\"command\", None)\n if not analysis_command or not 
isinstance(analysis_command, str):\n raise SpecificationError(\"'command' must be a string.\")\n if not self.params[\"analysis\"].get(\"inputs\", None):\n self.params[\"analysis\"][\"inputs\"] = []\n elif not isinstance(self.params[\"analysis\"][\"inputs\"], list):\n raise SpecificationError(\"Value of key 'inputs' must be a list.\")\n else:\n for inp_el in self.params[\"analysis\"][\"inputs\"]:\n self._validate_input_dict(inp_el)", "def validateDictionaries(self):\n self.logger.info(\"Validating Dictionaries\")\n\n message = ''\n shader_dict = {}\n disp_dict = {}\n attr_dict = {}\n layers_dict = {}\n namespace_str = ''\n\n shader_attr = self.getAttr(\"shadersAssignation\")\n disp_attr = self.getAttr(\"displacementsAssignation\")\n attr_attr = self.getAttr(\"attributes\")\n layers_attr = self.getAttr(\"layersOverride\")\n namespace_attr = self.getAttr(\"shadersNamespace\")\n\n shaders = None\n disp = None\n attr = None\n layers = None\n namespace = None\n\n fail = False\n\n if shader_attr:\n try:\n shader_dict = json.loads(shader_attr)\n if shader_dict.has_key('shaders'):\n fail = True\n shaders = 'please remove the shaders key'\n except ValueError as e:\n shaders = e\n fail = True\n\n if disp_attr:\n try:\n disp_dict = json.loads(disp_attr)\n if disp_dict.has_key('displacement'):\n fail = True\n disp = 'please remove the displacement key'\n except ValueError as e:\n disp = e\n fail = True\n\n if attr_attr:\n try:\n attr_dict = json.loads(attr_attr)\n if attr_dict.has_key('attributes'):\n fail = True\n attr = 'please remove the attributes key'\n except ValueError as e:\n attr = e\n fail = True\n\n if layers_attr:\n try:\n layers_dict = json.loads(layers_attr)\n if layers_dict.has_key('layers'):\n fail = True\n layers = 'please remove the layers key'\n except ValueError as e:\n layers = e\n fail = True\n\n if namespace_attr:\n try:\n namespace_str = ast.literal_eval(namespace_attr)\n if type(namespace_attr) == dict:\n if namespace_attr.has_key('namespace'):\n fail = True\n namespace = 'please remove the namespace key'\n\n except ValueError as e:\n namespace = e\n fail = True\n\n if not fail:\n self.logger.info(\"Valid\")\n return True\n else:\n if shaders:\n self.logger.error(\"%s.shadersAssignation : %s\" % (self.data['shapeNode'], shaders))\n if disp:\n self.logger.error(\"%s.displacementsAssignation : %s\" % (self.data['shapeNode'], disp))\n if attr:\n self.logger.error(\"%s.attributes : %s\" % (self.data['shapeNode'], attr))\n if layers:\n self.logger.error(\"%s.layersOverride : %s\" % (self.data['shapeNode'], layers))\n if namespace:\n self.logger.error(\"%s.shadersNamespace : %s\" % (self.data['shapeNode'], namespace))\n self.logger.info(\"Invalid\")\n return False", "def test_output_reader_errors():\n with pytest.raises(TypeError):\n load_output_reader(\"not_a_dictionary\")\n with pytest.raises(errors.MapcheteDriverError):\n load_output_reader({\"format\": \"invalid_driver\"})", "def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')", "def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" \"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)", "def test_json_file(self):\n #response = os.system(\"python3 client.py -f filename.csv\")\n response = client.result(False, 'json', 'unittest',file = 'test_file.csv')\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = 
len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file conetent is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n raise exception.FileFormatError(err_msg)", "def validate_output(self, file_name):\n with open(file_name, \"r\") as f:\n line = f.readline()\n # Expected: \"EMPTY: <instance_type>=<count>; ...\"\n parts = line.split()\n self.assertEqual(\"EMPTY:\", parts[0])\n num_instance_types = len(INSTANCE_TYPES)\n self.assertEqual(num_instance_types, len(parts) - 1)\n for i in xrange(0, num_instance_types):\n m = re.match(INSTANCE_COUNT_RE, parts[i + 1])\n self.assertIsNotNone(m)\n self.assertEqual(INSTANCE_TYPES[i], m.group(1))\n # m.group(2) is the integer value.\n # The regex match is sufficient validation.\n\n line = f.readline()\n # Expected: \"FULL: <instance_type>=<count>; ...\"\n parts = line.split()\n self.assertEqual(\"FULL:\", parts[0])\n num_instance_types = len(INSTANCE_TYPES)\n self.assertEqual(num_instance_types, len(parts) - 1)\n for i in xrange(0, num_instance_types):\n m = re.match(INSTANCE_COUNT_RE, parts[i + 1])\n self.assertIsNotNone(m)\n self.assertEqual(INSTANCE_TYPES[i], m.group(1))\n # m.group(2) is the integer value.\n # The regex match is sufficient validation.\n\n line = f.readline()\n # Expected:\n # \"MOST FILLED: <instance_type>=<count>,<empty slots>; ...\"\n parts = line.split()\n self.assertEqual(\"MOST\", parts[0])\n self.assertEqual(\"FILLED:\", parts[1])\n num_instance_types = len(INSTANCE_TYPES)\n self.assertEqual(num_instance_types, len(parts) - 2)\n for i in xrange(0, num_instance_types):\n m = re.match(INSTANCE_COUNT_SLOTS_RE, parts[i + 2])\n self.assertIsNotNone(m)\n self.assertEqual(INSTANCE_TYPES[i], m.group(1))\n # m.group(2) and m.group(3) are the integer values.\n # The regex match is sufficient validation.\n\n # Verify that we have reached the end of the file\n line = f.readline()\n self.assertEqual(\"\", line)", "def has_error(json):\n judge_value = error_type(json)\n judge_result = True\n if judge_value > 0:\n judge_result = True\n else:\n judge_result = False\n return judge_result", "def read_and_check_valid_params_json(instrument, file_header):\n non_valid_params = []\n file_loc = \"/grp/hst/cdbs/tools/jwst/valid_params/\" + change_style(instrument) + \"_valid_params.csv\"\n\n datetime1 = re.compile(\"([1][9]|([2][0-1]))\\d{2}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1])T([0-1][0-9]|[2][0-3]):[0-5][0-9]:[0-5][0-9]\")\n datetime2 = re.compile(\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\")\n inflight_datetime = re.compile(\"INFLIGHT ([1][9]|([2][0-1]))\\d{2}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1]) ([1][9]|([2][0-1]))\\d{2}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1])\")\n\n new_file_header = {}\n for header in file_header:\n if header == \"description\":\n new_file_header[header[:7].upper()] = file_header[header]\n else:\n new_file_header[header[:8].upper()] = file_header[header]\n file_header = new_file_header\n\n with open(file_loc, 'rb') as csvfile:\n keyreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in keyreader:\n if row[0].lower() in file_header or (row[0] == \"HISTORY\" and row[0] in file_header):\n #for header in file_header:\n #If OR is present in value\n if not type(file_header[row[0]]) is int and \"|\" in file_header[row[0]]:\n values = file_header[row[0]].split(\"|\")\n if values[0] 
in row[1:]:\n pass\n else:\n non_valid_params.append((values[0], row[0]))\n\n if values[1] in row[1:]:\n pass\n else:\n non_valid_params.append((values[1], row[0]))\n #Valid value\n elif file_header[row[0]] in row[1:]:\n pass\n #Check USEAFTER\n elif row[0] == 'USEAFTER':\n if re.match(datetime1, file_header[row[0]]):\n pass\n elif re.match(datetime2, file_header[row[0]]):\n print (\"Correct format but inaccurate dates in USEAFTER\")\n non_valid_params.append((file_header[row[0]], row[0]))\n else:\n non_valid_params.append((file_header[row[0]], row[0]))\n #Check PEDIGREE\n elif row[0] == 'PEDIGREE':\n valid_options = ['SIMULATION', 'GROUND', 'DUMMY']\n if (file_header[row[0]] in valid_options) or re.match(inflight_datetime, file_header[row[0]]):\n pass\n else:\n non_valid_params.append((file_header[row[0]], row[0]))\n #Check's to see if certain headers are not empty\n elif row[0] in ['AUTHOR', 'DESCRIP', 'HISTORY']:\n if file_header[row[0]] == \"\":\n non_valid_params.append((file_header[row[0]], row[0]))\n #Not a valid value\n else:\n non_valid_params.append((file_header[row[0]], row[0]))\n else:\n pass\n if not non_valid_params:\n print (\"All parameters are valid\")\n else:\n print (\"Non-valid paramters (Format (Non-valid value, Header located in)): {}\".format(non_valid_params))", "def validate(input, output, fields, delimiter, encoding, verbose, format_in, zipfile, rule, filter, mode):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['zipfile'] = zipfile\n options['filter'] = filter\n options['rule'] = rule\n options['mode'] = mode\n acmd = Validator()\n acmd.validate(input, options)\n pass", "def test_bcl_convert_output_unknown_format(self):\n self.verify_local()\n\n mock_bcl_workflow: Workflow = WorkflowFactory()\n\n mock_wfl_run = libwes.WorkflowRun()\n mock_wfl_run.id = TestConstant.wfr_id.value\n mock_wfl_run.status = WorkflowStatus.SUCCEEDED.value\n mock_wfl_run.time_stopped = make_aware(datetime.utcnow())\n mock_wfl_run.output = {\n 'main/fastqs': \"say, for example, cwl workflow output is some malformed string, oh well :(\"\n }\n\n workflow_version: libwes.WorkflowVersion = libwes.WorkflowVersion()\n workflow_version.id = TestConstant.wfv_id.value\n mock_wfl_run.workflow_version = workflow_version\n when(libwes.WorkflowRunsApi).get_workflow_run(...).thenReturn(mock_wfl_run)\n\n try:\n orchestrator.handler({\n 'wfr_id': TestConstant.wfr_id.value,\n 'wfv_id': TestConstant.wfv_id.value,\n }, None)\n except Exception as e:\n logger.exception(f\"THIS ERROR EXCEPTION IS INTENTIONAL FOR TEST. NOT ACTUAL ERROR. 
\\n{e}\")\n\n self.assertRaises(json.JSONDecodeError)", "def file_jsoncheck(filename):\n with open(filename, 'r') as jsontable:\n try:\n json_object = json.load(jsontable)\n except ValueError, e:\n return False\n\n # DQLL.json number of lines should be 35\n # Will change with table version\n nlines = 35\n \n with open(filename, 'r') as f:\n l = [x for x in f.readlines()]\n # Default number of lines should be 35\n if len(l) != nlines:\n print \"Number of lines in DQLL.json is not default {} but {}\".format(nlines, len(l))\n return False\n\n return True", "def output_is_valid(output):\n\n is_correct = type(output) is list\n for member in output:\n is_correct *= type(member) is list\n for item in member:\n is_correct *= type(item) is tuple and len(item) == 2\n\n return bool(is_correct)", "def test_invalid_dict(self):\r\n data = '\"\\\\\"Test\\\\tTesting\"'\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('JSON should be dict', response.content)\r\n self.assertEqual(response.status_code, 400)", "def check_knowledge_area_json_string_validity(knowledge_area_json):\n if knowledge_area_json is not None:\n try:\n dictionary = json.loads(knowledge_area_json)\n if dictionary is not None and isinstance(dictionary, dict):\n for key in dictionary.keys():\n if key is not None and key is not '':\n try:\n item = dictionary[key]\n if item is not None and item is not \"\" and not math.isnan(item):\n try:\n float_item = float(item)\n except ValueError:\n return errors.InvalidKnowledgeAreaJsonStringError(\n knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n except KeyError:\n return errors.InvalidKnowledgeAreaJsonStringError(\n knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n except ValueError:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)\n else:\n return errors.InvalidKnowledgeAreaJsonStringError(knowledge_area_json)", "def test_search_validator_good_data():\n sval = helpers.search_validator()\n good = '{\"fields\": {\"country\": \"DK\", \"plate\": \"BC69432\"}}'\n assert sval.validate(loads(good))", "def test_json_string_to_file_empty(self):\n Square.save_to_file([])\n with open(\"Square.json\") as a_file:\n self.assertEqual(json.loads(a_file.read()), [])" ]
[ "0.6843762", "0.6608919", "0.65803474", "0.6542176", "0.64707804", "0.6435956", "0.6402429", "0.6313526", "0.6293885", "0.61496603", "0.6148562", "0.613426", "0.6072376", "0.60628194", "0.6022932", "0.59941685", "0.59931684", "0.59114295", "0.5907436", "0.58949924", "0.5893585", "0.58658415", "0.5863124", "0.5860577", "0.58600664", "0.5835267", "0.5821637", "0.5811933", "0.57978743", "0.5779133", "0.5779133", "0.5771696", "0.5760844", "0.5752645", "0.5742522", "0.57406104", "0.57128674", "0.5703422", "0.56984895", "0.56979614", "0.5694274", "0.5693982", "0.5693293", "0.5676709", "0.5675387", "0.5662037", "0.5646306", "0.56381375", "0.56173396", "0.56102574", "0.5606857", "0.5606105", "0.5606075", "0.5592228", "0.55863047", "0.5580772", "0.55784976", "0.55648714", "0.5564648", "0.55636144", "0.55568254", "0.55469316", "0.55355287", "0.5524589", "0.5523919", "0.55176264", "0.5515292", "0.55087775", "0.5508722", "0.5501486", "0.55003977", "0.5498554", "0.54975253", "0.54967654", "0.54932153", "0.5482012", "0.54814345", "0.54781204", "0.54686016", "0.54624933", "0.54482704", "0.54303294", "0.54288703", "0.54254353", "0.54245174", "0.5422715", "0.54179764", "0.5411453", "0.5407473", "0.53979236", "0.53935957", "0.53915995", "0.5387892", "0.53878033", "0.5387274", "0.5384473", "0.5380771", "0.53767604", "0.53763485", "0.53716016" ]
0.5906991
19
Validate monitor message against the monitor message schema strand.
def validate_monitor_message(self, source, **kwargs): return self._validate_values(kind="monitor_message", source=source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n\n # Check if motherboard record exists\n motherboard_record_exists = False\n board_info_records = self.groups[constants.RecordType.BASEBOARD_RECORD]\n for handle_id in board_info_records:\n record = self.records[handle_id]\n if 'Type' in record.props and record.props['Type'].val == 'Motherboard':\n motherboard_record_exists = True\n break\n if not motherboard_record_exists:\n self.err_msgs['Motherboard SMBIOS record is missing.'] = (\n 'There should be at least one structure defining the motherboard '\n '(Board Type: 0xA).')\n\n return self.err_msgs", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def validate_message(self, state_id, msg):\n pass", "def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise", "def validation_event(self, message):", "def message_error_validator():\n\n return validator.MessageErrorSchema()", "def check_message(self, msg):\n pass", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validate():", "def validateMessage(self):\n assert self.validation_class is not None, (f'{self.__class__.__name__}'\n ' must include a validation'\n '_attribute or override '\n 'validateMessage method.')\n\n validation_class = self.validation_class\n registry = validation_class(data=self.message, context={'request': None})\n\n if registry.is_valid():\n self.is_valid = True\n self.registry = registry\n else:\n self.result = registry.errors\n super().finishTask(failed=True)\n\n return self.is_valid", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more 
information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def handle_message(self, validated_message: dict):\n pass", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def onMessage(self):\n \"\"\"\n Validates that the received message is from a student and then broadcasts the message to the rest of the class.\n\n @param self: self is the instance of this object.\n @param message: the message that is received\n @param student: the student that sent the message\n \"\"\"\n pass", "def validate(self, soapmsg):\n return self.xsd_validator.validate(soapmsg.body)", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def determineMessageValidity(message):\n return Sentience._determineMessagePositivityWrapper(message, overall=False)", "def _check_monitorline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.monitor_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if ';' is used\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id, None)\n self._display_semantic_error(monitor_error)\n elif self._is_period(self.symbol):\n # DType output\n self.symbol = self.scanner.get_symbol()\n if self._check_validdtypeoutput(self.symbol):\n self.monitor_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id,\n self.monitor_port.id)\n self._display_semantic_error(monitor_error)\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n self._display_syntax_error(\"doutput\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None", "def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if 
record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))", "def validate_against_schema(request, schema, data):\n try:\n data_pure = schema.deserialize(data)\n data_clean = post_serialize(data_pure)\n # Attach data_clean to request: see usage in views.\n request.data_clean = data_clean\n except Invalid as e:\n # here we transform the errors we got from colander into cornice\n # errors\n for field, error in e.asdict().items():\n request.errors.add('body', field, error)", "def _is_message_valid(message):\n return isinstance(message, ev_envelope.Envelope)", "def _validate_input(self):\n\n try:\n expected_type(str, self.venue_id, \"venue_id\")\n expected_type(datetime.datetime, self.timestamp_utc, \"timestamp_utc\")\n\n expected_type(VenueStreamType, self.measurement_type, \"measurement_type\")\n\n expected_type(int, self.number_of_people, \"number_of_people\")\n\n if self.measurement_type is VenueStreamType.ABSOLUTE:\n if self.operator:\n raise ValueError(\"The stream type for the venue doesn't allow passing an Event operator\")\n\n elif self.measurement_type is VenueStreamType.EVENT:\n expected_type(EventStreamOperator, self.operator, \"operator\")\n else:\n raise ValueError(\"Unsupported member of the VenueStreamType enum\")\n\n if self.metadata:\n expected_type(dict, self.metadata, \"metadata\")\n\n except Exception as ex:\n raise ValueError(\"Validation of input failed. Reason: %s\" % str(ex))", "def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)", "def __validate():\n # TODO: implement", "def check_msg(self, msg, log, topic):\n topics = self.cfg_logs[log]['topics']\n\n if topic in topics.keys():\n for attribute in topics[topic]:\n if not attribute.startswith('_'):\n val = getattr(msg, attribute)\n\n if 'min' in topics[topic][attribute].keys():\n if val < topics[topic][attribute]['min']:\n self.errors.append(\n '{} {} {} {}: violating minimium value {}: {}'.format(log, msg.header.stamp.to_sec(),\n topic, attribute,\n topics[topic][attribute]['min'],\n val))\n if 'max' in topics[topic][attribute].keys():\n if val > topics[topic][attribute]['max']:\n self.errors.append(\n '{} {} {} {}: violating maximum value {}: {}'.format(log, msg.header.stamp.to_sec(),\n topic, attribute,\n topics[topic][attribute]['max'],\n val))\n if 'val' in topics[topic][attribute].keys():\n if val != topics[topic][attribute]['val']:\n self.errors.append(\n '{} {} {} {}: violating value {}: {}'.format(log, msg.header.stamp.to_sec(), topic,\n attribute, topics[topic][attribute]['val'],\n val))", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. 
Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def _validate(self):\n pass", "def _check_message(self, _message_contents):\r\n if not type(_message_contents) is dict:\r\n self.logger.error(\"Message should be a dict.\")\r\n return False\r\n if not \"event\" in _message_contents:\r\n self.logger.error(\"Message dict has no event key.\")\r\n return False\r\n if not \"data\" in _message_contents:\r\n self.logger.error(\"Message dict has no data key.\")\r\n return False\r\n if not type(_message_contents[\"event\"]) == str:\r\n self.logger.error(\"Message event is not a string.\")\r\n return False\r\n if len(_message_contents[\"event\"]) == 0:\r\n self.logger.error(\"Message event cannot be empty.\")\r\n return False\r\n if not type(_message_contents[\"data\"]) == list:\r\n self.logger.error(\"Message data is not a list.\")\r\n return False\r\n if len(_message_contents[\"data\"]) == 0:\r\n self.logger.error(\"Message data cannot be empty list.\")\r\n return False\r\n return True", "def test_validate_connector(self):\n connector = {'wwpns': [\"not empty\"],\n 'wwnns': [\"not empty\"]}\n self.volume.driver.validate_connector(connector)", "def validate_fields(cls, message_type: str, attachment_data: dict) -> None:", "def _validate(self):\n self._eventHandler.validate()", "def verify_raw_message(self, msg: bytes):\n if not (MIN_MESSAGE_SIZE < len(msg) < MAX_MESSAGE_SIZE):\n raise ValueError(\"Invalid message size!\")\n\n msg_type = get_message_type(msg) # yields a ValueError on invalid type\n msg_sender = get_message_sender(msg) # yields a ValueError if sender is invalid\n msg_round = get_message_round(msg)\n\n if msg_round < self.round:\n raise ValueError(f\"Message to late\")\n\n if msg_round == self.round:\n if msg_type == MessageType.Propose and self.phase > Phase.Propose:\n raise ValueError(f\"Message to late!\")\n if msg_type == MessageType.Acknowledge and self.phase > Phase.Acknowledge:\n if not self.is_leader:\n raise ValueError(f\"Message to late!\")\n elif self.is_leader and msg_type != MessageType.Confirm:\n raise ValueError(\"Leaders only process Confirm messages for current round!\")\n\n if self.node_status[msg_sender] == NodeStatus.ADVERSARIAL:\n return ValueError(\"Message sender is an adversary!\")\n\n # TODO: Drop message if some message of the same (type, round, sender)-combination\n # was previously added to the queue.\n\n # Drop messages with invalid signatures\n if not ed25519.verify_attached(msg, NODE_INFOS[msg_sender].public_key):\n return ValueError(\"Signature check failed!\")\n\n return True", "def parse_valid(self):\n try:\n self.test_proto.parse()\n except avro.errors.ProtocolParseException: # pragma: no coverage\n self.fail(f\"Valid protocol failed to parse: {self.test_proto!s}\")", "def __validate_raw_message(raw_message: RawMessage) -> None:\n if not isinstance(raw_message, (tuple, list)):\n raise TypeError(\"'raw_message' is not list or tuple type\")\n if not all([isinstance(raw_byte, int) and 0x00 <= raw_byte <= 0xFF for raw_byte in raw_message]):\n raise ValueError(\"'raw_message' does not contain raw bytes (int value between 0 and 255) values only\")", "async def validate_screenshot(message):\n message_state[message.author.id] = 'SEND_SCREENSHOT'\n await message.channel.send(validate_screenshot_description)", "def validate(self):\n import os\n\n if self.kind == KDM.INTEROP:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'interop.xsd'), 
'r') as f:\n schema = f.read()\n elif self.kind == KDM.SMPTE:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'smpte.xsd'), 'r') as f:\n schema = f.read()\n\n base_dir = os.getcwd()\n os.chdir(os.path.join(os.path.dirname(__file__), 'xsd'))\n try:\n schema = ET.XMLSchema(ET.XML(schema))\n xmlparser = ET.XMLParser(schema=schema)\n ET.fromstring(self.raw, xmlparser)\n finally:\n os.chdir(base_dir)", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def is_valid(self, soapmsg):\n return self.xsd_validator.is_valid(soapmsg.body)", "async def validate(self):\n pass", "def validate_new_message(payload):\n serialized_message = json.loads(payload)\n message = Message.make(serialized_message)\n print(\"Message ({0}) contents: {1}\".format(message.type, message))\n return message", "def SchemaValidate(self, xsd):\n ret = libxml2mod.xmlTextReaderSchemaValidate(self._o, xsd)\n return ret", "def testMessageFieldValidate(self):\n class MyMessage(messages.Message):\n pass\n\n class AnotherMessage(messages.Message):\n pass\n\n field = messages.MessageField(MyMessage, 10)\n field.validate(MyMessage())\n\n self.assertRaises(messages.ValidationError,\n field.validate,\n AnotherMessage())", "def isValidForSchema(schema):\n\n return True", "def _is_valid_message(tx_message: TransactionMessage) -> bool:\n # TODO check the hash matches the terms of the transaction, this means dm requires knowledge of how the hash is composed\n tx_hash = tx_message.signing_payload.get(\"tx_hash\")\n is_valid = isinstance(tx_hash, bytes)\n return is_valid", "def _validate_measurement(instance: typing.Dict[str, typing.Any], schema: typing.Dict[str, typing.Any], path: typing.List[str]) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'object_id'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - opt_federation_keys\n if invalid_keys:\n raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)\n if instance['_type'] != 'measurement':\n raise ValidationError('expected _type \"measurement\"', path)\n if not isinstance(instance['object_id'], int):\n raise ValidationError('object_id must be int', path)\n if 'component_uuid' in instance and instance['component_uuid'] != flask.current_app.config['FEDERATION_UUID']:\n pass\n else:\n try:\n measurement = objects.get_object(object_id=instance['object_id'])\n except ObjectDoesNotExistError:\n raise ValidationError('object does not exist', path)\n if measurement.action_id is None:\n raise ValidationError('object must be measurement', path)\n action = actions.get_action(measurement.action_id)\n if action.type is None:\n raise ValidationError('object must be measurement', path)\n if action.type_id != ActionType.MEASUREMENT and action.type.fed_id != ActionType.MEASUREMENT:\n raise ValidationError('object must be measurement', path)", 
"def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def validate_payload(cls, event):\n # TODO: Use invenio-jsonschemas/jsonresolver instead of this\n # Validate against Event JSONSchema\n # NOTE: raises `jsonschemas.ValidationError`\n cls._jsonschema_validator.validate(event)\n\n # Validate using marshmallow loader\n for payload in event:\n errors = RelationshipSchema(check_existing=True).validate(payload)\n if errors:\n raise MarshmallowValidationError(str(errors) + \"payload\" + str(payload))", "def validate(self, validate_valence_type=True, toolkit_registry=None):\n perceived_type = self.get_type(toolkit_registry=toolkit_registry)\n if (\n (perceived_type != self._expected_type)\n and validate_valence_type\n and not (self._expected_type is None)\n ):\n raise SMIRKSMismatchError(\n f\"{self.__class__} expected '{self._expected_type}' chemical environment, but \"\n f\"smirks was set to '{self.smirks}', which is type '{perceived_type}'\"\n )", "def _validate_against_schema(config):\n logging.info(\"Validating config file against the schema\")\n try:\n c = Core(source_data=config, schema_files=[CONFIG_SCHEMA])\n c.validate(raise_exception=True)\n except Exception as e:\n logging.error(\"Failed when validating schema: %s\", e)\n logging.info(\"Dumping rendered template:\\n%s\", dump_rendered_config_file(config))\n raise", "def monitorlist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.MONITOR_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.monitor_point()\n while (self.symbol.type == self.scanner.NAME):\n self.monitor_point()\n\n # Check right curly bracket ends monitors block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.END_ID):\n # Error: missing '}'\n # Stopping Symbols: END' KEYWORD\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Bad name terminated monitors incorrectly\n # Error: Invalid name\n # Stopping Symbols: END' KEYWORD\n self.error(self.NAME_STRING, [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Error: Curly needed after 'MONITOR'\n # Stopping Symbols: END' KEYWORD\n self.error(self.NO_CURLY_MONITOR, [self.scanner.KEYWORD],\n [self.scanner.END_ID])\n else:\n # Error: 'MONITOR' keyword required\n # Stopping Symbols: END' KEYWORD\n self.error(self.NEED_MONITOR_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.END_ID])", "def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def validate(schema, record):\n if six.PY3:\n return Utils._py3_validate(schema, record)\n else:\n return Utils._py2_validate(schema, record)", "def app_validate(data):\n\n schema = json.load(open('schemas/app_description_schema.json', 'r'))\n try:\n jsonschema.validate(data, schema)\n except 
jsonschema.ValidationError as e:\n raise InvalidApplicationDescription(str(e))\n except jsonschema.SchemaError:\n log.exception('BUG: invalid schema for application descriptions')\n raise ZoeLibException('BUG: invalid schema for application descriptions')\n\n # Start non-schema, semantic checks\n if data['version'] != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription('Application description version mismatch (expected: {}, found: {}'.format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION, data['version']))\n\n found_monitor = False\n for service in data['services']:\n if service['monitor']:\n found_monitor = True\n\n service['resources']['memory']['max'] = zoe_lib.config.get_conf().max_memory_limit * (1024 ** 3)\n if service['resources']['memory']['min'] is not None and service['resources']['memory']['min'] > service['resources']['memory']['max']:\n raise InvalidApplicationDescription(msg='service {} tries to reserve more memory than the administrative limit'.format(service['name']))\n\n if service['resources']['cores']['min'] is None:\n service['resources']['cores']['min'] = 0.1\n\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have the monitor property set to true\")", "def validate_schema_consistent(self, node):\n debug(\"validate_schema_consistent() \" + node.name)\n\n response = node.nodetool('describecluster', True)[0]\n schemas = response.split('Schema versions:')[1].strip()\n num_schemas = len(re.findall('\\[.*?\\]', schemas))\n assert num_schemas == 1, \"There were multiple schema versions: \" + pprint.pformat(schemas)", "def __input_validator(msg):\n\n\t\tstatus = msg[\"status\"]\n\n\t\tif status == 1:\n\t\t\treturn status\n\t\telif status == 0:\n\t\t\tprint(msg[\"body\"])\n\t\telif status == -1:\n\t\t\tprint(\"Please enter something!\")\n\t\telif status == -2:\n\t\t\tprint(\"Your command {} is invalid\".format(msg[\"verb\"]))\n\t\telif status == -3:\n\t\t\tprint(\"No argument given after {}\".format(msg[\"verb\"]))", "def test_validate_ingest(self):\n #Validate schema and config file\n ingest_mgmr = IngestManager()\n response = ingest_mgmr.validate_config_file(self.example_config_data)\n assert (response is True)\n\n #Validate properties\n response = ingest_mgmr.validate_properties()\n assert (response is True)", "def test_is_valid_invalid_resume(self):\n self.assertFalse(resumeschema.is_valid(self.invalid_resume))", "def test_check_presence_only(self):\n schema = yaml.load(self.yaml_presence_check, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, allow_unknown=True,\n error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': ''}\n self.assertTrue(val.validate(document))\n document = {'eventDate': ''}\n val.validate(document)\n self.assertEqual(val.errors, {})", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 
'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def validator(request, schema):\n try:\n body = request.body.decode('utf-8')\n dictbody = json.loads(body) if body else {}\n validate_against_schema(request, schema, dictbody)\n except ValueError as e:\n request.errors.add('body', 'body', six.text_type(e))", "def validate(self, document) -> None:\n if not self._re.match(document.text):\n raise ValidationError(\n message=self._message, cursor_position=document.cursor_position\n )", "def validate(self):\n return _libsbml.SBMLExternalValidator_validate(self)", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def validate(self, descriptor, schema_id):\n try:\n jsonschema.validate(descriptor, self.load_schema(schema_id))\n return True\n\n except ValidationError as e:\n log.error(\"Failed to validate Descriptor against schema '{}'\"\n .format(schema_id))\n self.error_msg = e.message\n log.error(e.message)\n return\n\n except SchemaError as e:\n log.error(\"Invalid Schema '{}'\".format(schema_id))\n self.error_msg = e.message\n log.debug(e)\n return", "def validate(self, document):\n self.validator.validate(document)", "def __validate(self):\n pass", "def validate(self, doc):\n return self.schema.validate(doc)", "def validate(self):\n if not self.checkUserInput():\n return\n # detect_peaks function requires mpd (= minimum peak distance)\n # mpw should be converted in mpd\n mpd = self.mpw * self.npt / self.xspan\n npk = ntry = 0\n while npk == 0 and ntry < 5:\n peakindx, _ = signal.find_peaks(self.Y, height=self.mph,\n distance=mpd, threshold=self.thres)\n npk = len(peakindx)\n if npk == 0:\n if self.thres == 0:\n break\n if ntry == 3:\n self.thres = 0.0\n else:\n self.thres /= 2\n ntry += 1\n if not npk:\n QtWidgets.QMessageBox.information(self, self.title, \"No peak found\")\n return\n msg = \"{0:d} peaks have been detected, \" \\\n \"do you want to continue ?\".format(npk)\n ans = QtWidgets.QMessageBox.information(self, self.title, msg,\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n if ans == QtWidgets.QMessageBox.No:\n self.reject()\n else:\n self.peakindx = peakindx\n self.accept()", "def validator(data):\n\n request_validator = cerberus.Validator(SCHEMA)\n if request_validator.validate(data):\n return True\n else:\n return request_validator.errors", "def validate(self):\n self._check_type()", "def validate(self):\n pass # pylint: disable=unnecessary-pass", "def _check_monitorlist(self):\n self.symbol = self.scanner.get_symbol()\n # Repeatedly call _check_monitorline() until END MONITORS\n while (\n not self._is_end(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n self._check_monitorline()\n if self._is_eof(self.symbol):\n # In case file ends prematurely\n pass\n return None", "def validate(self) -> None:\n schema = type(self).schema\n if schema:\n if self.data is None:\n raise PresenterException(\"No data given.\")\n try:\n schema(self.data)\n except JsonSchemaException as exception:\n raise PresenterException(exception.message)\n else:\n if self.data is not None:\n raise PresenterException(\"This presenter does not take data.\")", "def validate(self):", "def validate(self):", "def validate_syntax(self):\n self._validate_network_prefix()\n self._validate_zero_network()\n self._validate_families()\n self._validate_unicast_addresses()\n self._validate_addresses()\n self._validate_gateway()\n self._validate_metric()", "def validate(self):\n return 1", "def 
validate(\n self,\n is_full_msg: bool,\n msg_type: Optional[bytes],\n header_len: int,\n payload_len: Optional[int],\n input_buffer: InputBuffer\n ) -> None:\n\n if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION:\n self._validate_starting_sequence(input_buffer)\n\n if self._size_validation_settings is not None:\n self._validate_payload_length(msg_type, payload_len)\n\n if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION:\n self._validate_control_flags(is_full_msg, header_len, payload_len, input_buffer)", "def _validateXML(self, to_be_validated, xsd_model):\n #We parse the XSD model\n xsd_model = StringIO.StringIO(xsd_model)\n xmlschema_doc = etree.parse(xsd_model)\n xmlschema = etree.XMLSchema(xmlschema_doc)\n\n string_to_validate = StringIO.StringIO(to_be_validated)\n\n try:\n document = etree.parse(string_to_validate)\n except (etree.XMLSyntaxError, etree.DocumentInvalid) as e: # pylint: disable=catching-non-exception\n LOG('SlapTool::_validateXML', INFO, \n 'Failed to parse this XML reports : %s\\n%s' % \\\n (to_be_validated, e))\n return False\n\n if xmlschema.validate(document):\n return True\n\n return False", "def validate(self, document) -> None:\n if not len(document.text) > 0:\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )", "def parse_monitor(self):\n return DEFAULT_MONITOR", "def validate_messages_for_send(message, app):\n if isinstance(message, (list, tuple, set)):\n result = True\n for m_one in message:\n result = result and m_one.valid_for_send(app)\n else:\n result = message.valid_for_send(app)\n return result", "def validate(self):\n ...", "def _validate_vhd(vdi_path):\n out, first_line = _vhd_util_check(vdi_path)\n\n if 'invalid' in first_line:\n LOG.warning(\"VHD invalid, attempting repair.\")\n repair_cmd = [\"vhd-util\", \"repair\", \"-n\", vdi_path]\n run_command(repair_cmd)\n out, first_line = _vhd_util_check(vdi_path)\n\n if 'invalid' in first_line:\n if 'footer' in first_line:\n part = 'footer'\n elif 'header' in first_line:\n part = 'header'\n else:\n part = 'setting'\n\n details = first_line.split(':', 1)\n if len(details) == 2:\n details = details[1]\n else:\n details = first_line\n\n extra = ''\n if 'timestamp' in first_line:\n extra = (\" ensure source and destination host machines have \"\n \"time set correctly\")\n\n LOG.info(\"VDI Error details: %s\" % out)\n\n raise Exception(\n \"VDI '%(vdi_path)s' has an invalid %(part)s: '%(details)s'\"\n \"%(extra)s\" % {'vdi_path': vdi_path, 'part': part,\n 'details': details, 'extra': extra})\n\n LOG.info(\"VDI is valid: %s\" % vdi_path)", "def validate(config):\n runner = ScenarioRunner._get_cls(config.get(\"type\", \"continuous\"))\n jsonschema.validate(config, runner.CONFIG_SCHEMA)", "def test_minlength_ignored_formats(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'code': 5}\n self.assertTrue(val.validate(document))\n document = {'code': '5'}\n self.assertFalse(val.validate(document))\n document = {'code': 5.2}\n self.assertTrue(val.validate(document))\n document = {'code': '5.2'}\n self.assertFalse(val.validate(document))", "def validate(self):\r\n return 1", "def validate(self):\r\n return 1", "def validate(self):\r\n return 1" ]
[ "0.6137711", "0.60649383", "0.60399437", "0.60151976", "0.5827821", "0.5748296", "0.5649671", "0.56458515", "0.550159", "0.54282165", "0.5421623", "0.54128164", "0.54105055", "0.5404401", "0.5385656", "0.5354246", "0.53523284", "0.5333825", "0.5331712", "0.53300637", "0.5320604", "0.53031343", "0.5286672", "0.52821654", "0.5263968", "0.5259492", "0.523754", "0.5236003", "0.5214703", "0.5212126", "0.5189302", "0.51882774", "0.51862824", "0.5184006", "0.517352", "0.5157693", "0.51549697", "0.5154055", "0.5153964", "0.51538044", "0.5134496", "0.5109668", "0.509871", "0.5091881", "0.50917715", "0.5087072", "0.50808847", "0.5074724", "0.50595486", "0.50561726", "0.50418746", "0.50405145", "0.50352293", "0.50351757", "0.50344133", "0.50333035", "0.5015423", "0.500988", "0.5006737", "0.5000829", "0.49991798", "0.49991798", "0.49991798", "0.49991798", "0.49991798", "0.49991798", "0.49991798", "0.49991798", "0.4997303", "0.4994686", "0.4991661", "0.49751192", "0.497268", "0.4971145", "0.49695498", "0.49546614", "0.49516442", "0.4946931", "0.4938821", "0.49340564", "0.49332684", "0.4927066", "0.49237213", "0.49182364", "0.491154", "0.491154", "0.4910965", "0.49097714", "0.49072516", "0.49033114", "0.48967814", "0.48957548", "0.48923352", "0.48799527", "0.4872411", "0.48718336", "0.48712337", "0.48660466", "0.48660466", "0.48660466" ]
0.73682123
0
Validate the input manifest, passed as either a file or a json string.
def validate_configuration_manifest(self, source, **kwargs): return self._validate_manifest("configuration_manifest", source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. 
Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. 
Exiting\"\n sys.exit(1)", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}", "def supports_manifest(manifest):\n pass", "def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)", 
"def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def validate_json(self):\n pass", "def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest", "def validate(self, config_json):\n pass", "def validate(json_resp, schema, validictory_path, schema_base=None):\n # assumes /extern/validictory exists (see /cm for instructions)\n if not validictory_path in sys.path:\n sys.path.append(validictory_path)\n import validictory\n\n try:\n if schema_base and not json_resp[\"$schema\"].startswith(schema_base):\n print \"Warning: JSON schema is \", json_resp[\"$schema\"], \"instead of \", schema_base\n validictory.validate(json_resp, schema, required_by_default=False)\n return True\n except Exception as e:\n print \"Received exception %s while trying to validate: %s\" % (\n str(e), json_resp)\n return False", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def test_edit_manifest(self):\n \n manifest = copy.deepcopy(self.manifest)\n manifest['job']['interface']['command'] = ''\n \n json_data = {\n 'manifest': manifest,\n 'auto_update': False\n }\n\n url = 
'/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n \n # mismatch name\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['name'] = 'new-name'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)\n \n # mismatch version\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['jobVersion'] = '1.2.3'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def test_metadata_schema_json_invalid(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(files=files)\n assert not metadata_validation_form.is_valid()", "def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):\n manifest = []\n for json_line in codecs.open(manifest_path, 'r', 'utf-8'):\n try:\n json_data = json.loads(json_line)\n except Exception as e:\n raise IOError(\"Error reading manifest: %s\" % str(e))\n if (json_data[\"duration\"] <= max_duration and\n json_data[\"duration\"] >= min_duration):\n manifest.append(json_data)\n return manifest", "def test_metadata_schema_json_valid(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n with open(schema_file_path, 'r') as file_obj:\n json_schema = file_obj.read()\n assert len(json_schema) > 0\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema\": json_schema}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n 
\"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def load_manifest(path: Path):\n with open(path, \"rt\") as fin:\n data = json_load(fin)\n return Manifest.schema().load(data, many=True)", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def require_manifest(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # Assume the manifest is in the current directory\n try:\n # If we are in a repository, we want to look in\n # the root of that repository for the manifest\n current_repo = vcs_git.RepoTool(Path.cwd(), search_parent=True)\n root_path = current_repo.get_root_path()\n except vcs_git.InvalidRepository:\n # Since we are not in a repository we will look\n # for the manifest in the current directory\n root_path = Path.cwd()\n\n manifest_path = root_path / manifest.MANIFEST_NAME\n\n try:\n loaded_manifest = manifest.load_manifest(manifest_path)\n return func(loaded_manifest, root_path, *args, **kwargs)\n except manifest.NotFound:\n ui.error(f\"Unable to load manifest: Not found: {str(manifest_path)}\")\n sys.exit(1)\n except manifest.ValidationFailed as exc:\n ui.error(f\"Unable to load manifest: Validation failed\")\n ui.error(str(exc))\n sys.exit(1)\n\n return wrapper", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not 
self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def is_valid_json(json_str):\n assert json_str is not None\n try:\n json.loads(json_str)\n return True\n except (ValueError, TypeError):\n return False", "def test_metadata_schema_json_valid_file_upload(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert metadata_validation_form.is_valid()\n assert len(metadata_validation_form.cleaned_data['mi_json_schema_file']) > 0", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n 
json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def testGetManifest(self):\n manifest = self.dl_object._GetManifest()\n self.assertEqual(manifest.get('mediaType'),\n 'application/vnd.docker.distribution.manifest.v2+json')\n self.assertIn('layers', manifest)", "def test_is_valid_manifest_format_allowing_base64_encoded_md5(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\",\n allow_base64_encoded_md5=True,\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 not in error_log\n assert result == False", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validatePackage(filename, propFilename = None):\n\n if (propFilename == None):\n propFilename = filename + '.prop'\n\n if (not PackageUtil.validateProp(propFilename)):\n return False\n\n try:\n # check that the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Package (%s) does not exists' % (filename))\n return False\n\n # load in the prop file\n propFile = open(propFilename, 'r')\n prop = json.load(propFile)\n propFile.close()\n\n size = os.path.getsize(filename)\n if (size != int(prop['size'])):\n LOG.warning('package size = %s : %s' % (str(size), str(prop['size'])))\n return False\n\n md5Sum = md5sum(filename)\n propmd5 = prop['md5']\n if (md5Sum != propmd5):\n LOG.warning('package md5 = %s : %s' % (md5Sum, prop['md5']))\n return False\n\n # make sure the tar file has the expected structure\n # TPY to do after we fix the cronus-deploy\n\n except Exception, excep:\n LOG.error('validatePackage exception %s' % excep)\n return False\n\n return True", "def validate_package_metadata(filename, meta, expected_name, expected_version):\n if meta.get('name') != expected_name:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package name in %s: expected %s; got %s\"\n % (filename, expected_name, meta.get('name')))\n if meta.get('version') != expected_version:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package version in %s: expected %s; got %s\"\n % (filename, expected_version, meta.get('version')))\n if meta.get('dependencies') and not isinstance(meta['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"dependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('peerDependencies') and not isinstance(meta['peerDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"peerDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('devDependencies') and not isinstance(meta['devDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"devDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('rex'):\n if not isinstance(meta['rex'], dict):\n raise 
distutils.errors.DistutilsSetupError(\n \"\\\"rex\\\" key should be a JSON object in %s\"\n % filename)\n if meta['rex'].get('dependencies') and not isinstance(meta['rex']['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex.dependencies\\\" key should be a JSON object in %s\"\n % filename)", "def Validate(self, relative_file, contents):\n pass", "def validateFileMetadata(samweb, md=None, mdfile=None):\n if md:\n data = json.dumps(md)\n elif mdfile:\n data = mdfile.read()\n else:\n raise ArgumentError('Must specify metadata dictionary or file object')\n return samweb.postURL('/files/validate_metadata', data=data, content_type='application/json').text", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def test_is_valid_manifest_format_with_invalid_urls(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_urls.tsv\"\n )\n error_log = caplog.text\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert '\"test/test.txt\"' in error_log\n assert '\"testaws/aws/test.txt\"' in error_log\n assert '\"://test_bucket/test.txt\"' in error_log\n assert '\"s3://\"' in error_log\n assert '\"gs://\"' in error_log\n assert '\"s3://bucket_without_object\"' in error_log\n assert '\"s3://bucket_without_object/\"' in error_log\n assert '\"test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:/test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:test_bucket/aws/test.txt\"' in error_log\n assert '\"://test_bucket/aws/test.txt\"' in error_log\n assert '\"s3test_bucket/aws/test.txt\"' in error_log\n assert '\"https://www.uchicago.edu\"' in error_log\n assert '\"https://www.uchicago.edu/about\"' in error_log\n assert '\"google.com/path\"' in error_log\n assert '\"\"\"\"' in error_log\n assert \"\\\"''\\\"\" in error_log\n assert '\"[]\"' in error_log\n assert \"\\\"['']\\\"\" in error_log\n assert '\"[\"\"]\"' in error_log\n assert '\"[\"\", \"\"]\"' in error_log\n assert '\"[\"\", \\'\\']\"' in error_log\n assert result == False", "def validate(\n declaration: str,\n version: Optional[str],\n):\n as3s = AS3Schema(version=version)\n _declaration = deserialize(declaration.name)\n try:\n as3s.validate(declaration=_declaration)\n LOG_STDOUT.info(\n \"Validation passed for AS3 Schema version: {}\",\n as3s.version,\n feature=\"f-strings\",\n )\n except AS3ValidationError as exc:\n LOG_STDERR.error(\n \"Validation failed for AS3 Schema version: {}\",\n as3s.version,\n feature=\"f-strings\",\n )\n if exc.context:\n for subexc in exc.context:\n LOG_STDERR.info(\n \"\\n{}\\n\",\n subexc,\n feature=\"f-strings\",\n )\n raise exc", "def test_metadata_schema_json_invalid_file_upload(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert not metadata_validation_form.is_valid()", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import 
pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")", "def _read_manifest_json(self):\n with open(os.path.join(self._crx_dir, \"manifest.json\")) as manifest:\n return json.load(manifest)", "def is_json_valid(json_data: dict, json_schema: dict) -> bool:\r\n try:\r\n validate(instance=json_data, schema=json_schema)\r\n except jsonschema.exceptions.ValidationError as err:\r\n return False\r\n return True", "def test_sa_invalid_manifest_file_unknown_error(self, _mock_depfinder):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n 
logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:\n filename = filename or self.manifest_filename\n manifest = manifest or {}\n self.log.debug(f\"Updating manifest '{manifest}' to file '{filename}'\")\n with open(filename, \"w\") as f:\n json.dump(manifest, f, indent=2)", "def validate_config(\n json_schema: JsonDict, config: Any, config_path: StrSequence\n) -> None:\n try:\n jsonschema.validate(config, json_schema)\n except jsonschema.ValidationError as e:\n raise json_error_to_config_error(e, config_path)", "def is_valid_file(args):\n if args.file is not None:\n return True\n return False", "def test_is_valid_manifest_format_using_allowed_protocols(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_custom_url_protocols.tsv\",\n allowed_protocols=[\"s3\", \"gs\", \"http\", \"https\"],\n )\n error_log = caplog.text\n assert \"gs://test/test.txt\" not in error_log\n assert \"s3://testaws/aws/test.txt\" not in error_log\n assert \"https://www.uchicago.edu/about\" not in error_log\n assert \"http://en.wikipedia.org/wiki/University_of_Chicago\" not in error_log\n\n assert '\"s3://bucket_without_path\"' in error_log\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert result == False", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_with_csv(caplog):\n assert is_valid_manifest_format(\"tests/test_manifest.csv\") == True\n assert caplog.text == \"\"", "def validate_json_schema(path, name, data, schema, full_schema=not is_extension):\n errors = 0\n\n # The 
standard repository has an example extension.\n if 'docs/examples/organizations/organizational_units/ocds_divisionCode_extension' in path:\n full_schema = False\n\n # Kingfisher Collect uses JSON Schema files to validate Scrapy items.\n code_repo = repo_name == 'kingfisher-collect'\n\n # Non-OCDS schema don't:\n # * pair \"enum\" and \"codelist\"\n # * disallow \"null\" in \"type\" of \"items\"\n # * UpperCamelCase definitions and lowerCamelCase properties\n # * allow \"null\" in the \"type\" of optional fields\n # * include \"id\" fields in objects within arrays\n # * require \"title\", \"description\" and \"type\" properties\n json_schema_exceptions = {\n 'json-schema-draft-4.json',\n 'meta-schema.json',\n 'meta-schema-patch.json',\n }\n ocds_schema_exceptions = {\n 'dereferenced-release-schema.json',\n # standard-maintenance-scripts\n 'codelist-schema.json',\n 'extension-schema.json',\n # extension_registry\n 'extensions-schema.json',\n 'extension_versions-schema.json',\n # spoonbill\n 'ocds-simplified-schema.json',\n }\n schema_exceptions = json_schema_exceptions | ocds_schema_exceptions\n\n validate_items_type_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n '/definitions/AmendmentUnversioned/properties/changes/items', # deprecated\n '/definitions/record/properties/releases/oneOf/0/items', # 1.1\n },\n }\n\n def validate_codelist_enum_allow_missing(codelist):\n return is_extension and codelist in external_codelists\n\n validate_codelist_enum_kwargs = {\n 'fallback': {\n '/definitions/Metric/properties/id': ['string'],\n '/definitions/Milestone/properties/code': ['string', 'null'],\n },\n 'allow_missing': validate_codelist_enum_allow_missing,\n }\n\n validate_letter_case_kwargs = {\n 'property_exceptions': {'former_value'}, # deprecated\n 'definition_exceptions': {'record'}, # 1.1\n }\n\n def validate_metadata_presence_allow_missing(pointer):\n return 'links' in pointer.split('/') or code_repo # ocds_pagination_extension\n\n validate_metadata_presence_kwargs = {\n 'allow_missing': validate_metadata_presence_allow_missing,\n }\n\n def validate_object_id_allow_missing(pointer):\n parts = pointer.split('/')\n return 'versionedRelease' in parts or parts[-1] in {\n 'changes', # deprecated\n 'records', # uses `ocid` not `id`\n '0', # linked releases\n }\n\n validate_object_id_kwargs = {\n 'allow_missing': validate_object_id_allow_missing,\n 'allow_optional': {\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Amendment',\n '/definitions/Organization',\n '/definitions/OrganizationReference',\n '/definitions/RelatedProcess',\n },\n }\n if repo_name == 'infrastructure':\n validate_object_id_kwargs['allow_optional'].add('/definitions/Classification')\n\n validate_null_type_kwargs = {\n # OCDS allows null. 
OC4IDS disallows null.\n 'no_null': repo_name == 'infrastructure' or code_repo,\n 'allow_object_null': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n # See https://github.com/open-contracting/standard/pull/738#issuecomment-440727233\n '/definitions/Organization/properties/details',\n },\n 'allow_no_null': {\n '/definitions/Amendment/properties/changes/items/properties/property', # deprecated\n\n # Children of fields with omitWhenMerged.\n '/definitions/Link/properties/rel',\n '/definitions/Link/properties/href',\n\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Organization/properties/id',\n '/definitions/OrganizationReference/properties/id',\n '/definitions/RelatedProcess/properties/id',\n },\n }\n\n validate_array_items_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n '/definitions/Location/properties/geometry/properties/coordinates/items', # recursion\n },\n }\n\n validate_deep_properties_kwargs = {\n 'allow_deep': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n },\n }\n if is_extension: # avoid repetition in extensions\n validate_deep_properties_kwargs['allow_deep'].add('/definitions/Item/properties/unit')\n\n validator = Draft4Validator(schema, format_checker=FormatChecker())\n\n errors += validate_schema(path, data, validator)\n if errors:\n warnings.warn(f'{path} is not valid JSON Schema ({errors} errors)')\n\n if name not in schema_exceptions:\n if 'versioned-release-validation-schema.json' in path:\n validate_items_type_kwargs['additional_valid_types'] = ['object']\n errors += validate_array_items(path, data, **validate_array_items_kwargs)\n errors += validate_items_type(path, data, **validate_items_type_kwargs)\n if not code_repo:\n errors += validate_codelist_enum(path, data, **validate_codelist_enum_kwargs)\n errors += validate_letter_case(path, data, **validate_letter_case_kwargs)\n errors += validate_merge_properties(path, data)\n\n # `full_schema` is set to not expect extensions to repeat information from core.\n if full_schema:\n exceptions_plus_versioned = schema_exceptions | {\n 'versioned-release-validation-schema.json',\n }\n\n exceptions_plus_versioned_and_packages = exceptions_plus_versioned | {\n 'project-package-schema.json',\n 'record-package-schema.json',\n 'release-package-schema.json',\n }\n\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_ref(path, data)\n\n if name not in exceptions_plus_versioned:\n # Extensions aren't expected to repeat `title`, `description`, `type`.\n errors += validate_metadata_presence(path, data, **validate_metadata_presence_kwargs)\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_object_id(path, jsonref.replace_refs(data), **validate_object_id_kwargs)\n\n if name not in exceptions_plus_versioned_and_packages:\n # Extensions aren't expected to repeat `required`. Packages don't have merge rules.\n errors += validate_null_type(path, data, **validate_null_type_kwargs)\n # Extensions aren't expected to repeat referenced codelist CSV files\n # TODO: This code assumes each schema uses all codelists. 
So, for now, skip package schema.\n errors += validate_schema_codelists_match(path, data, cwd, is_extension, is_profile, external_codelists)\n\n else:\n # Don't count these as errors.\n validate_deep_properties(path, data, **validate_deep_properties_kwargs)\n\n assert not errors, 'One or more JSON Schema files are invalid. See warnings below.'", "def check_input_file(value):\n if not os.path.exists(value):\n raise argparse.ArgumentTypeError(f'Input file `{value}` does not exist')\n\n return value", "def load(filename: Path) -> Optional[List[\"Downloadable\"]]:\n try:\n manifest = Manifest.load_manifest(filename)\n return Manifest.create_object_list(manifest)\n except FileNotFoundError as ex:\n logger.critical(f\"Error file not found: {ex.filename}\")\n except JSONDecodeError as ex:\n logger.critical(f\"format of manifest file is valid JSON: {ex.msg}\")\n\n return None", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_using_line_limit(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\",\n line_limit=3,\n )\n error_log = caplog.text\n assert \"line 2\" in error_log\n assert \"line 3\" in error_log\n assert \"line 4\" not in error_log\n assert \"line 5\" not in error_log\n assert result == False", "def parse(manifest_filename):\n manifest = {}\n with io.open(manifest_filename, 'rt', encoding='utf8') as f:\n lineno = 0\n for line in f:\n # Split line into fields\n lineno += 1\n line = line.rstrip('\\n')\n fields = line.split(' ')\n\n # Parse fields\n stored_path = fields[0]\n if len(fields) == 1 or (len(fields) == 2 and not fields[1]):\n # line like 'foo\\n' or 'foo \\n'\n local_path = None\n elif len(fields) == 2 and fields[1]:\n # line like 'foo bar\\n'\n local_path = fields[1]\n else:\n raise error.Error('Syntax error at line %d in [%s]: %s' %\n (lineno, manifest_filename, repr(line)))\n\n # Ensure no collisions\n if stored_path in manifest:\n raise error.Error(\n ('Configuration error at line %d in [%s]: file [%s] '\n 'specified more than once') %\n (lineno, manifest_filename, stored_path))\n manifest[stored_path] = local_path\n return manifest", "def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True", "def validate(name, args, required, typ):\n value = args.get(name)\n if required and value is None:\n raise errors.Error(\"{0} is required argument\".format(name))\n if value is not None and not isinstance(value, typ):\n raise errors.Error(\"{0} should be {1}\".format(name, typ))", "def validateYaml(f):\n\tif os.path.isfile(f) and f.endswith('.yaml'):\n\t\ttry:\n\t\t\tjsonschema.validate(yaml.load(open(f)), cfg['post_schema'])\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tprint (\"Error loading post %s: %s\" % (f,e))[0:240] + \"...\\n\"\n\treturn False", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def 
app_validate(data):\n\n schema = json.load(open('schemas/app_description_schema.json', 'r'))\n try:\n jsonschema.validate(data, schema)\n except jsonschema.ValidationError as e:\n raise InvalidApplicationDescription(str(e))\n except jsonschema.SchemaError:\n log.exception('BUG: invalid schema for application descriptions')\n raise ZoeLibException('BUG: invalid schema for application descriptions')\n\n # Start non-schema, semantic checks\n if data['version'] != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription('Application description version mismatch (expected: {}, found: {}'.format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION, data['version']))\n\n found_monitor = False\n for service in data['services']:\n if service['monitor']:\n found_monitor = True\n\n service['resources']['memory']['max'] = zoe_lib.config.get_conf().max_memory_limit * (1024 ** 3)\n if service['resources']['memory']['min'] is not None and service['resources']['memory']['min'] > service['resources']['memory']['max']:\n raise InvalidApplicationDescription(msg='service {} tries to reserve more memory than the administrative limit'.format(service['name']))\n\n if service['resources']['cores']['min'] is None:\n service['resources']['cores']['min'] = 0.1\n\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have the monitor property set to true\")", "def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error", "def validate(self, json_data):\n try:\n self.process_json(json_data)\n except ValueError as e:\n # self.process_errors.append(e.args[0])\n self.process_errors = [e.args[0]]\n\n self.errors = list(self.process_errors)\n\n # Run validators\n if not self.errors:\n chain = itertools.chain(self.validators)\n self._run_validation_chain(chain)\n\n return len(self.errors) == 0", "def validate_file(inp, name=''):\n validate_string(inp, name)\n assert (os.path.exists(inp)), name + ' settings with value ' + inp + ' should exist.'", "def test_is_valid_manifest_format_using_column_names_to_enums(caplog):\n column_names_to_enums = {\n \"md5_with_underscores\": Columns.MD5,\n \"file size with spaces\": Columns.SIZE,\n \"Urls With Caps\": Columns.URL,\n \"authz with special chars!@*&\": Columns.AUTHZ,\n }\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_custom_column_names.tsv\",\n column_names_to_enums=column_names_to_enums,\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def is_manifest(location):\n return as_posixpath(location).lower().endswith('meta-inf/manifest.mf')", "def _check_is_file(_string: str) -> str:\n if os.path.isfile(_string):\n return _string\n 
else:\n raise argparse.ArgumentTypeError(\"{0} file does \"\n \"not exists.\".format(_string))", "def json_attribs_check(func):\n @wraps(func)\n def inner_func(jsonStr):\n gslvtsSchema = {\"type\":\"object\",\n \"properties\":{\n \"tagID\": {\"type\":\"number\"}, \n \"UTC\": {\"type\":\"string\",\n \"format\":\"date-time\"}\n\t\t\t},\n\t\t\t\"required\":[\"tagID\",\"UTC\"]\n }\n try:\n jsonGslvts=json.loads(jsonStr)\n for elem in jsonGslvts:\n try: \n validate(elem, gslvtsSchema, format_checker=FormatChecker())\n except ValidationError, e:\n print \"[-] Invalid json post data. Check it, brah.\"\n print e\n raise AttributeError \n except (AttributeError, ValueError):\n print \"[-] IDk what that was, but it wasn't JSON.\"\n raise AttributeError\n\n return(func(jsonStr)) \n return inner_func", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def is_valid_file_or_directory(args):\n if is_valid_file(args) or is_valid_directory(args):\n return True\n return False", "def validator(request, schema):\n try:\n body = request.body.decode('utf-8')\n dictbody = json.loads(body) if body else {}\n validate_against_schema(request, schema, dictbody)\n except ValueError as e:\n request.errors.add('body', 'body', six.text_type(e))", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def validate_json_schema(self, json_schema):\n cls = validators.validator_for(json_schema)\n cls.check_schema(json_schema)", "def validate_input(arg):\n if not type(arg) == list:\n raise ValidArgException('Input \"%s\" must be a list. Got %s' %(arg, type(arg)))\n \n if len(arg) != len(set(arg)):\n raise ValidArgException('\\n\\nDuplicate files found in input list %s\\n' %(arg))\n \n bnames= [os.path.split(x)[1] for x in arg]\n bnames= [re.sub('\\.gz$', '', x) for x in bnames]\n if len(bnames) == 2 and len(set(bnames)) == 1:\n raise ValidArgException('\\n\\nPaired fastq files must have different, unzipped names even if they are in different directories.\\nGot %s\\n' %(arg))\n \n for x in arg:\n if not os.path.isfile(x):\n raise ValidArgException('\\n\\nFile \"%s\" not found\\n' %(x))\n \n if len(arg) == 2:\n return('raw')\n elif len(arg) == 1:\n ext= os.path.splitext(arg[0])[1]\n if ext in ['.sam', '.bam']:\n return(ext.strip('.'))\n else:\n return('raw')\n else:\n raise ValidArgException('\\n\\n1 or 2 item must be in input \"%s\". 
Got %s\\n' %(arg, len(arg)))", "def test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "def validate_args(args):\n setup_logging(args.verbose)\n log.debug('Raw arguments:\\n{}'.format(args))\n\n # Check if pipeline file exists\n args.pipeline = Path(args.pipeline)\n\n if not args.pipeline.is_file():\n log.error('No such file {}'.format(args.pipeline))\n exit(1)\n\n args.pipeline = args.pipeline.resolve()\n\n return args", "def validate(config):\n runner = ScenarioRunner._get_cls(config.get(\"type\", \"continuous\"))\n jsonschema.validate(config, runner.CONFIG_SCHEMA)" ]
[ "0.75632393", "0.69152206", "0.6795271", "0.6775537", "0.6771405", "0.6737948", "0.6733013", "0.6731521", "0.6577707", "0.65763694", "0.63452816", "0.6327808", "0.62367785", "0.6152708", "0.6118632", "0.61025196", "0.60380036", "0.5982197", "0.5943263", "0.5929421", "0.587526", "0.5874747", "0.584609", "0.58105063", "0.5799155", "0.578816", "0.5782028", "0.5752429", "0.57448006", "0.5744447", "0.57205904", "0.5719164", "0.57138246", "0.5713497", "0.5705644", "0.57028884", "0.5690346", "0.5672376", "0.56463873", "0.5640912", "0.56368226", "0.56321555", "0.55724716", "0.557179", "0.55700445", "0.5565735", "0.55609536", "0.5547382", "0.5532762", "0.55265325", "0.5521955", "0.55138284", "0.55045503", "0.54960835", "0.5486397", "0.5475904", "0.545562", "0.5441734", "0.5426787", "0.54211134", "0.5420357", "0.54168266", "0.5408999", "0.5393168", "0.53789747", "0.53705245", "0.53704137", "0.5350039", "0.5336383", "0.5329001", "0.5325763", "0.5314367", "0.52980494", "0.52961946", "0.529572", "0.52916366", "0.5289648", "0.52866614", "0.5286068", "0.52748847", "0.5272324", "0.5267226", "0.5257841", "0.5248983", "0.52476853", "0.5228205", "0.5224968", "0.52247256", "0.5224321", "0.5221096", "0.5209716", "0.5206011", "0.5195764", "0.51940835", "0.51925576", "0.51916265", "0.51843715", "0.5175514", "0.5174648", "0.5170973" ]
0.5375063
65
Validate the input manifest, passed as either a file or a json string.
def validate_input_manifest(self, source, **kwargs):
    return self._validate_manifest("input_manifest", source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. 
Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. 
Exiting\"\n sys.exit(1)", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}", "def supports_manifest(manifest):\n pass", "def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)", 
"def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def validate_json(self):\n pass", "def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest", "def validate(self, config_json):\n pass", "def validate(json_resp, schema, validictory_path, schema_base=None):\n # assumes /extern/validictory exists (see /cm for instructions)\n if not validictory_path in sys.path:\n sys.path.append(validictory_path)\n import validictory\n\n try:\n if schema_base and not json_resp[\"$schema\"].startswith(schema_base):\n print \"Warning: JSON schema is \", json_resp[\"$schema\"], \"instead of \", schema_base\n validictory.validate(json_resp, schema, required_by_default=False)\n return True\n except Exception as e:\n print \"Received exception %s while trying to validate: %s\" % (\n str(e), json_resp)\n return False", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def test_edit_manifest(self):\n \n manifest = copy.deepcopy(self.manifest)\n manifest['job']['interface']['command'] = ''\n \n json_data = {\n 'manifest': manifest,\n 'auto_update': False\n }\n\n url = 
'/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n \n # mismatch name\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['name'] = 'new-name'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)\n \n # mismatch version\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['jobVersion'] = '1.2.3'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def test_metadata_schema_json_invalid(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(files=files)\n assert not metadata_validation_form.is_valid()", "def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):\n manifest = []\n for json_line in codecs.open(manifest_path, 'r', 'utf-8'):\n try:\n json_data = json.loads(json_line)\n except Exception as e:\n raise IOError(\"Error reading manifest: %s\" % str(e))\n if (json_data[\"duration\"] <= max_duration and\n json_data[\"duration\"] >= min_duration):\n manifest.append(json_data)\n return manifest", "def test_metadata_schema_json_valid(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n with open(schema_file_path, 'r') as file_obj:\n json_schema = file_obj.read()\n assert len(json_schema) > 0\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema\": json_schema}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n 
\"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def load_manifest(path: Path):\n with open(path, \"rt\") as fin:\n data = json_load(fin)\n return Manifest.schema().load(data, many=True)", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def require_manifest(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # Assume the manifest is in the current directory\n try:\n # If we are in a repository, we want to look in\n # the root of that repository for the manifest\n current_repo = vcs_git.RepoTool(Path.cwd(), search_parent=True)\n root_path = current_repo.get_root_path()\n except vcs_git.InvalidRepository:\n # Since we are not in a repository we will look\n # for the manifest in the current directory\n root_path = Path.cwd()\n\n manifest_path = root_path / manifest.MANIFEST_NAME\n\n try:\n loaded_manifest = manifest.load_manifest(manifest_path)\n return func(loaded_manifest, root_path, *args, **kwargs)\n except manifest.NotFound:\n ui.error(f\"Unable to load manifest: Not found: {str(manifest_path)}\")\n sys.exit(1)\n except manifest.ValidationFailed as exc:\n ui.error(f\"Unable to load manifest: Validation failed\")\n ui.error(str(exc))\n sys.exit(1)\n\n return wrapper", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not 
self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def is_valid_json(json_str):\n assert json_str is not None\n try:\n json.loads(json_str)\n return True\n except (ValueError, TypeError):\n return False", "def test_metadata_schema_json_valid_file_upload(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert metadata_validation_form.is_valid()\n assert len(metadata_validation_form.cleaned_data['mi_json_schema_file']) > 0", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n 
json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def testGetManifest(self):\n manifest = self.dl_object._GetManifest()\n self.assertEqual(manifest.get('mediaType'),\n 'application/vnd.docker.distribution.manifest.v2+json')\n self.assertIn('layers', manifest)", "def test_is_valid_manifest_format_allowing_base64_encoded_md5(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\",\n allow_base64_encoded_md5=True,\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 not in error_log\n assert result == False", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validatePackage(filename, propFilename = None):\n\n if (propFilename == None):\n propFilename = filename + '.prop'\n\n if (not PackageUtil.validateProp(propFilename)):\n return False\n\n try:\n # check that the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Package (%s) does not exists' % (filename))\n return False\n\n # load in the prop file\n propFile = open(propFilename, 'r')\n prop = json.load(propFile)\n propFile.close()\n\n size = os.path.getsize(filename)\n if (size != int(prop['size'])):\n LOG.warning('package size = %s : %s' % (str(size), str(prop['size'])))\n return False\n\n md5Sum = md5sum(filename)\n propmd5 = prop['md5']\n if (md5Sum != propmd5):\n LOG.warning('package md5 = %s : %s' % (md5Sum, prop['md5']))\n return False\n\n # make sure the tar file has the expected structure\n # TPY to do after we fix the cronus-deploy\n\n except Exception, excep:\n LOG.error('validatePackage exception %s' % excep)\n return False\n\n return True", "def validate_package_metadata(filename, meta, expected_name, expected_version):\n if meta.get('name') != expected_name:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package name in %s: expected %s; got %s\"\n % (filename, expected_name, meta.get('name')))\n if meta.get('version') != expected_version:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package version in %s: expected %s; got %s\"\n % (filename, expected_version, meta.get('version')))\n if meta.get('dependencies') and not isinstance(meta['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"dependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('peerDependencies') and not isinstance(meta['peerDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"peerDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('devDependencies') and not isinstance(meta['devDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"devDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('rex'):\n if not isinstance(meta['rex'], dict):\n raise 
distutils.errors.DistutilsSetupError(\n \"\\\"rex\\\" key should be a JSON object in %s\"\n % filename)\n if meta['rex'].get('dependencies') and not isinstance(meta['rex']['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex.dependencies\\\" key should be a JSON object in %s\"\n % filename)", "def Validate(self, relative_file, contents):\n pass", "def validateFileMetadata(samweb, md=None, mdfile=None):\n if md:\n data = json.dumps(md)\n elif mdfile:\n data = mdfile.read()\n else:\n raise ArgumentError('Must specify metadata dictionary or file object')\n return samweb.postURL('/files/validate_metadata', data=data, content_type='application/json').text", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def test_is_valid_manifest_format_with_invalid_urls(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_urls.tsv\"\n )\n error_log = caplog.text\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert '\"test/test.txt\"' in error_log\n assert '\"testaws/aws/test.txt\"' in error_log\n assert '\"://test_bucket/test.txt\"' in error_log\n assert '\"s3://\"' in error_log\n assert '\"gs://\"' in error_log\n assert '\"s3://bucket_without_object\"' in error_log\n assert '\"s3://bucket_without_object/\"' in error_log\n assert '\"test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:/test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:test_bucket/aws/test.txt\"' in error_log\n assert '\"://test_bucket/aws/test.txt\"' in error_log\n assert '\"s3test_bucket/aws/test.txt\"' in error_log\n assert '\"https://www.uchicago.edu\"' in error_log\n assert '\"https://www.uchicago.edu/about\"' in error_log\n assert '\"google.com/path\"' in error_log\n assert '\"\"\"\"' in error_log\n assert \"\\\"''\\\"\" in error_log\n assert '\"[]\"' in error_log\n assert \"\\\"['']\\\"\" in error_log\n assert '\"[\"\"]\"' in error_log\n assert '\"[\"\", \"\"]\"' in error_log\n assert '\"[\"\", \\'\\']\"' in error_log\n assert result == False", "def validate(\n declaration: str,\n version: Optional[str],\n):\n as3s = AS3Schema(version=version)\n _declaration = deserialize(declaration.name)\n try:\n as3s.validate(declaration=_declaration)\n LOG_STDOUT.info(\n \"Validation passed for AS3 Schema version: {}\",\n as3s.version,\n feature=\"f-strings\",\n )\n except AS3ValidationError as exc:\n LOG_STDERR.error(\n \"Validation failed for AS3 Schema version: {}\",\n as3s.version,\n feature=\"f-strings\",\n )\n if exc.context:\n for subexc in exc.context:\n LOG_STDERR.info(\n \"\\n{}\\n\",\n subexc,\n feature=\"f-strings\",\n )\n raise exc", "def test_metadata_schema_json_invalid_file_upload(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert not metadata_validation_form.is_valid()", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import 
pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")", "def _read_manifest_json(self):\n with open(os.path.join(self._crx_dir, \"manifest.json\")) as manifest:\n return json.load(manifest)", "def is_json_valid(json_data: dict, json_schema: dict) -> bool:\r\n try:\r\n validate(instance=json_data, schema=json_schema)\r\n except jsonschema.exceptions.ValidationError as err:\r\n return False\r\n return True", "def test_sa_invalid_manifest_file_unknown_error(self, _mock_depfinder):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n 
logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def validate_configuration_manifest(self, source, **kwargs):\n return self._validate_manifest(\"configuration_manifest\", source, **kwargs)", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:\n filename = filename or self.manifest_filename\n manifest = manifest or {}\n self.log.debug(f\"Updating manifest '{manifest}' to file '{filename}'\")\n with open(filename, \"w\") as f:\n json.dump(manifest, f, indent=2)", "def validate_config(\n json_schema: JsonDict, config: Any, config_path: StrSequence\n) -> None:\n try:\n jsonschema.validate(config, json_schema)\n except jsonschema.ValidationError as e:\n raise json_error_to_config_error(e, config_path)", "def is_valid_file(args):\n if args.file is not None:\n return True\n return False", "def test_is_valid_manifest_format_using_allowed_protocols(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_custom_url_protocols.tsv\",\n allowed_protocols=[\"s3\", \"gs\", \"http\", \"https\"],\n )\n error_log = caplog.text\n assert \"gs://test/test.txt\" not in error_log\n assert \"s3://testaws/aws/test.txt\" not in error_log\n assert \"https://www.uchicago.edu/about\" not in error_log\n assert \"http://en.wikipedia.org/wiki/University_of_Chicago\" not in error_log\n\n assert '\"s3://bucket_without_path\"' in error_log\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert result == False", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_with_csv(caplog):\n assert is_valid_manifest_format(\"tests/test_manifest.csv\") == 
True\n assert caplog.text == \"\"", "def validate_json_schema(path, name, data, schema, full_schema=not is_extension):\n errors = 0\n\n # The standard repository has an example extension.\n if 'docs/examples/organizations/organizational_units/ocds_divisionCode_extension' in path:\n full_schema = False\n\n # Kingfisher Collect uses JSON Schema files to validate Scrapy items.\n code_repo = repo_name == 'kingfisher-collect'\n\n # Non-OCDS schema don't:\n # * pair \"enum\" and \"codelist\"\n # * disallow \"null\" in \"type\" of \"items\"\n # * UpperCamelCase definitions and lowerCamelCase properties\n # * allow \"null\" in the \"type\" of optional fields\n # * include \"id\" fields in objects within arrays\n # * require \"title\", \"description\" and \"type\" properties\n json_schema_exceptions = {\n 'json-schema-draft-4.json',\n 'meta-schema.json',\n 'meta-schema-patch.json',\n }\n ocds_schema_exceptions = {\n 'dereferenced-release-schema.json',\n # standard-maintenance-scripts\n 'codelist-schema.json',\n 'extension-schema.json',\n # extension_registry\n 'extensions-schema.json',\n 'extension_versions-schema.json',\n # spoonbill\n 'ocds-simplified-schema.json',\n }\n schema_exceptions = json_schema_exceptions | ocds_schema_exceptions\n\n validate_items_type_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n '/definitions/AmendmentUnversioned/properties/changes/items', # deprecated\n '/definitions/record/properties/releases/oneOf/0/items', # 1.1\n },\n }\n\n def validate_codelist_enum_allow_missing(codelist):\n return is_extension and codelist in external_codelists\n\n validate_codelist_enum_kwargs = {\n 'fallback': {\n '/definitions/Metric/properties/id': ['string'],\n '/definitions/Milestone/properties/code': ['string', 'null'],\n },\n 'allow_missing': validate_codelist_enum_allow_missing,\n }\n\n validate_letter_case_kwargs = {\n 'property_exceptions': {'former_value'}, # deprecated\n 'definition_exceptions': {'record'}, # 1.1\n }\n\n def validate_metadata_presence_allow_missing(pointer):\n return 'links' in pointer.split('/') or code_repo # ocds_pagination_extension\n\n validate_metadata_presence_kwargs = {\n 'allow_missing': validate_metadata_presence_allow_missing,\n }\n\n def validate_object_id_allow_missing(pointer):\n parts = pointer.split('/')\n return 'versionedRelease' in parts or parts[-1] in {\n 'changes', # deprecated\n 'records', # uses `ocid` not `id`\n '0', # linked releases\n }\n\n validate_object_id_kwargs = {\n 'allow_missing': validate_object_id_allow_missing,\n 'allow_optional': {\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Amendment',\n '/definitions/Organization',\n '/definitions/OrganizationReference',\n '/definitions/RelatedProcess',\n },\n }\n if repo_name == 'infrastructure':\n validate_object_id_kwargs['allow_optional'].add('/definitions/Classification')\n\n validate_null_type_kwargs = {\n # OCDS allows null. 
OC4IDS disallows null.\n 'no_null': repo_name == 'infrastructure' or code_repo,\n 'allow_object_null': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n # See https://github.com/open-contracting/standard/pull/738#issuecomment-440727233\n '/definitions/Organization/properties/details',\n },\n 'allow_no_null': {\n '/definitions/Amendment/properties/changes/items/properties/property', # deprecated\n\n # Children of fields with omitWhenMerged.\n '/definitions/Link/properties/rel',\n '/definitions/Link/properties/href',\n\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Organization/properties/id',\n '/definitions/OrganizationReference/properties/id',\n '/definitions/RelatedProcess/properties/id',\n },\n }\n\n validate_array_items_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n '/definitions/Location/properties/geometry/properties/coordinates/items', # recursion\n },\n }\n\n validate_deep_properties_kwargs = {\n 'allow_deep': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n },\n }\n if is_extension: # avoid repetition in extensions\n validate_deep_properties_kwargs['allow_deep'].add('/definitions/Item/properties/unit')\n\n validator = Draft4Validator(schema, format_checker=FormatChecker())\n\n errors += validate_schema(path, data, validator)\n if errors:\n warnings.warn(f'{path} is not valid JSON Schema ({errors} errors)')\n\n if name not in schema_exceptions:\n if 'versioned-release-validation-schema.json' in path:\n validate_items_type_kwargs['additional_valid_types'] = ['object']\n errors += validate_array_items(path, data, **validate_array_items_kwargs)\n errors += validate_items_type(path, data, **validate_items_type_kwargs)\n if not code_repo:\n errors += validate_codelist_enum(path, data, **validate_codelist_enum_kwargs)\n errors += validate_letter_case(path, data, **validate_letter_case_kwargs)\n errors += validate_merge_properties(path, data)\n\n # `full_schema` is set to not expect extensions to repeat information from core.\n if full_schema:\n exceptions_plus_versioned = schema_exceptions | {\n 'versioned-release-validation-schema.json',\n }\n\n exceptions_plus_versioned_and_packages = exceptions_plus_versioned | {\n 'project-package-schema.json',\n 'record-package-schema.json',\n 'release-package-schema.json',\n }\n\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_ref(path, data)\n\n if name not in exceptions_plus_versioned:\n # Extensions aren't expected to repeat `title`, `description`, `type`.\n errors += validate_metadata_presence(path, data, **validate_metadata_presence_kwargs)\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_object_id(path, jsonref.replace_refs(data), **validate_object_id_kwargs)\n\n if name not in exceptions_plus_versioned_and_packages:\n # Extensions aren't expected to repeat `required`. Packages don't have merge rules.\n errors += validate_null_type(path, data, **validate_null_type_kwargs)\n # Extensions aren't expected to repeat referenced codelist CSV files\n # TODO: This code assumes each schema uses all codelists. 
So, for now, skip package schema.\n errors += validate_schema_codelists_match(path, data, cwd, is_extension, is_profile, external_codelists)\n\n else:\n # Don't count these as errors.\n validate_deep_properties(path, data, **validate_deep_properties_kwargs)\n\n assert not errors, 'One or more JSON Schema files are invalid. See warnings below.'", "def check_input_file(value):\n if not os.path.exists(value):\n raise argparse.ArgumentTypeError(f'Input file `{value}` does not exist')\n\n return value", "def load(filename: Path) -> Optional[List[\"Downloadable\"]]:\n try:\n manifest = Manifest.load_manifest(filename)\n return Manifest.create_object_list(manifest)\n except FileNotFoundError as ex:\n logger.critical(f\"Error file not found: {ex.filename}\")\n except JSONDecodeError as ex:\n logger.critical(f\"format of manifest file is valid JSON: {ex.msg}\")\n\n return None", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_using_line_limit(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\",\n line_limit=3,\n )\n error_log = caplog.text\n assert \"line 2\" in error_log\n assert \"line 3\" in error_log\n assert \"line 4\" not in error_log\n assert \"line 5\" not in error_log\n assert result == False", "def parse(manifest_filename):\n manifest = {}\n with io.open(manifest_filename, 'rt', encoding='utf8') as f:\n lineno = 0\n for line in f:\n # Split line into fields\n lineno += 1\n line = line.rstrip('\\n')\n fields = line.split(' ')\n\n # Parse fields\n stored_path = fields[0]\n if len(fields) == 1 or (len(fields) == 2 and not fields[1]):\n # line like 'foo\\n' or 'foo \\n'\n local_path = None\n elif len(fields) == 2 and fields[1]:\n # line like 'foo bar\\n'\n local_path = fields[1]\n else:\n raise error.Error('Syntax error at line %d in [%s]: %s' %\n (lineno, manifest_filename, repr(line)))\n\n # Ensure no collisions\n if stored_path in manifest:\n raise error.Error(\n ('Configuration error at line %d in [%s]: file [%s] '\n 'specified more than once') %\n (lineno, manifest_filename, stored_path))\n manifest[stored_path] = local_path\n return manifest", "def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True", "def validate(name, args, required, typ):\n value = args.get(name)\n if required and value is None:\n raise errors.Error(\"{0} is required argument\".format(name))\n if value is not None and not isinstance(value, typ):\n raise errors.Error(\"{0} should be {1}\".format(name, typ))", "def validateYaml(f):\n\tif os.path.isfile(f) and f.endswith('.yaml'):\n\t\ttry:\n\t\t\tjsonschema.validate(yaml.load(open(f)), cfg['post_schema'])\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tprint (\"Error loading post %s: %s\" % (f,e))[0:240] + \"...\\n\"\n\treturn False", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def 
app_validate(data):\n\n schema = json.load(open('schemas/app_description_schema.json', 'r'))\n try:\n jsonschema.validate(data, schema)\n except jsonschema.ValidationError as e:\n raise InvalidApplicationDescription(str(e))\n except jsonschema.SchemaError:\n log.exception('BUG: invalid schema for application descriptions')\n raise ZoeLibException('BUG: invalid schema for application descriptions')\n\n # Start non-schema, semantic checks\n if data['version'] != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription('Application description version mismatch (expected: {}, found: {}'.format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION, data['version']))\n\n found_monitor = False\n for service in data['services']:\n if service['monitor']:\n found_monitor = True\n\n service['resources']['memory']['max'] = zoe_lib.config.get_conf().max_memory_limit * (1024 ** 3)\n if service['resources']['memory']['min'] is not None and service['resources']['memory']['min'] > service['resources']['memory']['max']:\n raise InvalidApplicationDescription(msg='service {} tries to reserve more memory than the administrative limit'.format(service['name']))\n\n if service['resources']['cores']['min'] is None:\n service['resources']['cores']['min'] = 0.1\n\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have the monitor property set to true\")", "def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error", "def validate(self, json_data):\n try:\n self.process_json(json_data)\n except ValueError as e:\n # self.process_errors.append(e.args[0])\n self.process_errors = [e.args[0]]\n\n self.errors = list(self.process_errors)\n\n # Run validators\n if not self.errors:\n chain = itertools.chain(self.validators)\n self._run_validation_chain(chain)\n\n return len(self.errors) == 0", "def validate_file(inp, name=''):\n validate_string(inp, name)\n assert (os.path.exists(inp)), name + ' settings with value ' + inp + ' should exist.'", "def test_is_valid_manifest_format_using_column_names_to_enums(caplog):\n column_names_to_enums = {\n \"md5_with_underscores\": Columns.MD5,\n \"file size with spaces\": Columns.SIZE,\n \"Urls With Caps\": Columns.URL,\n \"authz with special chars!@*&\": Columns.AUTHZ,\n }\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_custom_column_names.tsv\",\n column_names_to_enums=column_names_to_enums,\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def is_manifest(location):\n return as_posixpath(location).lower().endswith('meta-inf/manifest.mf')", "def _check_is_file(_string: str) -> str:\n if os.path.isfile(_string):\n return _string\n 
else:\n raise argparse.ArgumentTypeError(\"{0} file does \"\n \"not exists.\".format(_string))", "def json_attribs_check(func):\n @wraps(func)\n def inner_func(jsonStr):\n gslvtsSchema = {\"type\":\"object\",\n \"properties\":{\n \"tagID\": {\"type\":\"number\"}, \n \"UTC\": {\"type\":\"string\",\n \"format\":\"date-time\"}\n\t\t\t},\n\t\t\t\"required\":[\"tagID\",\"UTC\"]\n }\n try:\n jsonGslvts=json.loads(jsonStr)\n for elem in jsonGslvts:\n try: \n validate(elem, gslvtsSchema, format_checker=FormatChecker())\n except ValidationError, e:\n print \"[-] Invalid json post data. Check it, brah.\"\n print e\n raise AttributeError \n except (AttributeError, ValueError):\n print \"[-] IDk what that was, but it wasn't JSON.\"\n raise AttributeError\n\n return(func(jsonStr)) \n return inner_func", "def check_string():\n\n # Forcing check for valid json and headers with Content-Type:application/json\n content = request.get_json(silent=False, force=True)\n\n payload = content.get('data', None)\n \n if not payload:\n return response_handler(\n {\"error\": \"'data' key missing from JSON payload.\"},\n 400\n )\n if not isinstance(payload, basestring):\n return response_handler(\n {\"error\": \"Value of 'data' key is not of type 'string'.\"},\n 400\n )\n \n pangram = analyze_string(payload)\n if not pangram:\n return response_handler(\n {\"error\": False},\n 400\n )\n\n return response_handler(\n {\"success\": True},\n 200\n )", "def is_valid_file_or_directory(args):\n if is_valid_file(args) or is_valid_directory(args):\n return True\n return False", "def validator(request, schema):\n try:\n body = request.body.decode('utf-8')\n dictbody = json.loads(body) if body else {}\n validate_against_schema(request, schema, dictbody)\n except ValueError as e:\n request.errors.add('body', 'body', six.text_type(e))", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def validate_json_schema(self, json_schema):\n cls = validators.validator_for(json_schema)\n cls.check_schema(json_schema)", "def validate_input(arg):\n if not type(arg) == list:\n raise ValidArgException('Input \"%s\" must be a list. Got %s' %(arg, type(arg)))\n \n if len(arg) != len(set(arg)):\n raise ValidArgException('\\n\\nDuplicate files found in input list %s\\n' %(arg))\n \n bnames= [os.path.split(x)[1] for x in arg]\n bnames= [re.sub('\\.gz$', '', x) for x in bnames]\n if len(bnames) == 2 and len(set(bnames)) == 1:\n raise ValidArgException('\\n\\nPaired fastq files must have different, unzipped names even if they are in different directories.\\nGot %s\\n' %(arg))\n \n for x in arg:\n if not os.path.isfile(x):\n raise ValidArgException('\\n\\nFile \"%s\" not found\\n' %(x))\n \n if len(arg) == 2:\n return('raw')\n elif len(arg) == 1:\n ext= os.path.splitext(arg[0])[1]\n if ext in ['.sam', '.bam']:\n return(ext.strip('.'))\n else:\n return('raw')\n else:\n raise ValidArgException('\\n\\n1 or 2 item must be in input \"%s\". 
Got %s\\n' %(arg, len(arg)))", "def test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "def validate_args(args):\n setup_logging(args.verbose)\n log.debug('Raw arguments:\\n{}'.format(args))\n\n # Check if pipeline file exists\n args.pipeline = Path(args.pipeline)\n\n if not args.pipeline.is_file():\n log.error('No such file {}'.format(args.pipeline))\n exit(1)\n\n args.pipeline = args.pipeline.resolve()\n\n return args", "def validate(config):\n runner = ScenarioRunner._get_cls(config.get(\"type\", \"continuous\"))\n jsonschema.validate(config, runner.CONFIG_SCHEMA)" ]
[ "0.75632393", "0.69152206", "0.6795271", "0.6771405", "0.6737948", "0.6733013", "0.6731521", "0.6577707", "0.65763694", "0.63452816", "0.6327808", "0.62367785", "0.6152708", "0.6118632", "0.61025196", "0.60380036", "0.5982197", "0.5943263", "0.5929421", "0.587526", "0.5874747", "0.584609", "0.58105063", "0.5799155", "0.578816", "0.5782028", "0.5752429", "0.57448006", "0.5744447", "0.57205904", "0.5719164", "0.57138246", "0.5713497", "0.5705644", "0.57028884", "0.5690346", "0.5672376", "0.56463873", "0.5640912", "0.56368226", "0.56321555", "0.55724716", "0.557179", "0.55700445", "0.5565735", "0.55609536", "0.5547382", "0.5532762", "0.55265325", "0.5521955", "0.55138284", "0.55045503", "0.54960835", "0.5486397", "0.5475904", "0.545562", "0.5441734", "0.5426787", "0.54211134", "0.5420357", "0.54168266", "0.5408999", "0.5393168", "0.53789747", "0.5375063", "0.53705245", "0.53704137", "0.5350039", "0.5336383", "0.5329001", "0.5325763", "0.5314367", "0.52980494", "0.52961946", "0.529572", "0.52916366", "0.5289648", "0.52866614", "0.5286068", "0.52748847", "0.5272324", "0.5267226", "0.5257841", "0.5248983", "0.52476853", "0.5228205", "0.5224968", "0.52247256", "0.5224321", "0.5221096", "0.5209716", "0.5206011", "0.5195764", "0.51940835", "0.51925576", "0.51916265", "0.51843715", "0.5175514", "0.5174648", "0.5170973" ]
0.6775537
3
Validate the output manifest, passed as either a file or a json string.
def validate_output_manifest(self, source, **kwargs): return self._validate_manifest("output_manifest", source, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. 
Exiting.\"\n exit(-1)\n return False", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. Exiting\"\n sys.exit(1)", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def check_output(out: Union[str, bytes], fmt: str) -> None:\n if fmt in [\"png\", \"pdf\"]:\n assert isinstance(out, bytes)\n elif fmt in [\"vega\", \"vega-lite\"]:\n assert isinstance(out, str)\n dct = json.loads(out)\n assert len(dct) > 0\n else:\n assert isinstance(out, str)\n assert len(out) > 0", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. 
Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. 
Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def valid_and_export(template, dashname):\n\n if not json_validation(template):\n print('Bad json format for ' + dashname + ' grafana dashboard')\n else:\n if export_file(template, dashname + '.json'):\n print('Successfully generated dashboard: ' + dashname)\n else:\n print('Error during export dashboard: ' + dashname)", "def schema_validate_kubernetes_output(validate_data, cache_dir):\n (kind, version), validate_files = validate_data\n KubernetesManifestValidator(cache_dir).validate(validate_files, kind=kind, version=version)", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)", "def validate(self, config_json):\n pass", "def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n 
assert '\"invalid_authz2\"' in error_log\n assert result == False", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def validate_package_metadata(filename, meta, expected_name, expected_version):\n if meta.get('name') != expected_name:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package name in %s: expected %s; got %s\"\n % (filename, expected_name, meta.get('name')))\n if meta.get('version') != expected_version:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package version in %s: expected %s; got %s\"\n % (filename, expected_version, meta.get('version')))\n if meta.get('dependencies') and not isinstance(meta['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"dependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('peerDependencies') and not isinstance(meta['peerDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"peerDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('devDependencies') and not isinstance(meta['devDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"devDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('rex'):\n if not isinstance(meta['rex'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex\\\" key should be a JSON object in %s\"\n % filename)\n if meta['rex'].get('dependencies') and not isinstance(meta['rex']['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex.dependencies\\\" key should be a JSON object in %s\"\n % filename)", "def supports_manifest(manifest):\n pass", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def validate_yaml(\n data_json: str,\n root_dir: str,\n output_file: str = None\n) -> None:\n grep_tags, source_tags, ignored_tags, source_methods = (\n analyze.analyze_json(data_json, root_dir))\n\n (is_valid, output) = cli_yaml.validate_yaml_syntax(\n root_dir, grep_tags, source_tags)\n\n if is_valid:\n output.append('All files are valid.')\n else:\n output.append('Invalid file(s) found!')\n\n _write_output(output, output_file)", "def validate(json_resp, schema, validictory_path, schema_base=None):\n # assumes /extern/validictory exists (see /cm for instructions)\n if not validictory_path in sys.path:\n sys.path.append(validictory_path)\n import validictory\n\n try:\n if schema_base and not json_resp[\"$schema\"].startswith(schema_base):\n print \"Warning: JSON schema is \", json_resp[\"$schema\"], \"instead of \", schema_base\n validictory.validate(json_resp, schema, required_by_default=False)\n return True\n except Exception as e:\n print \"Received 
exception %s while trying to validate: %s\" % (\n str(e), json_resp)\n return False", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def test_metadata_schema_json_invalid(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(files=files)\n assert not metadata_validation_form.is_valid()", "def validatePackage(filename, propFilename = None):\n\n if (propFilename == None):\n propFilename = filename + '.prop'\n\n if (not PackageUtil.validateProp(propFilename)):\n return False\n\n try:\n # check that the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Package (%s) does not exists' % (filename))\n return False\n\n # load in the prop file\n propFile = open(propFilename, 'r')\n prop = json.load(propFile)\n propFile.close()\n\n size = os.path.getsize(filename)\n if (size != int(prop['size'])):\n LOG.warning('package size = %s : %s' % (str(size), str(prop['size'])))\n return False\n\n md5Sum = md5sum(filename)\n propmd5 = prop['md5']\n if (md5Sum != propmd5):\n LOG.warning('package md5 = %s : %s' % (md5Sum, prop['md5']))\n return False\n\n # make sure the tar file has the expected structure\n # TPY to do after we fix the cronus-deploy\n\n except Exception, excep:\n LOG.error('validatePackage exception %s' % excep)\n return False\n\n return True", "def test_edit_manifest(self):\n \n manifest = copy.deepcopy(self.manifest)\n manifest['job']['interface']['command'] = ''\n \n json_data = {\n 'manifest': manifest,\n 'auto_update': False\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n \n # mismatch name\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['name'] = 'new-name'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)\n \n # mismatch version\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['jobVersion'] = '1.2.3'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def 
_validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def validate_json(self):\n pass", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def Validate(self, relative_file, contents):\n pass", "def testGetManifest(self):\n manifest = self.dl_object._GetManifest()\n self.assertEqual(manifest.get('mediaType'),\n 'application/vnd.docker.distribution.manifest.v2+json')\n self.assertIn('layers', manifest)", "def test_metadata_schema_json_valid(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n with open(schema_file_path, 'r') as file_obj:\n json_schema = file_obj.read()\n assert len(json_schema) > 0\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema\": json_schema}\n 
metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def test_make_tool_plugin_parse_invalid():\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n output = \"invalid text\"\n issues = mtp.parse_output(package, output)\n assert not issues", "def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest", "def validateFileMetadata(samweb, md=None, mdfile=None):\n if md:\n data = json.dumps(md)\n elif mdfile:\n data = mdfile.read()\n else:\n raise ArgumentError('Must specify metadata dictionary or file object')\n return samweb.postURL('/files/validate_metadata', data=data, content_type='application/json').text", "def _cli_validate(self, settings, remaining_argv):\n return None", "def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):\n manifest = []\n for json_line in codecs.open(manifest_path, 'r', 'utf-8'):\n try:\n json_data = json.loads(json_line)\n except Exception as e:\n raise IOError(\"Error reading manifest: %s\" % str(e))\n if (json_data[\"duration\"] <= max_duration and\n json_data[\"duration\"] >= min_duration):\n manifest.append(json_data)\n return manifest", "def test_is_valid_manifest_format_with_invalid_urls(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_urls.tsv\"\n )\n error_log = caplog.text\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert '\"test/test.txt\"' in error_log\n assert '\"testaws/aws/test.txt\"' in error_log\n assert '\"://test_bucket/test.txt\"' in error_log\n assert '\"s3://\"' in error_log\n assert '\"gs://\"' in error_log\n assert '\"s3://bucket_without_object\"' in error_log\n assert '\"s3://bucket_without_object/\"' in error_log\n assert '\"test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:/test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:test_bucket/aws/test.txt\"' in error_log\n assert '\"://test_bucket/aws/test.txt\"' in error_log\n assert '\"s3test_bucket/aws/test.txt\"' in error_log\n assert '\"https://www.uchicago.edu\"' in error_log\n assert '\"https://www.uchicago.edu/about\"' in error_log\n assert '\"google.com/path\"' in error_log\n assert '\"\"\"\"' in error_log\n assert \"\\\"''\\\"\" in error_log\n assert '\"[]\"' in error_log\n assert \"\\\"['']\\\"\" in error_log\n assert '\"[\"\"]\"' in error_log\n assert '\"[\"\", \"\"]\"' in error_log\n assert '\"[\"\", \\'\\']\"' in error_log\n assert result == False", "def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True", "def test_valid_json():\n invalid_json = 
False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def test_is_valid_manifest_format_allowing_base64_encoded_md5(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\",\n allow_base64_encoded_md5=True,\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 not in error_log\n assert result == False", "def test_metadata_schema_json_valid_file_upload(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert metadata_validation_form.is_valid()\n assert len(metadata_validation_form.cleaned_data['mi_json_schema_file']) > 0", "def is_valid_file(args):\n if args.file is not None:\n return True\n return False", "def test_is_valid_manifest_format_with_csv(caplog):\n assert is_valid_manifest_format(\"tests/test_manifest.csv\") == True\n assert caplog.text == \"\"", "def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def validate(input, output, fields, delimiter, encoding, verbose, format_in, zipfile, rule, filter, mode):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n options['format_in'] = format_in\n options['zipfile'] = zipfile\n options['filter'] = filter\n options['rule'] = rule\n options['mode'] = mode\n acmd = Validator()\n acmd.validate(input, options)\n pass", "def validate_with_ovftool(self, filename=None):\n if filename is None:\n filename = self.temp_file\n if (self.validate_output_with_ovftool and\n os.path.exists(filename) and\n helpers['ovftool']):\n try:\n helpers['ovftool'].call(['--schemaValidate', filename])\n except HelperError as exc:\n self.fail(\"OVF not valid according to ovftool:\\n{0}\"\n .format(exc.strerror))", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = 
'../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n 
except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def test_metadata_schema_json_invalid_file_upload(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert not metadata_validation_form.is_valid()", "def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")", "def is_valid_file_or_directory(args):\n if is_valid_file(args) or is_valid_directory(args):\n return True\n return False", "def test_is_valid_manifest_format_using_line_limit(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\",\n line_limit=3,\n )\n error_log = caplog.text\n assert \"line 2\" in error_log\n assert \"line 3\" in error_log\n assert \"line 4\" not in error_log\n assert \"line 5\" not in error_log\n assert result == False", "def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")", "def validate():\n description = f\"Validate XML metadata.\"\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n help = \"XML file or URL\"\n parser.add_argument('infile', help=help)\n\n help = (\n \"Format ID for metadata standard. If this argument is supplied, \"\n \"only that format ID will be checked. If not, all format IDs will be \"\n \"checked.\"\n )\n parser.add_argument('--format-id',\n help=help,\n choices=d1_scimeta.util.get_supported_format_id_list())\n\n help = \"Verbosity of log messages.\"\n choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n parser.add_argument('-v', '--verbosity', help=help, choices=choices,\n default='INFO')\n\n args = parser.parse_args()\n\n validator = XMLValidator(verbosity=args.verbosity)\n validator.validate(args.infile, format_id=args.format_id)", "def validate_args(opt):\n if not PathManager.exists(opt['input']):\n raise IOError(\"Input File does not exist\")\n if opt['output'] is None:\n return None\n extension = opt['output'].split(\".\")[-1]\n if extension not in [\"html\", \"pdf\", \"png\"]:\n raise Exception(\n \"Extension not specified/supported. 
Specify one of '.html', '.pdf' or '.png' output files\"\n )\n opt['user_icon'] = check_icon_arg(opt['user_icon'], HUMAN_EMOJI_IMG)\n opt['alt_icon'] = check_icon_arg(opt['alt_icon'], ALT_EMOJI_IMG)\n return extension", "def require_manifest(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # Assume the manifest is in the current directory\n try:\n # If we are in a repository, we want to look in\n # the root of that repository for the manifest\n current_repo = vcs_git.RepoTool(Path.cwd(), search_parent=True)\n root_path = current_repo.get_root_path()\n except vcs_git.InvalidRepository:\n # Since we are not in a repository we will look\n # for the manifest in the current directory\n root_path = Path.cwd()\n\n manifest_path = root_path / manifest.MANIFEST_NAME\n\n try:\n loaded_manifest = manifest.load_manifest(manifest_path)\n return func(loaded_manifest, root_path, *args, **kwargs)\n except manifest.NotFound:\n ui.error(f\"Unable to load manifest: Not found: {str(manifest_path)}\")\n sys.exit(1)\n except manifest.ValidationFailed as exc:\n ui.error(f\"Unable to load manifest: Validation failed\")\n ui.error(str(exc))\n sys.exit(1)\n\n return wrapper", "def check_input_file(value):\n if not os.path.exists(value):\n raise argparse.ArgumentTypeError(f'Input file `{value}` does not exist')\n\n return value", "def validate(args):\n if args.format not in FORMATTERS:\n print(\n \"Subtitle format not supported. \"\n \"Run with --list-formats to see all supported formats.\"\n )\n return False\n\n if args.src_language not in LANGUAGE_CODES.keys():\n print(\n \"Source language not supported. \"\n \"Run with --list-languages to see all supported languages.\"\n )\n return False\n\n if args.dst_language not in LANGUAGE_CODES.keys():\n print(\n \"Destination language not supported. 
\"\n \"Run with --list-languages to see all supported languages.\"\n )\n return False\n\n if not args.source_path:\n print(\"Error: You need to specify a source path.\")\n return False\n\n return True", "def _is_valid(self, *args, **kwargs):\n fn = args[0]\n if not fn.endswith(\".h5\"):\n return False\n try:\n with h5py.File(fn, \"r\") as f:\n if \"arbor_type\" not in f.attrs:\n return False\n if f.attrs[\"arbor_type\"].astype(str) != \"ArborArbor\":\n return False\n except BaseException:\n return False\n return True", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def test_empty_media(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['inputs']['files'][0]['mediaTypes'] = []\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n del manifest['job']['interface']['inputs']['files'][0]['mediaTypes']\n config = copy.deepcopy(self.configuration)\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def validate_configuration_manifest(self, source, **kwargs):\n return self._validate_manifest(\"configuration_manifest\", source, **kwargs)", "def _is_valid(self, *args, **kwargs):\n fn = args[0]\n if not fn.endswith(self._suffix):\n return False\n try:\n with h5py.File(fn, \"r\") as f:\n if \"arbor_type\" not in f.attrs:\n return False\n if f.attrs[\"arbor_type\"].astype(str) != \"YTreeArbor\":\n return False\n except BaseException:\n return False\n return True", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def is_valid_file(ext, argument):\n formats = {\n 'input_dataset_path': ['csv', 'txt'],\n 
'output_dataset_path': ['csv'],\n 'output_plot_path': ['png'],\n 'input_model_path': ['pkl']\n }\n return ext in formats[argument]", "def addValidateFilename(call, args=(), kwargs={}, nodeClass='Write'):", "def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")", "def test_output_invalid(self):\n assert (\n self.route.output_invalid(hug_core.output_format.json).route[\"output_invalid\"]\n == hug_core.output_format.json\n )", "def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' % filename)]\n return []", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_sa_invalid_manifest_file_unknown_error(self, _mock_depfinder):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def output_validator(klass, obj):\n\n members = [\n '_setup',\n 'render',\n ]\n interface.validate(IOutput, obj, members)", "def is_manifest(location):\n return as_posixpath(location).lower().endswith('meta-inf/manifest.mf')", "def test_invalid_output_workspace(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['output_workspaces'] = {\n 'default': 'bad_name'\n }\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertFalse(results['is_valid'])\n self.assertEqual(len(results['errors']), 1)\n self.assertEqual(results['errors'][0]['name'], 'INVALID_WORKSPACE')", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def get_manifest(self):\r\n if os.path.exists(self.manifestfile):\r\n return 
Manifest(json.loads(file(self.manifestfile).read()))\r\n return Manifest({})", "def test_main_typed_dict_not_required_nullable():\n with TemporaryDirectory() as output_dir:\n output_file: Path = Path(output_dir) / 'output.py'\n return_code: Exit = main(\n [\n '--input',\n str(JSON_SCHEMA_DATA_PATH / 'not_required_nullable.json'),\n '--output',\n str(output_file),\n '--output-model-type',\n 'typing.TypedDict',\n '--target-python-version',\n '3.11',\n ]\n )\n assert return_code == Exit.OK\n assert (\n output_file.read_text()\n == (\n EXPECTED_MAIN_PATH\n / 'main_typed_dict_not_required_nullable'\n / 'output.py'\n ).read_text()\n )", "def validateYaml(f):\n\tif os.path.isfile(f) and f.endswith('.yaml'):\n\t\ttry:\n\t\t\tjsonschema.validate(yaml.load(open(f)), cfg['post_schema'])\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tprint (\"Error loading post %s: %s\" % (f,e))[0:240] + \"...\\n\"\n\treturn False", "def is_valid_file_and_directory(args):\n if is_valid_file(args) and is_valid_directory(args):\n return True\n return False", "def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)", "def validate(\n declaration: str,\n version: Optional[str],\n):\n as3s = AS3Schema(version=version)\n _declaration = deserialize(declaration.name)\n try:\n as3s.validate(declaration=_declaration)\n LOG_STDOUT.info(\n \"Validation passed for AS3 Schema version: {}\",\n as3s.version,\n feature=\"f-strings\",\n )\n except AS3ValidationError as exc:\n LOG_STDERR.error(\n \"Validation failed for AS3 Schema version: {}\",\n as3s.version,\n feature=\"f-strings\",\n )\n if exc.context:\n for subexc in exc.context:\n LOG_STDERR.info(\n \"\\n{}\\n\",\n subexc,\n feature=\"f-strings\",\n )\n raise exc", "def test_complex_io_from_package(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": {\n \"url\": {\n \"type\": \"File\"\n }\n },\n \"outputs\": {\n \"files\": {\n \"type\": {\n \"type\": \"array\",\n \"items\": \"File\",\n }\n }\n }\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 1\n assert proc[\"inputs\"][0][\"id\"] == \"url\"\n assert 
proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert isinstance(proc[\"inputs\"][0][\"formats\"], list)\n assert len(proc[\"inputs\"][0][\"formats\"]) == 1\n assert isinstance(proc[\"inputs\"][0][\"formats\"][0], dict)\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"inputs\"][0][\"formats\"][0][\"default\"] is True\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 1\n assert proc[\"outputs\"][0][\"id\"] == \"files\"\n assert \"minOccurs\" not in proc[\"outputs\"][0]\n assert \"maxOccurs\" not in proc[\"outputs\"][0]\n assert isinstance(proc[\"outputs\"][0][\"formats\"], list)\n assert len(proc[\"outputs\"][0][\"formats\"]) == 1\n assert isinstance(proc[\"outputs\"][0][\"formats\"][0], dict)\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"outputs\"][0][\"formats\"][0][\"default\"] is True\n expect = KNOWN_PROCESS_DESCRIPTION_FIELDS\n fields = set(proc.keys()) - expect\n assert len(fields) == 0, \\\n \"Unexpected fields found:\\n Unknown: {}\\n Expected: {}\".format(list(fields), list(expect))" ]
[ "0.689081", "0.67470354", "0.6491389", "0.6478711", "0.64743024", "0.6428948", "0.642016", "0.6391841", "0.63199776", "0.6262872", "0.5994585", "0.59788805", "0.5946266", "0.5936269", "0.5893858", "0.58807135", "0.58602494", "0.5850808", "0.5838025", "0.5832804", "0.5797433", "0.5791419", "0.57425106", "0.5738217", "0.57367176", "0.5734439", "0.57109505", "0.5704293", "0.56795967", "0.566765", "0.56386167", "0.56368023", "0.56289476", "0.5621306", "0.5610092", "0.55944425", "0.55927694", "0.5568217", "0.55665267", "0.5542135", "0.5520769", "0.547837", "0.5452791", "0.5449079", "0.5442564", "0.5440821", "0.5439628", "0.5427391", "0.54162484", "0.53901565", "0.5389579", "0.538477", "0.53627354", "0.53525555", "0.5343147", "0.5322261", "0.5318654", "0.53122884", "0.5311953", "0.5311431", "0.53112197", "0.5300954", "0.5272938", "0.5271893", "0.5259533", "0.525537", "0.52550316", "0.52545494", "0.524318", "0.52389", "0.5235253", "0.52158463", "0.5214771", "0.5214201", "0.52119386", "0.52071494", "0.5199464", "0.51977974", "0.5197196", "0.5197117", "0.51964897", "0.5195389", "0.5185872", "0.51791036", "0.5154879", "0.51513743", "0.5148111", "0.5147076", "0.5146193", "0.51416975", "0.51375383", "0.5134312", "0.5134312", "0.5132777", "0.51296985", "0.5121746", "0.5109709", "0.51071006", "0.5099106", "0.50971895" ]
0.7151388
0
Getter that will return cls[name] if cls is a dict or cls otherwise
def _get_cls(name, cls): return cls.get(name, None) if isinstance(cls, dict) else cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(cls, name):\n cls.initialize()\n if isinstance(name, cls):\n return name\n else:\n return cls.mapping[name]", "def getInstacefromcls(cls, clsname, valuedict=None):\n for i in range(len(clslist)):\n if clsname == clslist[i]:\n return clslist[i](valuedict)\n return None", "def __get__(self, instance, cls=None):\n\n if cls is None:\n cls = type(instance)\n\n try:\n return vars(cls)[self.__cache_name__]\n except KeyError:\n result = super().__get__(instance, cls)\n setattr(cls, self.__cache_name__, result)\n return result", "def __getitem__(cls, name):\n return cls.__getattr__(name)", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def __getattr__(self, name: str) -> any:\n return self._dict[name]", "def lookup_by_class(dict_,class_):\n v = None\n for c in classlist(class_)[::-1]:\n if c in dict_:\n v = dict_[c]\n break\n return v", "def __getitem__(cls, key):\n return cls(cls._nameToValue[key])", "def withdraw(\n self, \n name: Union[str, Sequence[str]], \n kwargs: Optional[denovo.base.Dictionary] = None) -> (\n Union[Type[Any], object]):\n names = denovo.convert.listify(name)\n item = None\n for key in names:\n for catalog in ['instances', 'classes']:\n try:\n item = getattr(self, catalog)[key]\n break\n except KeyError:\n pass\n if item is not None:\n break\n if item is None:\n raise KeyError(f'No matching item for {name} was found')\n if kwargs is not None:\n if 'name' in item.__annotations__.keys() and 'name' not in kwargs:\n kwargs[name] = names[0]\n if inspect.isclass(item):\n item = item(**kwargs)\n else:\n for key, value in kwargs.items():\n setattr(item, key, value) \n return item # type: ignore", "def lookup(self, cls, name, mode):\n mro = [el.__name__ for el in cls.mro()]\n registry = self.method_registry if mode=='method' else self.type_registry\n\n for class_name in mro:\n entries = registry[class_name]\n if name in entries:\n return entries[name]\n raise KeyError(\"Could not find method named %r.\"\n \" Please ensure classes using component decorators\"\n \" are decorated with the Model.definition\"\n \" class decorator.\" % name)", "def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]", "def __getitem__(self, name):\n if name in self:\n try:\n return getattr(self, name)\n except AttributeError:\n pass\n\n raise KeyError(name)", "def _get(self, name):\n return object.__getattribute__(self, name)", "def _get(self, name):\n return object.__getattribute__(self, name)", "def class_hook(dct):\n if len(dct) == 1:\n class_name, value = next(iter(dct.items()))\n class_name = class_name.strip('_')\n if class_name == 'Dictionary':\n return Dictionary(*value)\n return dct", "def get_class(self, name):\n raise NotImplementedError", "def get_class(self, name):\n if name in self._objects_mapping:\n classname = self._objects_mapping[name]\n\n klass = None\n try:\n klass = getattr(self._sdk, classname)\n except:\n Printer.raise_error('Unknown class %s' % classname)\n\n return klass\n\n Printer.raise_error('Unknown object named %s' % name)", "def __getitem__(self, name):\n return getattr(self, name)", "def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)", "def from_string(cls, name):\n if hasattr(cls,name):\n return cls.__getattribute__(name)\n else:\n return 
None", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Metaclass, cls).__call__(*args, **kwargs)\n return cls._instance[cls]", "def by_name(name, cls=None):\n\n if cls is None:\n cls = base.Point\n\n if cls.__name__ == name:\n return cls\n\n for c in cls.__subclasses__():\n cc = by_name(name, c)\n if cc is not None:\n return cc\n\n return None", "def __getattr__ (self, name):\n\t\ttry:\n\t\t\treturn self.__dict__[name]\n\t\texcept KeyError:\n\t\t\treturn self.__dict__[\"value\"][name]", "def __getitem__(self, name):\r\n return self.get(name)", "def __getitem__(self, name):\n return self.get(name)", "def __getattr__(self, type_or_name):\n return self._retrieve_registered_value(type_or_name)", "def get_by_name(self, class_name, object_name, session):", "def __getitem__(self, name):\n return self.__getattr__(name)", "def __call__(self, cls_or_name: Union[str, Type]) -> Type[DTSchema]:\n if isinstance(cls_or_name, type):\n n = cls_or_name.__name__\n else:\n n = cls_or_name\n if hasattr(self, n):\n return getattr(self, n)\n raise ValueError(f\"Could not find type {cls_or_name}\")", "def __getattr__(self, name):\n return self.lookup(name)", "def __getitem__(self, name):\n ikEl = self.infoKinds.get(name, None)\n if ikEl:\n return ikEl.toDict(self)\n return None", "def __getitem__(self, item):\n if type(item) == str:\n return self.__dict__[item]\n else:\n return self.__dict__", "def __getattribute__(self, name: str) -> Optional[Any]:\n\n try:\n return type.__getattribute__(self, name)\n except AttributeError as error:\n try:\n return self.__dict__[\"members\"][name]\n except KeyError:\n raise error", "def get(self, name_or_klass):\n if not isinstance(name_or_klass, str):\n name_or_klass = name_or_klass.__name__\n return self._modes[name_or_klass]", "def __getitem__(cls, layer_name):\n return getattr(cls, layer_name)", "def get(\n cls,\n name_or_numeric, # type: Union[str, int, T]\n default=None, # type: Optional[Default]\n ):\n # type: (...) 
-> Union[Enum, Optional[Default]]\n if isinstance(name_or_numeric, cls):\n return name_or_numeric\n\n if isinstance(name_or_numeric, int):\n try:\n return cls(name_or_numeric)\n except ValueError:\n pass\n elif isinstance(name_or_numeric, str):\n try:\n return cls[name_or_numeric]\n except KeyError:\n pass\n\n return default", "def __get__(self, instance, owner):\n attr_name = self.get_attr_name(instance)\n return instance.__dict__.get('_%s_typed' % attr_name, self.default)", "def get(self, key, sort_key, **args):\n if klass is None:\n klass = self.klass\n record = self.storage_get(key, sort_key)\n if record is None:\n return klass(**args)\n return klass.from_json(record)", "def _get_optimised(self, cls: Type[RV]) -> Type[RV]:\n try:\n return self._optimised[cls]\n except KeyError:\n pass\n\n # Check if the class comes from psycopg.types and there is a class\n # with the same name in psycopg_c._psycopg.\n from psycopg import types\n\n if cls.__module__.startswith(types.__name__):\n new = cast(Type[RV], getattr(_psycopg, cls.__name__, None))\n if new:\n self._optimised[cls] = new\n return new\n\n self._optimised[cls] = cls\n return cls", "def get_class(name):\n try:\n cls, constructor = registry[name]\n except KeyError:\n raise UnregisteredClassError(\"'%s' is not a registered \"\n \"JSONAlizable class name\" % name, name)\n if constructor is not None:\n return constructor\n return cls", "def __getattr__(cls, name):\n name = name.lower()\n\n try:\n if cls.section is None:\n return _CONFIG_YAML[name]\n elif cls.subsection is None:\n return _CONFIG_YAML[cls.section][name]\n else:\n return _CONFIG_YAML[cls.section][cls.subsection][name]\n except KeyError as e:\n # If one of the handler lists isn't defined, return an empty list.\n log.warning(f\"{name} is not defined in the config.yaml file -- returning an falsy value.\")\n if cls._get_annotation(name) == list:\n return []\n elif cls._get_annotation(name) == dict:\n return {}\n else:\n return None", "def __getitem__(self, name):\n with pd.get_store(self.store_path) as store:\n obj = store[name]\n\n if isinstance(obj, pd.Series):\n obj = obj.to_dict()\n obj = guess_values_type(obj)\n\n return obj", "def get_module(self, cls_name, module_name='module'):\n if module_name not in self._module_dict:\n raise KeyError('{module_name} is not in registry')\n dd = self._module_dict[module_name]\n if cls_name not in dd:\n raise KeyError('{cls_name} is not registered in {module_name}')\n\n return dd[cls_name]", "def getContainerfromCls(cls, clsname):\n for i in range(len(clslist)):\n if clsname == clslist[i]:\n return clslist[i]().getcontainer()\n return None", "def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]", "def get_class(self, name):\n return self.host.get_class(name)", "def __getattr__(cls, name):\n try:\n return cls._member_map_[name.upper()]\n except KeyError:\n raise AttributeError(name)", "def __getattr__(cls, name):\n try:\n return cls._member_map_[name.upper()]\n except KeyError:\n raise AttributeError(name)", "def __getattr__(cls, name):\n try:\n return cls._member_map_[name.upper()]\n except KeyError:\n raise AttributeError(name)", "def __getattr__(cls, name):\n try:\n return cls._member_map_[name.upper()]\n except KeyError:\n raise AttributeError(name)", "def __getattr__(cls, name):\n try:\n return cls._member_map_[name.upper()]\n except KeyError:\n raise AttributeError(name)", "def __getattr__(cls, name):\n try:\n return cls._member_map_[name.upper()]\n except KeyError:\n raise 
AttributeError(name)", "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def objects(self, cls):\n for name, info in direct_fields(self.__class__).items():\n if issubclass(cls, info.sub_fields[0].type_):\n return getattr(self, name)\n raise TypeError(cls)", "def get_instance(self, name):\n klass = self.get_class(name)\n return klass()", "def __getattr__(self, name, default=None):\n if name in [\"config\", \"configuration\", \"conf\"]:\n return self._configuration\n\n if name == \"data\":\n return self._data\n\n if name == \"http\":\n return self._http\n\n return default", "def __getitem__(self, key):\r\n\r\n return getattr(self, key)", "def get(**args):\n\tglobal _objstore\n\t_objstore = _objstore or ObjStore()\n\n\treturn _objstore.get(args['type'], args['name'])", "def __class_getitem__(cls, key):\n return Registry.for_appname(key)", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def __getitem__(self, name):\n return self.f_get(name)", "def find_by_name(self, name):\n return self.get(name)", "def get_class(self, class_name):\n try:\n return self._classes[class_name]\n except KeyError:\n raise NameError", "def safe_get(cls: Type[T]) -> T:\n global _metrics_singularities\n\n if cls not in _metrics_singularities:\n _metrics_singularities[cls] = cls()\n\n return _metrics_singularities[cls]", "def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])", "def __getattr__(self, item):\n return self.__dict__[item] if item in self.__dict__ else self.data.get(item)", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def __getattr__(self, name):\r\n\t\treturn self.properties[name]", "def __getattr__(self, name):\n c = self.context\n # Direct type or subclass of type\n if type(c).__name__.lower() == name or name in [x.__name__.lower() for x in type(c).__bases__]:\n return c\n\n # If the attached object is the wrong type then see if *it* has a property of that name\n return getattr(c, name, None)", "def field_by_name(cls, name):\n return cls.__by_name[name]", "def __getitem__(self, key):\n return getattr(self, key)", "def __getitem__(self, key):\n return getattr(self, key)", "def get_by(cls, name, value):\n return cls.query(getattr(cls, name) == value).get()", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def __new__(metacls, name, bases, classdict):\n # classdict is not always a dict wtf\n if not isinstance(classdict, dict):\n classdict = dict(classdict)\n\n for (key, value) in iteritems(classdict):\n if isinstance(value, TraitType):\n value.name = key\n elif inspect.isclass(value):\n if issubclass(value, TraitType):\n value_inst = value()\n value_inst.name = key\n classdict[key] = value_inst\n\n return super(MetaModel, metacls).__new__(metacls, name, bases, classdict)", "def get_deco_class_settings_dict(cls, clsname) -> OrderedDict:\n return cls._classname2SettingsData_dict[clsname]", "def __getattribute__(self, name):\n\n camel_name = AgaveUtils.to_camel_case(name)\n try:\n _wrapped = object.__getattribute__(self, '_wrapped')\n except AttributeError:\n _wrapped = {}\n\n if camel_name not in _wrapped:\n return object.__getattribute__(self, name)\n\n val = _wrapped.get(camel_name)\n if isinstance(val, dict):\n try:\n internal = object.__getattribute__(\n self,\n 
'__{name}'.format(name=name),\n )\n return internal\n except AttributeError:\n pass\n\n if 'self' in val:\n _self = val.pop('self')\n val['_self'] = copy.deepcopy(_self)\n internal = BaseAgaveResource(client=self._ac, **val)\n object.__setattr__(\n self,\n '__{name}'.format(name=name),\n internal\n )\n return internal\n\n return val", "def find(self, cls):\r\n for currentClass in self._classesToCheck(cls):\r\n if currentClass in self.config:\r\n return self.config[currentClass]\r\n else:\r\n return None", "def __getattr__(self, name):\n value = self.__dict__.get(name)\n if not value:\n raise AttributeError('No such attribute {0}'.format(name))\n return value", "def get(self, key: Hashable) -> Any: # type: ignore\n try:\n return[key]\n except (KeyError, TypeError):\n if self.default_factory is None:\n raise KeyError(f'{key} is not in {self.__class__}')\n else:\n try:\n return self.default_factory()\n except TypeError:\n return self.default_factory", "def __getattr__(self, name):\n if name.startswith('_'):\n return defaultdict.__getattribute__(self, name)\n return self[name]", "def get(self, cls, name, data_source, **attr):\n #ds = self._default_DataSource if data_source is None else data_source\n if data_source is None:\n cache = self._cache.setdefault(cls, {})\n else:\n cache = self._cache.setdefault(data_source._id, {}).setdefault(cls, {})\n\n try:\n return cache[name]\n except KeyError:\n if cls in ['Neuron', 'NeuronFragment', 'NeuronAndFragment', 'Synapse', 'InferredSynapse']:\n q = self._find(cls, data_source, uname = name)\n else:\n q = self._find(cls, data_source, name = name)\n if len(q) == 1:\n obj = q.node_objs[0]\n if data_source is None:\n tmp = q.owned_by(cls = 'DataSource', cols = '@rid')\n if len(tmp) == 1:\n ds_rid = list(tmp.nodes)[0].oRecordData['rid'].get_hash()\n self.set(cls, name, obj, ds_rid)\n elif len(tmp) > 1:\n raise ValueError('unexpected more than 1 DataSource found')\n else:\n self.set(cls, name, obj, None)\n else:\n self.set(cls, name, obj, None)\n elif len(q) > 1:\n raise DuplicateNodeError('Hit more than one instance of {} with name {} in database.'.format(cls, name))\n else:\n raise RecordNotFoundError('{} {} not found in database.'.format(cls, name))\n return obj", "def __getitem__(self, name):\n value = self.get(name)\n if value is not None:\n return value\n raise KeyError(name)", "def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n instance = super().__call__(*args, **kwargs)\n cls._instances[cls] = instance\n return cls._instances[cls]", "def from_dict(cls, d):\n attr_cls = etau.get_class(d[\"type\"])\n return attr_cls._from_dict(d)", "def get_obj(self, name):\n val = self.get(name)\n if not val:\n return None\n if name.find('queue') >= 0:\n obj = boto.lookup('sqs', val)\n if obj:\n obj.set_message_class(ServiceMessage)\n elif name.find('bucket') >= 0:\n obj = boto.lookup('s3', val)\n elif name.find('domain') >= 0:\n obj = boto.lookup('sdb', val)\n else:\n obj = None\n return obj", "def get_obj(self, name):\r\n val = self.get(name)\r\n if not val:\r\n return None\r\n if name.find('queue') >= 0:\r\n obj = boto.lookup('sqs', val)\r\n if obj:\r\n obj.set_message_class(ServiceMessage)\r\n elif name.find('bucket') >= 0:\r\n obj = boto.lookup('s3', val)\r\n elif name.find('domain') >= 0:\r\n obj = boto.lookup('sdb', val)\r\n else:\r\n obj = None\r\n return obj", "def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if 
logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)", "def get_type_by_name(self, name):\n raise NotImplementedError()", "def __getitem__(self, key):\n return self.__getattr__(key)", "def get_class(mod, class_name: str):\n for name_val in inspect.getmembers(mod, inspect.isclass):\n name = name_val[0]\n val = name_val[1]\n if name == class_name:\n return val\n return None", "def lookup(self, name):\n return self.fieldDict[name]", "def get_class(self, key: Union[int, str]) -> Union[GDScriptClass, None]:\n if (isinstance(key, str)):\n if (key.startswith(\"res://\")):\n return self._classes_by_resource_path[key]\n else:\n return self._classes_by_name[key]\n elif isinstance(key, int):\n return self._classes_by_type_id[key]\n\n raise Exception(\"Key must be str or int\")", "def __getitem__(self, name):\n if name in self.data: return self.data[name]", "def by_name(cls, name):\n if name in cls._registry:\n result = cls._registry[name]\n else:\n result = cls._registry[name] = cls(bind=Session._datastores.get(name))\n return result", "def get_from_dict(cls, data):\n s = cls()\n for x in data:\n if x in s.__dict__:\n s.__dict__[x] = data[x]\n return s", "def _get(self, name):\n raise NotImplementedError", "def get(cls,name,as_type = str):\n inst = cls.inst()\n if name in inst.options:\n return as_type(inst.options[name])\n else:\n raise OptionsError(\"No option with key '%s'\" % name)", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def factory_get(self, name):\n try:\n return registry[name]\n except KeyError:\n import traceback\n traceback.print_exc()\n Log.error(\"Cannot find %s in {%s}\" % (name, ', '.join(registry.keys())))" ]
[ "0.7089814", "0.68150103", "0.6775328", "0.6689568", "0.65000015", "0.6219528", "0.6086408", "0.60403633", "0.6031711", "0.6026237", "0.5955669", "0.5954231", "0.5952677", "0.5952677", "0.59358865", "0.5935747", "0.59133816", "0.5905671", "0.58998924", "0.5898058", "0.5878466", "0.5868883", "0.5852091", "0.58493954", "0.584092", "0.584062", "0.5837674", "0.58309555", "0.58293474", "0.5822067", "0.57969075", "0.5777052", "0.5743826", "0.5741477", "0.5724956", "0.5693387", "0.5674271", "0.5621341", "0.5620656", "0.5584028", "0.55814314", "0.5570313", "0.5563164", "0.5560803", "0.55575114", "0.5544266", "0.5536068", "0.5536068", "0.5536068", "0.5536068", "0.5536068", "0.5536068", "0.5533961", "0.553293", "0.55111563", "0.55019724", "0.54973674", "0.5491948", "0.549169", "0.5490506", "0.54880714", "0.54824996", "0.54748875", "0.54720354", "0.5465645", "0.5449655", "0.54427105", "0.54427105", "0.54392576", "0.54389465", "0.54381514", "0.54381514", "0.5426449", "0.54247373", "0.54191935", "0.54058903", "0.5399134", "0.53927153", "0.53878206", "0.53804284", "0.53768736", "0.53648365", "0.53632736", "0.53626245", "0.5350491", "0.53431964", "0.53388286", "0.5337055", "0.5332989", "0.53298426", "0.53258175", "0.53234524", "0.5323105", "0.53218967", "0.5312239", "0.5306872", "0.53033864", "0.5296291", "0.5289861", "0.52840126" ]
0.8500315
0
Validate strands from sources provided as keyword arguments
def validate(self, allow_missing=False, allow_extra=False, cls=None, **kwargs): # pop any strand name:data pairs out of kwargs and into their own dict source_kwargs = tuple(name for name in kwargs.keys() if name in ALL_STRANDS) sources = dict((name, kwargs.pop(name)) for name in source_kwargs) for strand_name, strand_data in sources.items(): if not allow_extra: if (strand_data is not None) and (strand_name not in self.available_strands): raise exceptions.StrandNotFound( f"Source data is provided for '{strand_name}' but no such strand is defined in the twine" ) if not allow_missing: if (strand_name in self.available_strands) and (strand_data is None): raise exceptions.TwineValueException( f"The '{strand_name}' strand is defined in the twine, but no data is provided in sources" ) if strand_data is not None: # TODO Consider reintroducing a skip based on whether cls is already instantiated. For now, leave it the # responsibility of the caller to determine what has already been validated and what hasn't. # # Use the twine to validate and instantiate as the desired class # if not isinstance(value, type(cls)): # self.logger.debug( # "Instantiating %s as %s and validating against twine", name, cls.__name__ if cls else "default_class" # ) # return self.twine.validate(name, source=value, cls=cls) method = getattr(self, f"validate_{strand_name}") klass = self._get_cls(strand_name, cls) sources[strand_name] = method(strand_data, cls=klass, **kwargs) else: sources[strand_name] = None return sources
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_strand(self, name, source, **kwargs):\n return self.validate({name: source}, **kwargs)[name]", "def validate(cls, **kwargs: Any) -> None: # pragma no cover", "def validate_source(cls, source_data: Dict[str, dict], verbose: bool = True):\n cls._validate_source_data(source_data=source_data, verbose=verbose)", "def __checkArgs(self, kwargs):\n requiredArgs = self.__class__.__requiredArgs + \\\n self.__class__.__singleCompArgs if self.singleComp else\\\n self.__class__.__requiredArgs + self.__class__.__doubleCompArgs\n for arg in requiredArgs:\n if arg not in kwargs:\n raise ValueError(\"Essential keyword argument %s missing\" % arg)\n for (k, v) in kwargs.items():\n assert k in self.__class__.__allowedArgs, \"Invalid Argument %s\" % k", "def test_empty_arguments(self):\n arg1 = {'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg1)\n\n arg2 = {'src': '.',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg2)\n\n arg3 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg3)\n\n arg4 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg4)", "def validate_input(self, *args):\n return", "def validate_args(args):\n setup_logging(args.verbose)\n log.debug('Raw arguments:\\n{}'.format(args))\n\n # Check if pipeline file exists\n args.pipeline = Path(args.pipeline)\n\n if not args.pipeline.is_file():\n log.error('No such file {}'.format(args.pipeline))\n exit(1)\n\n args.pipeline = args.pipeline.resolve()\n\n return args", "def _validate_kwargs(self, kwargs):\n pass", "def _validate_sources(generated_sources, original_sources):\n\n generated_sources = list(set(generated_sources))\n original_sources = list(set(original_sources))\n not_existent_source = []\n for source in original_sources:\n if source not in generated_sources:\n not_existent_source.append(source)\n\n if not_existent_source:\n print('WARN: Some sources did exist in generated file')\n print(not_existent_source)\n return False\n\n return True", "def validate_inputs(self, extra_inputs=[]):\n pass", "def validate(args):\n if args.format not in FORMATTERS:\n print(\n \"Subtitle format not supported. \"\n \"Run with --list-formats to see all supported formats.\"\n )\n return False\n\n if args.src_language not in LANGUAGE_CODES.keys():\n print(\n \"Source language not supported. \"\n \"Run with --list-languages to see all supported languages.\"\n )\n return False\n\n if args.dst_language not in LANGUAGE_CODES.keys():\n print(\n \"Destination language not supported. 
\"\n \"Run with --list-languages to see all supported languages.\"\n )\n return False\n\n if not args.source_path:\n print(\"Error: You need to specify a source path.\")\n return False\n\n return True", "def validate():", "def valid(self, *args, **kwargs) -> Any:\n pass", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def validate(args, limit_to=None):\n missing_key_list = []\n no_value_list = []\n validation_error_list = []\n\n required_keys = [\n 'workspace_dir',\n 'dem_uri',\n 'outlet_shapefile_uri',\n 'flow_threshold',\n 'snap_distance']\n\n for key in required_keys:\n if limit_to is None or limit_to == key:\n if key not in args:\n missing_key_list.append(key)\n elif args[key] in ['', None]:\n no_value_list.append(key)\n\n if len(missing_key_list) > 0:\n # if there are missing keys, we have raise KeyError to stop hard\n raise KeyError(\n \"The following keys were expected in `args` but were missing \" +\n ', '.join(missing_key_list))\n\n if len(no_value_list) > 0:\n validation_error_list.append(\n (no_value_list, 'parameter has no value'))\n\n file_type_list = [\n ('dem_uri', 'raster'),\n ('outlet_shapefile_uri', 'vector')]\n\n # check that existing/optional files are the correct types\n with utils.capture_gdal_logging():\n for key, key_type in file_type_list:\n if (limit_to in [None, key]) and key in required_keys:\n if not os.path.exists(args[key]):\n validation_error_list.append(\n ([key], 'not found on disk'))\n continue\n if key_type == 'raster':\n raster = gdal.OpenEx(args[key])\n if raster is None:\n validation_error_list.append(\n ([key], 'not a raster'))\n del raster\n elif key_type == 'vector':\n vector = gdal.OpenEx(args[key])\n if vector is None:\n validation_error_list.append(\n ([key], 'not a vector'))\n del vector\n\n return validation_error_list", "def validate(self):\n\n for required in [SourceIdentifierParameter, DestinationPath, Project]:\n if not self.get_param_by_type(required):\n raise JobSetValidationError(\n f\"Missing required parameter {required}\"\n )\n\n if not self.has_path_source():\n # If this set has no path source, root source does not matter\n _, params = self.split_parameter(type_in=RootSourcePath)\n else:\n params = self.parameters\n try:\n self.make_unc_paths(params)\n except ParameterMappingError as e:\n raise JobSetValidationError(\n \"Source and destination need to be absolute windows paths\"\n ) from e", "def validate(self, *, destination: FieldModel, source: list) -> bool:\n if not isinstance(source, list):\n raise ValueError(f\"Action source script does not conform to required structure. ({source})\")\n for chunk in self.core.chunks(lst=source, n=len(self.structure)):\n # Loops through the phrasing of the structure, and checks that each term is as expected\n # e.g. [ModifierModel, FieldModel] for [modifier1, field1, modifier2, field2]\n # does not check that the actual terms match, though\n for i, term in enumerate(self.structure):\n if isinstance(chunk[i], term):\n continue\n elif isinstance(chunk[i], dict):\n # Nested source\n if not chunk[i].get(\"action\") and not not chunk[i].get(\"source\"):\n raise ValueError(f\"Nested script does not conform to required structure. 
({chunk[i]})\")\n chunk[i][\"action\"].validate(source=chunk[i][\"source\"])\n else:\n raise ValueError(\n f\"Source structure ({chunk}) doesn't conform to ACTION structure requirements ({self.structure}).\"\n )\n return True", "def _check_kwargs(cls, kwargs: Mapping[str, Any]):\n for name, prop in cls._props_by_name.items():\n if prop.required and name not in kwargs:\n raise ValueError(f'Required property {name} is missing.')\n for name, value in kwargs.items():\n if name not in cls._props_by_name:\n raise KeyError(f'Unknown property {name}.')\n prop = cls._props_by_name[name]\n prop.validate(value)", "def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! (--source=file, --input=file)\")", "def __validate():\n # TODO: implement", "def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)", "def _validateInputs(self):\n if self.args[\"Counties\"] == [] and self.args[\"BBox\"] == None:\n raise Exception(\"Invalid arguments provided. Must provide either a geographical bounding box or a list of counties.\")\n\n if self.args[\"StartDateTime\"] > self.args[\"EndDateTime\"]:\n raise Exception(\"Invalid arguments provided. 
StartDateTime cannot be after EndDateTime\")", "def validate(self):\n super(ReferenceMapping, self).validate()\n self.check_observatory()\n self.check_instrument()\n self.check_filekind()\n self.check_schema_uri()\n if \"reference_to_dataset\" in self.header:\n parkeys = self.get_required_parkeys()\n for _reference, dataset in self.reference_to_dataset.items():\n assert dataset.upper() in parkeys, \\\n \"reference_to_dataset dataset keyword not in parkey keywords.\"\n with log.augment_exception(\"Invalid mapping:\", self.instrument, self.filekind):\n self.selector.validate_selector(self.tpn_valid_values)", "def _kwargs_check(feature_extraction, kwargs):\n # When using policy_kwargs parameter on model creation,\n # all keywords arguments must be consumed by the policy constructor except\n # the ones for the cnn_extractor network (cf nature_cnn()), where the keywords arguments\n # are not passed explicitly (using **kwargs to forward the arguments)\n # that's why there should be not kwargs left when using the mlp_extractor\n # (in that case the keywords arguments are passed explicitly)\n if feature_extraction == 'mlp' and len(kwargs) > 0:\n raise ValueError(\"Unknown keywords for policy: {}\".format(kwargs))", "def uses_all(word, required):\n pass", "def validate_args(args):\n\n # check epiweek specification\n first, last, week = args.first, args.last, args.epiweek\n for ew in [first, last, week]:\n if ew is not None:\n flu.check_epiweek(ew)\n if week is not None:\n if first is not None or last is not None:\n raise ValueError('`week` overrides `first` and `last`')\n first = last = week\n if first is not None and last is not None and first > last:\n raise ValueError('`first` must not be greater than `last`')\n\n # validate and extract name-location pairs\n pair_regex = '[^-,]+-[^-,]+'\n names_regex = '%s(,%s)*' % (pair_regex, pair_regex)\n if not re.match(names_regex, args.names):\n raise ValueError('invalid sensor specification')\n\n return args.names, first, last, args.valid, args.test", "def validate_arguments(self,args):\n\t\tif args.org == None:\n\t\t\tprint('Please specify Organization name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.repo == None:\n\t\t\tprint('Please specify Repositories name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == None:\n\t\t\tprint('Please specify type of the event. Exiting.')\n\t\t\tsys.exit(0)", "def _validate_params(title, start, end, description, show_me_as):\n if start and end:\n start_date = datetime.datetime.strptime(start, '%Y-%m-%d')\n end_date = datetime.datetime.strptime(end, '%Y-%m-%d')\n if start_date > end_date:\n raise ValueError(\"Start date cannot be after end date}\")\n\n if title and not isinstance(title, str):\n raise TypeError(\"title must be a string\")\n\n if description and not isinstance(description, str):\n raise TypeError(\"description must be a string\")\n\n if show_me_as and transparency_d.get(show_me_as) is None:\n raise ValueError(f\"Invalid value ({show_me_as}) for show_me_as. \"\n f\"Accepted values are: {list(transparency_d.keys())}\")", "def validate_args(args):\n if len(args) != 4:\n print(\"ERROR: incorrect length of args.\")\n cli_help_msg()\n sys.exit()\n\n #first argument: check it is a read directory\n if args[1].lower().strip() == 'docid' or args[1].lower().strip() == 'docno':\n print(\"ERROR: incorrect key. 
Please pass 'docid' or 'docno'\")\n sys.exit()", "def check_common_args(args, function_name,\n valid_functions=['gaperture', 'gmap', 'gfind'],\n allow_no_coords=False):\n\n try:\n function_name = function_name.strip().lower()\n except AttributeError:\n raise gPhotonArgsError(\"Invalid function: {f}\".format(f=function_name))\n\n if not function_name in valid_functions:\n raise gPhotonArgsError(\"Invalid function: {f}\".format(f=function_name))\n\n try:\n args.band = args.band.strip()\n except AttributeError:\n raise SystemExit(\"Invalid band: {b}\".format(b=args.band))\n\n # This will ensure calpath has a trailing '/'.\n if function_name in ['gaperture', 'gmap']:\n args.calpath = os.path.join(args.calpath, '')\n # [Future]: Consider fixing this statement. This is breaking nosetests,\n # but it's not a bad idea...\n # if not os.path.isdir(args.calpath):\n # raise SystemExit(\"Calibration path not found: \" + args.calpath)\n\n if (not (args.ra and args.dec) and not args.skypos and\n not allow_no_coords):\n raise SystemExit(\"Must specify either both RA/DEC or SKYPOS.\")\n elif (args.ra and args.dec) and args.skypos:\n if not (args.ra == args.skypos[0] and args.dec == args.skypos[1]):\n raise SystemExit(\"Must specify either RA/DEC or SKYPOS, not both.\")\n elif (args.ra and args.dec) and not args.skypos:\n args.skypos = [args.ra, args.dec]\n elif not (args.ra and args.dec) and args.skypos:\n args.ra, args.dec = args.skypos\n\n if args.suggest and function_name in ['gfind', 'gaperture']:\n (args.ra, args.dec, args.radius, args.annulus1,\n args.annulus2) = dbt.suggest_parameters(args.band, args.skypos,\n verbose=0)\n args.skypos = [args.ra, args.dec]\n if args.verbose:\n print(\"Recentering on [\"+str(args.ra)+\", \"+str(args.dec)+\"]\")\n print(\"Setting radius to \"+str(args.radius))\n print(\"Setting annulus to [\"+str(args.annulus1)+\", \"+\n str(args.annulus2)+\"]\")\n\n if args.skypos:\n if np.array(args.skypos).shape != (2,):\n raise gPhotonArgsError(\n \"Skypos (--skypos) must be a 2-element array.\")\n args.ra, args.dec = args.skypos\n\n if args.ra and not 0. 
<= args.ra <= 360.:\n raise SystemExit(\n \"RA of {ra} does not satisfy 0 <= RA <= 360\".format(ra=args.ra))\n\n if args.dec and not -90 <= args.dec <= 90:\n raise SystemExit(\n \"Dec of {dec} does not satisfy -90 <= DEC <= 90\".format(\n dec=args.dec))\n\n if args.detsize and args.detsize <= 0.:\n raise SystemExit(\"Effective field diameter (--detsize) must be > 0\")\n\n if args.maxgap and args.maxgap <= 0.:\n raise SystemExit(\"Maximum gap length (--maxgap) must be > 0 seconds.\")\n if args.minexp and args.minexp <= 0.:\n raise SystemExit(\"Minimum valid exposure depth (--minexp) must be > 0\"\n \" seconds.\")\n\n if args.retries and args.retries <= 0.:\n raise SystemExit(\"Number of retries (--retries) must be > 0.\")\n\n # tmin / tmax must be defined and reasonable\n if not args.tmin or args.tmin <= 0.:\n raise SystemExit(\"T0 (--t0) must be > 0.\")\n if not args.tmax or args.tmax <= 0.:\n raise SystemExit(\"T1 (--t1) must be > 0.\")\n if args.tmin >= args.tmax:\n raise SystemExit(\"Minimum time (--t0) must be < maximum time (--t1).\")\n\n if args.trange:\n if np.array(args.trange).shape == (2, ):\n args.trange = [args.trange]\n if not (len(np.array(args.trange).shape) == 2 and\n np.array(args.trange).shape[1] == 2):\n raise SystemExit(\"trange (--trange) must be a pairwise list.\")\n # Individually check the entries for sanity\n for t in args.trange:\n if t[0] <= 0 or t[1] <= 0:\n raise SystemExit('Times must be positive: {t}'.format(t=t))\n if t[1] <= t[0]:\n raise SystemExit('Start time ({t0}) must preceed end time'\n ' ({t1})'.format(t0=t[0], t1=t[1]))\n elif not allow_no_coords and function_name in ['gmap', 'gaperture']:\n args.trange = dbt.fGetTimeRanges(args.band, args.skypos,\n trange=[args.tmin, args.tmax],\n maxgap=args.maxgap, minexp=args.minexp,\n detsize=args.detsize,\n skyrange=args.skyrange)\n else:\n # If no coordinates specified then use a huge time range for now.\n args.trange = [args.tmin, args.tmax]\n\n return args", "def test_required_unknown():\n parser=argparse.ArgumentParser()\n parser.add_argument('--region',\n help='Enter a region like us-east-2.',\n dest=\"region\",\n action=ValidateRegion,\n required=True)\n parser.add_argument('--output',\n help='pretty, json, yaml',\n dest=\"output\",\n action=Validateoutput,\n nargs=\"?\",\n default=\"yaml\"\n )\n parser.add_argument('--filter-types',\n help='eg: AWS::IAM::Role or AWS::EC2::Instance. Using \"ALL\" with no quotes and we will run it for all current supported resource types',\n nargs='+',\n dest=\"types\",\n action=Validatefilter,\n required=True)\n parser.add_argument('--tag_keys',\n help='Allows you to exclude particular AWS Resources based on the presence of a particular tag key on the resource. This will only be applied to AWS Resources that support tagging. 
Valid values: any string that is a valid tag - multiple values can be supplied.',\n dest=\"tags\")\n \n #This should raise an error since this will cause a SystemExit since bad params were passed in \n args = [\"--region\", \"NADA\",'--output', \"NADA\",'--filter-types',\"NADA\"]\n with pytest.raises(SystemExit):\n parser.parse_args(args)\n \n \n \n \n #This should NOT raise an error since good params were passed into the parser\n args = [\"--region\", \"us-east-1\",'--output', \"yaml\",'--filter-types',\"AWS::EC2::Instance\"] \n with not_raises(SystemExit):\n parser.parse_args(args)", "def check_params(params):\n\n required = ['initlandmarks']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def valid_args(args):\n is_valid = True\n if not args.ts_url or not args.username or not args.password or not args.from_user or not args.to_user:\n eprint(\"Missing required parameters.\")\n is_valid = False\n\n return is_valid", "def validate(**vkargs):\r\n depr('Use route wildcard filters instead.')\r\n def decorator(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kargs):\r\n for key, value in vkargs.iteritems():\r\n if key not in kargs:\r\n abort(403, 'Missing parameter: %s' % key)\r\n try:\r\n kargs[key] = value(kargs[key])\r\n except ValueError:\r\n abort(403, 'Wrong parameter format for: %s' % key)\r\n return func(*args, **kargs)\r\n return wrapper\r\n return decorator", "def validate_input(arg):\n if not type(arg) == list:\n raise ValidArgException('Input \"%s\" must be a list. Got %s' %(arg, type(arg)))\n \n if len(arg) != len(set(arg)):\n raise ValidArgException('\\n\\nDuplicate files found in input list %s\\n' %(arg))\n \n bnames= [os.path.split(x)[1] for x in arg]\n bnames= [re.sub('\\.gz$', '', x) for x in bnames]\n if len(bnames) == 2 and len(set(bnames)) == 1:\n raise ValidArgException('\\n\\nPaired fastq files must have different, unzipped names even if they are in different directories.\\nGot %s\\n' %(arg))\n \n for x in arg:\n if not os.path.isfile(x):\n raise ValidArgException('\\n\\nFile \"%s\" not found\\n' %(x))\n \n if len(arg) == 2:\n return('raw')\n elif len(arg) == 1:\n ext= os.path.splitext(arg[0])[1]\n if ext in ['.sam', '.bam']:\n return(ext.strip('.'))\n else:\n return('raw')\n else:\n raise ValidArgException('\\n\\n1 or 2 item must be in input \"%s\". 
Got %s\\n' %(arg, len(arg)))", "def validate_args(*args: Any) -> bool:\n\n return len(args) == 4 and Item.validate_price(args[2]) and Entity.validate_discount(args[3])", "def _validate_from_args(self, fget=None, fset=None, fdel=None, fval=None, fitr=None, doc=None):\n if fitr is None:\n fitr = iter\n if doc is None and fget is not None:\n doc = fget.__doc__\n return fget, fset, fdel, fval, fitr, doc", "def validate_chain():", "def validate(self, name, values):\r\n \r\n pass", "def validate_args(args):\n if args.mix is not None:\n for folder in args.mix:\n if folder not in valid_folders:\n print('The --mix argument only accepts a list of the following folders: {}'.format(', '.join(valid_folders)))\n exit()\n if len(set(args.mix)) != len(args.mix):\n print('Each folder listed for --mix can only appear once.')\n exit()\n # If all or none of them are given, treat it like it's None\n if len(args.mix) == len(valid_folders) or len(args.mix) == 0:\n args.mix = None\n\n # Explicit seed generation (allows for seed sharing even when no seed is specified)\n if args.seed == None:\n args.seed = random.randrange(sys.maxsize)", "def _validate_stan_inputs(self, **kwargs):\n\n # Copy the dictionary of keywords.\n kwds = {}\n kwds.update(kwargs)\n\n # Allow for a keyword that will disable any verification checks.\n if not kwds.pop(\"validate\", True):\n return kwds\n\n # Check chains and init values.\n if \"init\" in kwds.keys() and isinstance(kwds[\"init\"], dict) \\\n and kwds.get(\"chains\", 1) > 1:\n\n init, chains = (kwds[\"init\"], kwds.get(\"chains\", 1))\n logger.info(\n \"Re-specifying initial values to be list of dictionaries, \"\\\n \"allowing one dictionary per chain ({}). \"\\\n \"Specify validate=False to disable this behaviour\"\\\n .format(chains))\n \n kwds[\"init\"] = [init] * chains\n\n if kwargs.get(\"data\", None) is None:\n try:\n self._data \n except AttributeError:\n self._data, self._metadata = self._prepare_data()\n\n kwds[\"data\"] = self._data\n\n return kwds", "def _check_params(self):\n pass", "def _validate(self):\n REQUIRED_KEYS = [ 'name', 'year', 'artist_id', 'genre_ids', 'sources' ]\n\n missing_keys = get_missing_keys(self.request.data, REQUIRED_KEYS)\n if len(missing_keys) > 0:\n return f\"Request body is missing the following required properties: {', '.join(missing_keys)}.\"\n\n artist_id = self.request.data['artist_id']\n\n try:\n Artist.objects.get(pk=artist_id)\n except Artist.DoesNotExist:\n return \"`artistId` supplied does not match an existing artist.\" \n\n genre_ids = self.request.data['genre_ids']\n if len(genre_ids) == 0:\n return \"You must specify at least one genre id in `genreIds` array.\"\n\n for genre_id in genre_ids:\n try:\n Genre.objects.get(pk=genre_id)\n except Genre.DoesNotExist:\n return f\"The genre id {genre_id} does not match an existing genre.\"\n\n sources = self.request.data['sources']\n if len(sources) == 0:\n return \"You must specify at least one source in `sources` array.\"\n\n for source in sources:\n if 'service' not in source or 'url' not in source or 'is_primary' not in source:\n return \"All sources must contain `service`, `url`, and `is_primary` properties.\"\n\n primary_sources = [ source for source in sources if source['is_primary'] == True ]\n if len(primary_sources) != 1:\n return \"There must be one and only one primary source.\"\n\n return False", "def _check_args(self, args_):\n\n pass", "def _validate_builtin(_):\n pass", "def _validate_kwargs(exclude=[], **kwargs) -> None:\n valid_kwargs = [\n # \"auto_reconnect\",\n 
\"keep_alive\",\n \"proxy_options\",\n \"websockets\",\n ]\n\n for kwarg in kwargs:\n if (kwarg not in valid_kwargs) or (kwarg in exclude):\n # NOTE: TypeError is the conventional error that is returned when an invalid kwarg is\n # supplied. It feels like it should be a ValueError, but it's not.\n raise TypeError(\"Unsupported keyword argument: '{}'\".format(kwarg))", "def error_file_list_command_args_validation(source_types: List[str], categories: List[str], statuses: List[str],\n file_types: List[str], search_options: List[str], severities: List[str]) -> None:\n validate_array_arg(categories, 'Categories', CATEGORIES_OPTIONS)\n validate_array_arg(file_types, 'File types', FILE_TYPES_OPTIONS)\n validate_array_arg(search_options, 'Search options', SEARCH_OPTIONS_OPTIONS)\n validate_array_arg(severities, 'Severities', SEVERITIES_OPTIONS)\n validate_array_arg(source_types, 'Source types', SOURCE_TYPES_OPTIONS)\n validate_array_arg(statuses, 'Statuses', STATUSES_OPTIONS)", "def _checkSSFormatArg(ssformat):\n if ssformat == '':\n raise ShortStrException('ssformat argument cannot be the empty string')\n\n if not isinstance(ssformat, str):\n raise ShortStrException('ssformat argument must be a string with only characters *, c, l, u, and d')\n\n for c in ssformat:\n if c not in '*clud':\n raise ShortStrException('ssformat argument must be a string with only characters *, c, l, u, and d')", "def validate_arguments(args):\n if not args.input_file[-4:] == \".pdb\":\n exit(\"ERROR: Input file should be in PDB format\")\n if args.n_decoys < 0:\n exit(\"ERROR: Number of decoys must be a non-negative value\")\n if args.n_steps < 0:\n exit(\"ERROR: Number of PASSO steps must be a non-negative value\")", "def _validate_stan_inputs(self, **kwargs):\n\n # Copy the dictionary of keywords.\n kwds = {}\n kwds.update(kwargs)\n\n # Allow for a keyword that will disable any verification checks.\n if not kwds.pop(\"validate\", True):\n return kwds\n\n # Check chains and init values.\n if \"init\" in kwds.keys() and isinstance(kwds[\"init\"], dict) \\\n and kwds.get(\"chains\", 1) > 1:\n\n init, chains = (kwds[\"init\"], kwds.get(\"chains\", 1))\n logger.info(\n \"Re-specifying initial values to be list of dictionaries, \"\\\n \"allowing one dictionary per chain ({}). \"\\\n \"Specify validate=False to disable this behaviour\"\\\n .format(chains))\n \n kwds[\"init\"] = [init] * chains\n\n return kwds", "def check_options(_parser, _options):\n\n opterr = False\n\n # Check required options\n reqd_opts = [\"topic\", \"input_prefix\", \"output_prefix\", \"queue\"]\n for attr in reqd_opts:\n if not getattr(_options, attr):\n _parser.print_help()\n raise MissingRequiredOptionsException(\n \"Required option '%s' missing\" % attr)\n\n # Create mapping of all sources for which values have been supplied\n all_sources = [\"year\", \"file\", \"directory\", \"window\", \"lookback\"]\n sources = dict()\n for src in all_sources:\n opt_val = getattr(_options, src)\n if opt_val:\n sources[src] = opt_val\n\n # Check for conflicting options\n if len(sources.keys()) != 1:\n _parser.print_help()\n raise InvalidSourceException(\n \"Exactly one of these options required: [-y | -D | -f | -w | -l]\")\n\n # At this time, we've ensured that sources contains only one key. 
Return its\n # value\n return sources.keys()[0]", "def _check_params(self):\n\t\tstrange_param_helper = False\n\t\tfor param in self.params:\n\t\t\n\t\t\t# It could be that the param encapsulates several values (e.g., \"FLUX_RADIUS(10)\")\n\t\t\t# So we have to dissect this\n\t\t\tmatch = re.compile(\"(\\w*)\\(\\d*\\)\").match(param)\n\t\t\tif match:\n\t\t\t\tcleanparam = match.group(1)\n\t\t\telse:\n\t\t\t\tcleanparam = param\n\t\t\t\t\n\t\t\tif cleanparam not in self.fullparamlist:\n\t\t\t\tlogger.warning(\"Parameter '%s' seems strange and might be unknown to SExtractor\" \\\n % (param))\n\t\t\t\tstrange_param_helper = True\n\t\t\t\t\n\t\tif strange_param_helper:\n\t\t\tlogger.warning(\"Known parameters are: %s\" % (self.fullparamtxt))", "def validate_args(args):\n\n if args.batch_size % args.batch_splits != 0:\n raise ValueError(BATCH_SIZE_SPLIT_ERR.format(args.batch_size, args.batch_splits))\n\n if args.data_parallel and args.model_parallel:\n raise ValueError(DATA_AND_MODEL_PARALLEL_ERR)\n\n if args.class_bal and args.year_weighted_class_bal:\n raise ValueError(CONFLICTING_WEIGHTED_SAMPLING_ERR)\n\n assert args.ten_fold_test_index in range(-1, 10)", "def _check_kwargs(self):\n valid_kw = {\n 'hf_type': 'str',\n 'hierarchy': 'bool',\n 'smooth': 'bool',\n 'water_level': 'float',\n # Object modifier kw\n 'no_shadow': 'bool',\n 'no_image': 'bool',\n 'no_reflection': 'bool',\n 'inverse': 'bool',\n 'double_illuminate': 'bool',\n 'hollow': 'bool'\n }\n\n self._validate_kwargs(valid_kw)\n\n valid_types = [\n 'gif', 'tga', 'pot', 'png', 'pgm',\n 'ppm', 'jpeg', 'tiff', 'sys', 'function'\n ]\n self._checkKwargValue('hf_type', valid_types)", "def checkArguments(args, log):\n\n\t\n\n \t\n\tif not args.variant_caller or not args.genome_ref or not args.bam or not args.bed or not args.vcf:\n\t\tlog.error(\"necessary pre-requisite arguments\")\n\t\tsys.exit()\n\n\t\n\tif args.genome_ref:\n\t\tif not os.path.isfile(args.genome_ref): \n \t\t\n \t\t\tlog.error(\"it does not exist file corresponding to the reference genome\")\n \t\t\tsys.exit()\n\n \t\tif not os.access(args.genome_ref, os.R_OK):\n \t\t\tlog.error(\"permission to read the reference genome file is not accorded\")\n \t\t\tsys.exit()\n\n\t \n \tif args.bam:\n \t\tif not os.path.isfile(args.bam): \n \t\t\n \t\t\tlog.error(\"it does not exist file corresponding to the bam\")\n\n \t\t\tsys.exit()\n\n \t\tif not os.access(args.bam, os.R_OK):\n \t\t\tlog.error(\"permission to read the bam file is not accorded\")\n \t\t\tsys.exit()\n\n\n \tif args.bed:\n \t\tif not os.path.isfile(args.bed):\n \t\t\tlog.error(\"it does not exist file corresponding to the target regions\")\n \t\t\tsys.exit()\n\n \t\tif not os.access(args.bed, os.R_OK):\n \t\t\tlog.error(\"permission to read the target regions file is not accorded\")\n \t\t\tsys.exit()", "def validate_syntax(self):\n self._validate_network_prefix()\n self._validate_zero_network()\n self._validate_families()\n self._validate_unicast_addresses()\n self._validate_addresses()\n self._validate_gateway()\n self._validate_metric()", "def validate(self, name: str, expansion: str) -> List:", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1", "def _validate_args(self):\n\n self.logger.info(\"Validating instance arguments.\")\n\n # test if @self.account_name is an identifier.\n if not self.account_name.isidentifier():\n msg = \"Account name '{}' is not a valid identifier; problems may occur.\".format(\n self.account_name)\n self.logger.warning(msg)\n\n # verify 
@self.pst_file exists.\n if not os.path.isfile(self.pst_file):\n msg = \"Can't find PST file: {}\".format(self.pst_file)\n raise FileNotFoundError(msg)\n\n # verify @self.output_path exists.\n if not os.path.isdir(self.output_path):\n msg = \"Can't find folder: {}\".format(self.output_path)\n raise NotADirectoryError(msg)\n\n # make sure @self.mime_path doesn't already exist.\n if os.path.isdir(self.mime_path):\n msg = \"Can't overwrite existing MIME folder: {}\".format(self.mime_path)\n raise IsADirectoryError(msg)\n\n return", "def parameters_are_valid():\n # The only accepted number of command line arguments is 3: they are\n # aggregator.py, the filename, and the topic\n if len(sys.argv) != 3:\n # Issue error message if invalid number of command line arguments\n print(\"Error: invalid number of arguments\")\n print(\"Usage: aggregator.py filename topic\")\n return False\n else:\n return True", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def validate_resource_manager_parameters(user_defined_parameters,\r\n auto_defined_parameters):\r\n for udp in user_defined_parameters:\r\n for adp in auto_defined_parameters:\r\n if udp.startswith(adp):\r\n raise STAPLERerror.STAPLERerror('Resource manager parameter {0} '\r\n 'should not be defined in '\r\n 'the staplerfile, '\r\n 'as it is automatically '\r\n 'inferred by {1}.'.format(adp, NAME))", "def _validate_links(cls, links, relationship: Optional[str] = None):\n errors = []\n for name in links:\n qual_name = cls._qualname(name, relationship)\n if qual_name in cls.__links_factories__:\n if not isinstance(links[name], Mapping):\n errors.append(f\" You must provide an arguments dictionary for '{qual_name}' link.\")\n continue\n provided_link = links.get(name)\n if provided_link is None:\n errors.append(f\" Nothing provided for building '{qual_name}' link.\")\n elif not isinstance(links[name], str):\n errors.append(f\" Provided '{qual_name}' link is not a string.\")\n if errors:\n raise ValueError(\"\\n\" + \"\\n\".join(errors))", "def _validate_input(self):\n self.data.validate()\n self.meta_hybridizer.validate_input()", "def _validate_placeholders(self):\n if self.implementation.container is None:\n return\n\n valid_inputs = {} if self.inputs is None else self.inputs\n valid_outputs = {} if self.outputs is None else self.outputs\n for arg in itertools.chain(\n (self.implementation.container.command or []),\n (self.implementation.container.args or [])):\n check_placeholder_references_valid_io_name(valid_inputs,\n valid_outputs, arg)", "def check_gs_argument(ground_state):\n required_fields = [\"bc\", \"cf\", \"eci\", \"atoms\"]\n keys = ground_state.keys()\n for key in keys:\n if key not in required_fields:\n raise ValueError(\n \"The GS argument has to contain {} keys. 
Given {}\".format(\n required_fields, keys))", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def test_arg_validation_all_seven_posn(self):\n assert_raises_message(\n TypeError,\n \"drivername must be a string\",\n url.URL,\n b\"somedriver\",\n \"user\",\n \"secret\",\n \"10.20.30.40\",\n 1234,\n \"DB\",\n {\"key\": \"value\"},\n )", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def validate_input_values(self, source, **kwargs):\n return self._validate_values(\"input_values\", source, **kwargs)", "def _validate_subsets(self, subsets: Sequence[str]) -> Sequence[str]:\n if not subsets:\n raise ValueError(\"no subsets specified\")\n for subset in subsets:\n if subset not in self.data_files.keys():\n raise ValueError(f\"{subset} is not valid\")\n return subsets", "def validate(self, *args, **kwargs):\n raise NotImplementedError('Validate not implemented')", "def validate_short_path(short_path):", "def validate_args(self):\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose output\", action=\"store_true\")\n parser.add_argument(\"-u\", \"--user\", help=\"Google user email\", default=\"none\")\n parser.add_argument(\"-p\", \"--password\", help=\"Google user email password\", default=\"none\")\n parser.add_argument(\"-l\", \"--library\", help=\"Remove duplicate songs from library\", action=\"store_true\")\n parser.add_argument(\"-y\", \"--playlist\", help=\"Remove duplicate songs from playlists\", action=\"store_true\")\n # Built-in:\n # parser.add_argument(\"-h\", \"--help\", help=\"Usage help\", action=\"store_true\")\n\n args = parser.parse_args()\n if len(self.argv) == 0 or args.user == \"none\" or args.password == \"none\" or not (args.library or args.playlist):\n parser.print_help()\n exit(0)\n\n return args", "def __init__(self, file_pattern, validate, **nucleus_kwargs):\n\n super(_GenomicsSource, self).__init__(\n file_pattern=file_pattern, splittable=False, validate=validate)\n self.nucleus_kwargs = nucleus_kwargs", "def _validate_params(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def validate_args(cmd_args):\n valid = cmd_args.bag is not None\n\n if not valid:\n print('Must specify a bag file')\n\n if valid:\n for bag_file in cmd_args.bag:\n valid = os.path.isfile(bag_file)\n if not valid:\n print('Invalid bag file: ' + bag_file)\n break\n\n if valid:\n \"\"\" 1. If info is requested, that is the only argument allowed.\n 2. Topics and output files may be specified.\n 3. Topics may be specified. Output file names will be autogenerated.\n 4. 
Stats may be requested.\n \"\"\"\n ops_requested = [False] * 3\n ops_requested[0] = cmd_args.info\n ops_requested[1] = (cmd_args.topic is not None)\n ops_requested[2] = cmd_args.stats\n\n valid = (sum(ops_requested) == 1)\n if not valid:\n print('Must specify either bag info, a topic and output file, or statistics')\n\n if valid and cmd_args.out_file is not None:\n valid = (len(cmd_args.out_file) == len(cmd_args.bag) * len(cmd_args.topic))\n if not valid:\n print('Number of output files must be enough for bags and topics passed in')\n\n return valid", "def valid_args(args):\n return args is not None and len(args) > 0", "def check_params(self, name, fs_in, fs_out, window):\n if not isinstance(name, str):\n raise TypeError('name must be a string, not %s' % name)\n if fs_in <= 0:\n raise ValueError('fs_in should not be less than 0.')\n if fs_out <= 0:\n raise ValueError('fs_out should not be less than 0.')\n if window <= 0:\n raise ValueError('window must be greater than than 0.')", "def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))", "def check_arguments(self):\n self.check_num_arguments()\n self.are_readable_files(self.args)", "def _find_verify_arguments(filters):\n if (\"minsize\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"minsize\"]):\n exit_with_error(\"Maximum size cannot be less than minimum size.\")\n if (\"size\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"size\"]):\n exit_with_error(\"Maximum size cannot be less than (exact) size.\")\n if (\"size\" in filters and \"minsize\" in filters and\n filters[\"minsize\"] > filters[\"size\"]):\n exit_with_error(\"Minimum size cannot be more than (exact) size.\")", "def validate_class_args(self, **kwargs):\n pass", "def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")", "def validate_arguments(args):\n if not os.path.exists(args.training_dir):\n return \"{0} is not a valid directory\".format(args.training_dir)\n\n if not os.path.exists(args.test_dir):\n return \"{0} is not a valid directory\".format(args.test_dir)\n\n if not os.path.isfile(args.label_file):\n return \"{0} is not a valid file\".format(args.label_file)\n return None", "def validate_params(cls, args):\n if not (len(args) == 3 or len(args) == 5 or len(args) == 7):\n sys.exit(\n 'Execute o script passando o caminho do diretório das'\n ' imagens, ou apenas o path de uma imagem e decida se'\n ' deseja mover ou não'\n )\n args_dict = cls.__make_params(args)\n keys_args_set = set(args_dict.keys())\n if keys_args_set.difference(KEYS_DEFAULT_AS_SET) != set():\n sys.exit(\n 'Verifique a passagem de parâmetros.'\n ' Foi encontrado parâmetros desconhecidos.'\n )\n\n return cls.__check_args(args_dict)", "def validate(args):\n args = {k.lstrip('-').lower().replace('-', '_'): v\n for k, v in args.items()}\n schema = Schema({\n 'ptvsd': Or(None, And(Use(int), lambda port: 1 <= port <= 65535)),\n 'root_dir': os.path.exists,\n 'resume': bool,\n })\n args = schema.validate(args)\n return args", "def validate(scheme, data):\n return validate_common(scheme, data)", "def _check_args(\n decl_args: Mapping[str, IntType | ReferenceType], code_args: 
Mapping[str, Storage]\n) -> None:\n for name, arg in code_args.items():\n try:\n typ = decl_args[name]\n except KeyError:\n raise ValueError(f'code block uses undeclared argument \"{name}\"') from None\n\n if isinstance(typ, ReferenceType):\n typ = typ.type\n if typ.width != arg.width:\n raise ValueError(\n f'argument \"{name}\" is declared with width {typ.width} '\n f\"but has width {arg.width} in code block\"\n )", "def validate_arguments(self, contest):\n #\n # validation if the contest argument\n self.validate_variable(Contest, contest)", "def verify(self):\n \n # Check non-optional args\n pList = ['basePath', 'dateStart', 'dateStop','sensor']\n for p in pList:\n if not hasattr(self, p):\n self.showUsage()\n raise '\\nMissing keyword arg: %s.\\n' % p\n \n if not self.isValidBasePath():\n raise '\\nbasePath (%s) does not exist.\\n' % self.basePath\n\n if not isValidPimsDateString(self.dateStart):\n raise '\\ndateStart (%s) is not valid\\n' % self.dateStart\n else:\n self.uStart = stringTimeToUnix(self.dateStart)\n\n if not isValidPimsDateString(self.dateStop):\n raise '\\ndateStop (%s) is not valid\\n' % self.dateStop\n else:\n self.uStop = stringTimeToUnix(self.dateStop)", "def valid(self, target):", "def _verify_arguments(self):\n # if self.options.action == \"create\":\n # if self.options.encrypt_payload and not self.options.payload_secret:\n # self.parser.error('A secret must be supplied with --payload-secret option when the --encrypt-payload option is in use.')\n pass", "def test_require_at_least_one_and_several_provided(self):\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('ahoy', 'there'), 'foo')\n self.assertEqual(_func(arg1='ahoy', arg2='there'), 'foo')\n self.assertEqual(_func('ahoy', arg2='there', arg3='matey'), 'foo')", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def __validate_info(title: str, artist: str, runtime: str, path_name: str):\n if (type(title) != str) or (type(artist) != str) or (type(runtime) != str) \\\n or (type(path_name) != str):\n raise ValueError", "def validate_args(self, parser: argparse):\n pass", "def _ValidateArgs(self, args):\n if not (args.IsSpecified('description') or\n args.IsSpecified('security_policy')):\n parameter_names = ['--description', '--security_policy']\n raise exceptions.MinimumArgumentException(\n parameter_names, 'Please specify at least one property to update')", "def test_non_existant_required_arg(self):\n with self.assertRaises(ValueError):\n _func = required_parameters('arg1', 'wibble', 'wobble')\n _func(undecorated_func)\n\n with self.assertRaises(ValueError):\n _func = mutually_exclusive_parameters(\n 'arg1',\n 'wibble',\n 'wobble'\n )\n _func(undecorated_func)" ]
[ "0.65343577", "0.6175937", "0.6175629", "0.60430217", "0.58976775", "0.5827241", "0.580448", "0.57669264", "0.57641464", "0.57474285", "0.57235986", "0.5695725", "0.5665001", "0.56555843", "0.56499135", "0.5635747", "0.5620332", "0.5607189", "0.55930436", "0.55805653", "0.5558013", "0.55496866", "0.55422497", "0.5523993", "0.5510623", "0.55080885", "0.5507116", "0.5502876", "0.5502553", "0.5493999", "0.54846025", "0.54817945", "0.54566497", "0.5452172", "0.5449126", "0.54483575", "0.5446165", "0.5446054", "0.5441841", "0.5441046", "0.54353875", "0.5419341", "0.54110146", "0.54042315", "0.5398301", "0.5390944", "0.5373484", "0.5372881", "0.53689", "0.53662497", "0.53641576", "0.53572077", "0.53563344", "0.5352506", "0.5345511", "0.53448045", "0.53419757", "0.5341806", "0.53294873", "0.53249943", "0.53163713", "0.5312925", "0.5300637", "0.52961916", "0.5291742", "0.52898777", "0.5288104", "0.5284652", "0.52841914", "0.52841914", "0.5271274", "0.5270896", "0.52689135", "0.5268847", "0.5266738", "0.5266431", "0.526534", "0.5254983", "0.52545065", "0.5253735", "0.5253516", "0.5251427", "0.52501553", "0.5245332", "0.5243944", "0.5242485", "0.52399814", "0.5239392", "0.52322865", "0.521951", "0.5213116", "0.52078277", "0.5205097", "0.5196864", "0.5194398", "0.5184813", "0.51846313", "0.5180986", "0.5171678", "0.5171325" ]
0.6043267
3
Validate a single strand by name.
def validate_strand(self, name, source, **kwargs): return self.validate({name: source}, **kwargs)[name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid(name):\n return bool(name)", "def validated_name(cls, name):\n if (name[:5] == 'hive-'\n and name[5] in ['1', '2', '3']\n and re.match(r'^hive-[123]\\d{4,6}$', name)):\n return name\n return None", "def check_schema_name(name: str):\n if not is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")", "def validname(name):\r\n return len(name)>0 and (\r\n Context.__invalid_character.search(name) is None)", "def validate_name(name: str) -> None:\n\n # Disallow empty.\n if not name:\n raise CleanError('Feature set name cannot be empty.')\n\n # Require starting with a letter.\n if not name[0].isalpha():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - names must start with a letter.'\n )\n\n # Require only letters, numbers, and underscores.\n if not name.replace('_', '').isalnum():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only letters, numbers, and underscores are allowed.'\n )\n\n # Require all lowercase.\n if not name.islower():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only lowercase letters are allowed.'\n )\n\n # Disallow leading, trailing, or consecutive underscores.\n # (these will result in a '' in the split results which evals to False)\n if not all(name.split('_')):\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - leading, trailing, and consecutive underscores are'\n ' not allowed.'\n )", "def is_valid_compound_name(name: str) -> bool:\n return n2s.has_smiles(name)", "def check_input(naming):\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')", "def verify_name(name):\n try:\n if name.index(' '):\n return False\n except ValueError:\n return True", "def validateName(name):\r\n if not name:\r\n raise IllegalName('Name can not be an empty string.')\r\n\r\n m = _NAME_RE.match(name)\r\n\r\n if m is None or m.group(0) != name:\r\n raise IllegalName('Name has to start with a letter followed by an '\r\n 'arbitrary number of alphanumeric characters or '\r\n 'underscores.')", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str[0-9]+$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def is_valid(self, qstr):\r\n pass", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str([0-9]+|L)$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))", "def _validate(self, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django project name \"{}\": '\n 'must be a valid Python identifier').format(s))", "def validate_names(name):\n return isinstance(name, str) and not re.search(r'[\\s]', name)", "def validate_species(self, name):\n accepted_species = Species.objects.values_list('name', flat=True)\n if name not in accepted_species:\n raise serializers.ValidationError(\n 'Species {0} is not allowed.'.format(name))\n else:\n return name", "def isValidDataTypeName(name: unicode) -> bool:\n ...", "def verify_spec_name(spec_name):\n if not isinstance(spec_name, text_type):\n raise ValueError(\n \"expected spec name of string type, but got '{0}' of type '{1}'\".\n format(spec_name, to_str(type(spec_name))))", "def check_dog_name(dog):\n if not 
isinstance(dog.name, str):\n raise NotStringError(\"Dog name entered is not a string\")", "def name_valid(name):\n return name.isalpha()", "def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)", "def _assert_valid_name(name, container):\n container.file.name_validation(container.directory, name)", "def valid_street_name(cls, new_street):\n if type(new_street) is str:\n return True\n # else\n return False", "def verify_name(name):\n if name and not name.isspace(): # if it's not empty/NULL and it's not whitespace\n return True\n else:\n return False", "def test_reserved_name(self):\n with self.assertRaises(ValidationError):\n field_name_validator('_id')", "def validate_species_name(self, species_name, require_full=True, require_prefix=True):\n\n if species_name == 's__':\n return True, None\n\n # test for prefix\n if require_prefix:\n if not species_name.startswith('s__'):\n return False, 'name is missing the species prefix'\n\n # remove prefix before testing other properties\n test_name = species_name\n if test_name.startswith('s__'):\n test_name = test_name[3:]\n\n # test for full name\n if require_full:\n if 'candidatus' in test_name.lower():\n if len(test_name.split(' ')) <= 2:\n return False, 'name appears to be missing the generic name'\n else:\n if len(test_name.split(' ')) <= 1:\n return False, 'name appears to be missing the generic name'\n\n # check for tell-tale signs on invalid species names\n if \" bacterium\" in test_name.lower():\n return False, \"name contains the word 'bacterium'\"\n if \" archaeon\" in test_name.lower():\n return False, \"name contains the word 'archaeon'\"\n if \" archeaon\" in test_name.lower():\n return False, \"name contains the word 'archeaon'\"\n if \"-like\" in test_name.lower():\n return False, \"name contains '-like'\"\n if \" group \" in test_name.lower():\n return False, \"name contains 'group'\"\n if \" symbiont\" in test_name.lower():\n return False, \"name contains 'symbiont'\"\n if \" endosymbiont\" in test_name.lower():\n return False, \"name contains 'endosymbiont'\"\n if \" taxon\" in test_name.lower():\n return False, \"name contains 'taxon'\"\n if \" cluster\" in test_name.lower():\n return False, \"name contains 'cluster'\"\n if \" of \" in test_name.lower():\n return False, \"name contains 'of'\"\n if test_name[0].islower():\n return False, 'first letter of name is lowercase'\n if 'sp.' 
in test_name.lower():\n return False, \"name contains 'sp.'\"\n\n return True, None", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def check_name(name, allow_services=False):", "def check_name(self, name):\n status, msg = utils.validate_name(name, \"36\", \"storageview name\")\n if not status:\n LOG.error(msg)\n self.module.fail_json(msg=msg)\n else:\n LOG.info(msg)", "def _check_name(self):\n\t\tpass", "def isValidPart(name):\n\tfor n in name_forms:\n\t\tif re.match(n, name.lower()) is not None:\n\t\t\treturn True\n\treturn False", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def is_valid_attribute_name(self, name):\n try:\n self.validate_attribute_name(name)\n return True\n except etal.LabelsSchemaError:\n return False", "def validate(self, name):\n return name in self.dict", "def validate_plan(plan_name):\n pass", "def validate_name(name, units=None, strict=False):\n\n if not units:\n units = TILE_SIZES.keys()\n else:\n if isinstance(units, str):\n units = [units]\n\n for unit in units:\n if unit not in TILE_SIZES.keys():\n raise ValueError(\"{0} is not a valid kvadratnet unit.\".format(unit))\n\n if strict:\n begin, end = \"^\", \"$\"\n else:\n begin, end = \"\", \"\"\n\n for unit in units:\n expr = \"{begin}{expr}{end}\".format(begin=begin, expr=REGEX[unit], end=end)\n if re.match(expr, name):\n return True\n\n return False", "def validName(varname):\r\n if (len(varname[0])>32):\r\n return False\r\n if not(varname[0][0].isalpha()):\r\n return False \r\n for ch in varname[0][1:]:\r\n if not(ch.isalpha() or ch.isdigit() or ch=='_'):\r\n return False\r\n \r\n return True", "def nameIsValid(self, name):\n self.notify.debug('nameIsValid')\n if (name in self.usedNames):\n return OTPLocalizer.ToonAlreadyExists % (name)\n\n problem = NameCheck.checkName(name, font=self.nameEntry.getFont())\n if problem:\n return problem\n\n # name has passed local checks\n return None", "def is_valid_name(self):\n\n if self.whitelist_name == '':\n return True\n\n if len(self.whitelist_name) >= 64:\n LOGGER.debug('invalid name %s; must be less than 64 bytes',\n self.whitelist_name)\n return False\n\n return True", "def _validate_snap_name(name, snap_name, strict=True, runas=None):\n snap_name = salt.utils.data.decode(snap_name)\n\n # Try to convert snapshot name to an ID without {}\n if re.match(GUID_REGEX, snap_name):\n return snap_name.strip(\"{}\")\n else:\n return snapshot_name_to_id(name, snap_name, strict=strict, runas=runas)", "def validate_strand(strand: str) -> bool:\n strand = strand.upper()\n count = dict(Counter(strand))\n for k in count.keys():\n if k not in NUCLEOTIDES:\n raise Exception(\"Invalid DNA sequence\")\n return True", "def validate_rss_name(rss_name):\n regex = r'^[\\w]+$'\n if not match(regex, str(rss_name)):\n raise ValueError('invalid rss name ({})'.format(rss_name))", "def is_dev_name_valid(self):\n return self._name_re.match(self.dev_name) is not None", "def validName(configsetname):\n for c in configsetname:\n if not c in string.letters+string.digits+\"$_-\":\n return False\n return configsetname != \"\"", "def test_is_valid_kubernetes_resource_name_valid_input():\n # test valid names\n assert is_valid_kubernetes_resource_name(name=\"l0l\")\n assert 
is_valid_kubernetes_resource_name(name=\"l-l\")\n assert is_valid_kubernetes_resource_name(name=\"l.l\")\n assert is_valid_kubernetes_resource_name(name=\"4-you\")\n assert is_valid_kubernetes_resource_name(name=\"you.2\")", "def isValidName(theString, minimum, maximum) :\n\n return theString.isalpha() == True \\\n and len(theString) >= minimum \\\n and len(theString) <= maximum", "def name_check(f_name):\r\n if len(f_name) == 0:\r\n print('The first name must be filled in.')\r\n if len(f_name) < 2:\r\n print(f_name + ' is not a valid name. Itis too short.')", "def _solid_name_check(self, solid_name, chk_dict=None):\n return self._name_check(solid_name, 'solids', chk_dict=chk_dict)", "def validate_supply_name(self, supply_name, path=\".\"):\n self.console.runcmd(f\"find {path} -name {supply_name}\", expected=\"\\r\\n\")\n if self.console.output():\n return True\n else:\n return False", "def validate_identifier(self, identifier):\n pass", "def validate_custom_name(self, name):\n if not re.match( r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$', name):\n raise ValueError('Invalid name for node (%s)' % name)\n return", "def validate_name(benchmark):\n if VALID_BENCHMARK_REGEX.match(benchmark) is None:\n logs.error('%s does not conform to %s pattern.', benchmark,\n VALID_BENCHMARK_REGEX.pattern)\n return False\n return True", "def validateMemberName(n):\n try:\n if len(n) < 1:\n raise Exception('Name must be at least one byte in length')\n if len(n) > 255:\n raise Exception('Name exceeds maximum length of 255')\n if n[0].isdigit():\n raise Exception('Names may not begin with a digit')\n if mbr_re.search(n):\n raise Exception(\n 'Names contains a character outside the set [A-Za-z0-9_]')\n except Exception as e:\n raise MarshallingError(f'Invalid member name \"{n}\": {str(e)}')", "def test_is_valid(self, address):\n self.test_string(address)\n self.test_alnum(address)", "def isNameUsed(self, name: unicode, startId: long, stopId: long) -> bool:\n ...", "def _validate_name(name):\r\n\tif HOST_NAME != name and len(name) > 0 and ZOOM_PHRASES[0] not in name and name not in WAITING_ROOM:\r\n\t\treturn True\r\n\treturn False", "def validate_string(s, name=''):\n assert (s is not None), name + ' settings should not be None'\n assert (isinstance(s, str)), name + ' settings should be a string'\n assert (s != ''), name + ' settings should be not be empty'", "def validate_name(self, username: str) -> bool:\n\t\treturn not self.registry.name_taken(username)", "def validateName(name):\n if not name:\n # This happens when the name is an existing directory\n raise BadCommand('Please give the name of a layer.')\n # 'setup' is a valid controller name, but when paster controller is ran\n # from the root directory of a project, importing setup will import the\n # project's setup.py causing a sys.exit(). Blame relative imports\n if name != 'setup' and can_import(name):\n raise BadCommand(\n \"\\n\\nA module named '%s' is already present in your \"\n \"PYTHON_PATH.\\nChoosing a conflicting name will likely cause \"\n \"import problems in\\nyour controller at some point. 
It's \"\n \"suggested that you choose an\\nalternate name, and if you'd \"\n \"like that name to be accessible as\\n'%s', add a route \"\n \"to your projects config/routing.py file similar\\nto:\\n\"\n \" map.connect('%s', controller='my_%s')\" \\\n % (name, name, name, name))\n return True", "def CHECK_NAME(name):\n if WORDPAT.match(name):\n return name\n return None", "def _check_key_name(cls, name):\n return (isinstance(name, basestring) and\n re.match('^[A-Za-z][A-Za-z0-9_]*$', name) and\n not hasattr(cls, name))", "def test__check_invalid_nickname__valid_name(self):\n self.assertEqual(\n first=trainercode.format_trainer_code(\"111122223333\"),\n second=\"1111 2222 3333\"\n )", "def validate_attribute_name(self, name):\n if not self.has_attribute(name):\n raise AttributeContainerSchemaError(\n \"Attribute '%s' is not allowed by the schema\" % name\n )", "def _validate(self, django_project_name: str, s: str):\n if not s.isidentifier():\n raise ValueError(('Invalid Django app name \"{}\": '\n 'must be a valid Python identifier').format(s))\n\n if django_project_name == s:\n raise ValueError(\n ('Invalid Django project name \"{}\": '\n 'must be different than Django project name').format(s))", "def test_asterisk(self):\n with self.assertRaises(ValidationError):\n db_name_validator('logstash*')", "def isValid(self):\n if(not self.name or len(self.name) == 0):\n return False\n return True", "def validate_team_name(name):\n if not re.match('^[A-Za-z0-9_]*$', name):\n print('INVALID NAME. LETTERS, NUMBERS AND UNDERSCORES ONLY')\n return False\n elif len(name) > 10:\n print('INVALID NAME. 10 CHARACTERS MAX')\n return False\n elif len(name) == 0:\n print('INVALID NAME. NOT LONG ENOUGH')\n else:\n return True", "def _check_if_valid_dataset_name(dataset_name: str) -> str:\n if not re.match(r\"^[A-Za-z0-9_]+$\", dataset_name):\n raise ExecutionEngineError(\n f\"dataset_name: {dataset_name} is not valid, because it contains non-alphanumeric and _ characters.\"\n f\"Please check your configuration.\"\n )\n\n if len(dataset_name) >= MAX_TABLE_NAME_LENGTH:\n # starting from the end, so that we always get the index and sub_index\n new_dataset_name = dataset_name[-MAX_TABLE_NAME_LENGTH:]\n logger.info(\n f\"dataset_name: '{dataset_name}' was truncated to '{new_dataset_name}' to keep within length limits.\"\n )\n dataset_name = new_dataset_name\n\n while not re.match(r\"^[A-Za-z]+$\", dataset_name[0]):\n dataset_name = dataset_name[1:]\n\n return dataset_name", "def is_bank_name_valid(name_to_check: str):\n def is_name_short_enough():\n return True if len(name_to_check) <= 12 else False\n\n def is_name_only_letter():\n return True if name_to_check.isalpha() else False\n\n return True if is_name_short_enough() and is_name_only_letter() else False", "def is_valid_instance(instance):\n return re.match(r'^[a-z0-9\\-_]+$', instance)", "def validate_name(name):\n name = name.strip()\n m = re.search('^[a-zA-Z0-9 ]{3,30}$', name)\n if m is None:\n return False\n else:\n return True", "def test_valid(self):\n template = '{0} just right {1}'\n value_count = 2\n try:\n validate_str_substitution(template, value_count)\n except ValidationError:\n self.fail('Name raised ValidationError unexpectedly')", "def checkValidClusterName(self):\n p = re.compile('^[a-zA-Z0-9][a-zA-Z0-9_.\\-]*[a-zA-Z0-9]$')\n if len(self.clusterName) < 2 or len(self.clusterName) > 64 or not p.match(self.clusterName):\n raise RuntimeError(\"Invalid cluster name (%s).\"\n \" It must be between 2 and 64 characters and contain only alpha-numeric\"\n 
\" characters, hyphens, underscores, and periods. It must start and\"\n \" end only with alpha-numeric characters.\" % self.clusterName)", "def test_asterisk(self):\n with self.assertRaises(ValidationError):\n field_name_validator('logstash*')", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def check_name(self, name: str):\n if name[0] == \"/\" or self.check_end_streaming(name):\n return True\n else:\n return False", "def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))", "def isEditName(id):\n for char in id:\n if re.compile('[0-9]+').match(char[0]) == None:\n print NameError(\"'%s' is not valid name. \\n Id should be numeric\" % (name))\n return -1\n return 0", "def _is_valid_keyspace_name(self, keyspace_name):\n if keyspace_name == None or not keyspace_name:\n return False\n return re.match(r\"^[a-z_]*[^-]$\", keyspace_name)", "def test_hyphen(self):\n try:\n db_name_validator('logstash-')\n except ValidationError:\n self.fail('Name raised ValidationError unexpectedly')", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def validate_name(self, value):\n if not value:\n raise serializers.ValidationError(\"Name cannot be null\")\n return value", "def is_basic_name(name):\n if name is None:\n raise AdasDBError(\"Invalid name '%s'.\" % name)\n return name.find(PATH_SEPARATOR)", "def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username", "def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def validate_name(self, name):\n import re\n\n if not re.findall(\"^[\\w',]+$\", name):\n self.msg(\"That category name contains invalid characters.\")\n return False\n return True", "def test_from_smiles_name(self):\n mol = Molecule.from_smiles(\"C\")\n assert mol.name == \"\"\n\n mol = Molecule.from_smiles(\"C\", name=\"bob\")\n assert mol.name == \"bob\"", "def cfcheck_from_name(varname, vardata):\n data = VARIABLES[varname]\n if \"cell_methods\" in data:\n check_valid(\n vardata, \"cell_methods\", parse_cell_methods(data[\"cell_methods\"]) + \"*\"\n )\n if \"standard_name\" in data:\n check_valid(vardata, \"standard_name\", data[\"standard_name\"])", "def test_name_validation(self, attr):\n kwargs = {'kind': POSITIONAL_ONLY, attr: 3}\n with pytest.raises(TypeError) as excinfo:\n FParameter(**kwargs)\n assert excinfo.value.args[0] == \\\n '{} must be a str, not a {}'.format(attr, 3)", "def name_error(name):\n\n if len(name) > MAX_NAME_LENGHT:\n raise InputError(description=\"Name cannot be more than 20 characters long\")", "def check_name(name):\n name = sanitize_name(name)\n for letter in name:\n if letter not in all_letters:\n # print(f\"Bad letter = {letter}\")\n return False\n role = extract_role(name)\n # remove group\n name = name.replace(f' - {role}', '')\n try:\n parts = name.split(' ')\n firstname = parts[0].title()\n if firstname[0] not in letters:\n return False\n for letter in firstname[1:]:\n if letter not in LETTERS:\n return False\n familynames = parts[1:]\n for familyname in 
familynames:\n if familyname[0] not in letters:\n return False\n for letter in familyname[1:]:\n if letter not in LETTERS:\n return False\n return True\n except:\n return False", "def legal_name(name, is_param_name=False):\n if name.startswith('_'):\n return False\n\n if name in ('self',):\n return False\n\n if keyword.iskeyword(name):\n return False\n\n regex = r'^[a-zA-Z][a-zA-Z0-9_]*$' if is_param_name else (\n r'^[a-zA-Z][.\\w-]*$')\n return bool(re.match(regex, name))", "def _validate(self, s: str):\n if not re.match(r'[a-z][a-z0-9\\-]{5,29}', s):\n raise ValueError(('Invalid Google Cloud Platform Project ID \"{}\": '\n 'must be between 6 and 30 characters and contain '\n 'lowercase letters, digits or hyphens').format(s))", "def clean_name(self):\n name = self.cleaned_data['name']\n if not re.match(r'[\\w{4}\\s*]+', name) or len(name) < 4:\n v_err('no_name')\n return name", "def _block_id_is_guid(self, name):\r\n return len(name) == 32 and re.search(r'[^0-9A-Fa-f]', name) is None", "def validateNamePart(self, passed_name):\n ## Declaring a Flag to control a while loop\n name_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not name_ok:\n if passed_name.isalpha():\n name_ok = True\n return True\n\n else:\n print(\"You have entered an invalid character. Please try again.\")\n return False", "def _check_is_name_valid(self, name):\n if name in self.forbidden_names or name.endswith(\n self.forbidden_extensions) or self.__check_is_match_regex(name):\n return False\n return True", "def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)" ]
[ "0.62414205", "0.6123533", "0.60466254", "0.59945595", "0.5968563", "0.5950491", "0.5935591", "0.5917729", "0.5904584", "0.58871233", "0.5855124", "0.5852563", "0.58318865", "0.5813858", "0.5813858", "0.58076316", "0.57668716", "0.576247", "0.5755813", "0.57409114", "0.57358676", "0.57354623", "0.5715307", "0.5704926", "0.5694756", "0.5690485", "0.56901884", "0.56880516", "0.5669288", "0.5626796", "0.5615035", "0.56027305", "0.5578831", "0.5575408", "0.5565115", "0.5533735", "0.55212235", "0.55176306", "0.5516951", "0.550913", "0.5507784", "0.54836684", "0.5476649", "0.5472244", "0.54706025", "0.54702806", "0.54614484", "0.54509664", "0.5448033", "0.54239154", "0.54127574", "0.54126424", "0.5412612", "0.5401034", "0.5380382", "0.5344112", "0.53437", "0.5334328", "0.5331336", "0.5328534", "0.53114784", "0.5300404", "0.52955985", "0.52861595", "0.5279098", "0.5274997", "0.5268092", "0.5266701", "0.52574915", "0.52523553", "0.52447194", "0.524271", "0.5239963", "0.5237016", "0.52258986", "0.52244824", "0.52233946", "0.5222476", "0.522167", "0.52191144", "0.521874", "0.52148086", "0.52148086", "0.5212372", "0.52015567", "0.5187372", "0.5183547", "0.51818407", "0.51705056", "0.5169491", "0.51661205", "0.5165703", "0.5156044", "0.5149097", "0.5140528", "0.5134695", "0.51269144", "0.5126213", "0.5125567", "0.5121301" ]
0.67900914
0
Prepare instance for strand data using a class map.
def prepare(self, *args, cls=None, **kwargs): prepared = {} for arg in args: if arg not in ALL_STRANDS: raise exceptions.UnknownStrand(f"Unknown strand '{arg}'") elif arg not in self.available_strands: prepared[arg] = None else: klass = self._get_cls(arg, cls) prepared[arg] = klass(**kwargs) if klass else dict(**kwargs) if hasattr(prepared[arg], "prepare"): prepared[arg] = prepared[arg].prepare(getattr(self, arg)) return prepared
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare(self, class_map=None):\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)", "def prepare(self, class_map=None):\n\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n # Mapping from source class and image IDs to internal IDs\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n self.image_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.image_info, self.image_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)", "def prepare(cls):", "def __init__(self, data):\n assert isinstance(data, SplitDict)\n self.first = Classifier(data.super)\n self.second = dict()\n for meta in data.keys():\n self.second[meta] = Classifier(data[meta])", "def __init__(self, engine: Engine, class_id: int):\n\n self.engine = engine\n\n # initialize base-class data\n self.data = engine.get_class(class_id)\n self.media = engine.get_class_media(class_id)\n assert self.data is not None\n assert self.media is not None\n self.icon = self.media[\"assets\"][0][\"value\"]\n\n self.name = self.data[\"name\"]\n self.to_serialize = {\n \"name\": self.name,\n \"slug\": self.name.lower().replace(\" \", \"_\"),\n \"icon\": self.icon,\n }\n\n # initialize specialization data\n self.specs = {}\n for spec_idx_data in self.data[\"specializations\"]:\n spec = Specialization(self.engine, spec_idx_data[\"id\"])\n self.specs[spec_idx_data[\"name\"].lower()] = spec\n\n self.to_serialize[\"specs\"] = []\n for spec in self.specs.values():\n self.to_serialize[\"specs\"].append(spec.to_serialize)", "def __init__(self):\n\n for layer in self._layer_class_map:\n setattr(self, layer, self._layer_class_map[layer]())", "def __init__(self):\n self.classes = 
{}", "def initialize(cls):", "def setup_class(self):\n\n class SubFLRW(FLRW):\n def w(self, z):\n return super().w(z)\n\n self.cls = SubFLRW\n # H0, Om0, Ode0\n self.cls_args = (70 * u.km / u.s / u.Mpc, 0.27 * u.one, 0.689 * u.one)\n self.cls_kwargs = dict(Tcmb0=3.0 * u.K, name=self.__class__.__name__, meta={\"a\": \"b\"})", "def initMaps(self):\r\n assert isinstance(self.CLASSES, (list, tuple))\r\n assert self.CLASSES[0] == \"__background__\"\r\n cls = self.CLASSES\r\n self.name_to_id = dict(zip(cls, range(len(cls))))\r\n self.id_to_name = dict(zip(range(len(cls)), cls))", "def setup_class(klass):", "def setup_class(klass):", "def __init__(self, class_name):\n self.class_name = class_name.lower()\n\n try:\n if _req.json is not None:\n self.parse.json = _req.json\n\n if bool(_req.form):\n self.parse.form = _req.form.to_dict(flat=False)\n\n if bool(_req.files):\n self.parse.file = _req.files.to_dict(flat=False)\n\n if bool(_req.args):\n self.parse.args = _req.args\n except AttributeError:\n pass", "def __init__(self, set_args, load_sensor_names,\n sensor_names, \n cnt_preprocessors, marker_def):\n self.__dict__.update(locals())\n del self.self\n if self.load_sensor_names == 'all':\n self.load_sensor_names = None", "def __init__(self, mappings):\r\n for key, value in mappings.iteritems():\r\n setattr(self, key, value)", "def _instantiate(clz, **data):\n\n new_obj = clz()\n setattr(new_obj, \"data\", data)\n for key, val in deepcopy(data).items():\n setattr(new_obj, key, val)\n return new_obj", "def build_class(classname, values):\n values['FIELDS'] = [x for x in values.keys()]\n return type(classname, (object,), values)", "def setup_class(cls):", "def setup_class(cls):", "def __init__(self, inst, class_type):\n\t\tself.type = str(class_type)[7:]\n\t\tself.type = self.type[:-1]\n\t\tself.inst = inst\n\t\treturn", "def initialize(cls):\n if len(cls.mapping) == 0:\n cls.mapping[\"noop\"] = cls(Transform.identity, Combiner.noop)\n cls.mapping[\"sigmoid\"] = cls(Transform.sigmoid, Combiner.multiplies)\n cls.mapping[\"log-sigmoid\"] = cls(Transform.log_sigmoid, Combiner.plus)\n\n for p in range(1, 5):\n cls.mapping[\"l{}-hinge\".format(p)] = cls(\n Transform.get_lp_hinge(p), Combiner.multiplies\n )\n cls.mapping[\"log-l{}-hinge\".format(p)] = cls(\n Transform.get_log_lp_hinge(p), Combiner.plus\n )\n\n # for backward compatibility\n cls.mapping[True] = cls.mapping[\"l3-hinge\"]\n cls.mapping[False] = cls.mapping[\"noop\"]\n cls.mapping[None] = cls.mapping[\"noop\"]", "def setUpClass(cls) -> None:\n cls.example_map: FeedlineMapCollection = get_default_map_collection()\n cls.existing_map_id: str = 'S17'\n cls.existing_feedline_nr: int = 0\n cls.not_existing_map_id: str = 'NULL'\n cls.not_existing_feedline_nr: int = -1", "def __init__(self, mappings):\n self.mappings = mappings", "def __init__(self,classes=['normalizeText','tagger','stem','stopWord','spellChecker']):\n self._support = prebotSupport()\n self._classes = classes\n if(\"tagger\" in self._classes):\n self._tagger = tagger()\n if(\"normalizeText\" in self._classes):\n self._normalize = normalizeText()\n if(\"spellChecker\" in self._classes):\n self._spellChecker = spellChecker()\n if(\"stopWord\" in self._classes):\n self._stopWord = stopWord()\n if(\"stem\" in self._classes):\n self._stem = stemming()", "def setup(self):\n\n logger.info('Setting up SimulatedMaps module.')\n\n # Save the cls as a class attribute\n self.cls = self.read_cls()\n\n logger.info('Setup done!')", "def __init__(self, name, pair_instance, dof_cls):\n self._name = 
name\n self._dof_cls = dof_cls\n self._pair_instance = pair_instance\n self._indexer = hoomd.data.parameterdicts._SmartTypeIndexer(2)\n self._data = {}", "def __init__(self):\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)", "def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None", "def __init__(self):\n self._map = {}", "def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"", "def prepare_data(self):", "def __init__(self, kwargs):\n\n self.postcode = kwargs[\"postcode\"]\n self.east = float(kwargs[\"east\"])\n self.north = float(kwargs[\"north\"])\n self.latitude = kwargs[\"latitude\"]\n self.longitude = kwargs[\"longitude\"]", "def __init__(self):\n\n self._datamappers = {\n '*/*': DataMapper()\n }", "def __call__(cls, inst = None, initDict = None, *args, **kwargs):\n if not inst:\n inst = blue.classes.CreateInstance(cls.__cid__)\n inst.__klass__ = cls\n if initDict:\n for k, v in initDict.iteritems():\n setattr(inst, k, v)\n\n try:\n inst.__init__()\n except AttributeError:\n pass\n\n return inst", "def __init__(self, data):\n self.jssp_instance_data = data", "def _create_Work(classname, dataclass):\n globals()[classname] = type(classname, (Work, dataclass), {})", "def __init__(self, config, set_name, preprocess_image):\n\t\t\tself.data_dir = config['data_dir']\n\t\t\tself.set_name = set_name\n\t\t\tself.coco = COCO(os.path.join(self.data_dir, 'annotations', 'instances_' + set_name + '.json'))\n\t\t\tself.image_ids = self.coco.getImgIds()\n\t\t\tself.mask = config['mask']\n\n\t\t\tself.load_classes()\n\n\t\t\tsuper(CocoGenerator, self).__from_config__(config, preprocess_image=preprocess_image)", "def __init__(self, *, base=None):\n self._base = base\n\n self._map = {}", "def __init__(self, class_names=None, sample_weight=None, digits=4, format=None):\n self.class_names = None\n self.sample_weight = sample_weight\n self.digits = digits\n self.pred = None\n self.target = None\n ClassNamesHandler.__init__(self, class_names)\n FormatHandler.__init__(self, format)", "def __init__(self, data, data_class, projection_dim=2):\n # data\n self.data = data\n self.data_class = data_class\n self.data_ninstances = data.shape[0]\n self.data_dim = data.shape[1]\n # projection\n self.projection = np.zeros((self.data_ninstances, projection_dim))\n self.projection_dim = projection_dim", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def setup_class(self):\n class SubCosmology(Cosmology):\n\n H0 = Parameter(unit=u.km / u.s / u.Mpc)\n Tcmb0 = Parameter(unit=u.K)\n\n def __init__(self, H0, Tcmb0=0*u.K, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self._H0 = H0\n self._Tcmb0 = Tcmb0\n\n self.cls = SubCosmology\n self.cls_args = (70 * (u.km / u.s / u.Mpc), 2.7 * u.K)\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})", "def __init__(self, mapping: Mapping[str, Any]) -> None:\n self.__dict__.update(mapping)", "def _reconstruct_object(deserialized_data):\n for key, value in deserialized_data.items():\n key = key.strip('__')\n if key in registry:\n # Gather the keyword arguments for class *key*\n kwargs = 
dict(value.items())\n return registry[key](**kwargs)", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self):\n self.map = {}", "def __init__(self, configGroups):\r\n self.config = {cls:configGroup[classes] for configGroup in configGroups for classes in configGroup for cls in IterWrapper(classes)}", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def __init__(self, *args, **kwargs):\n Base.__init__(self, *args, **kwargs)\n self.fixture_ids = [fix['id'] for fix in self.load_season_fixture().values()]\n self.fixture_dispatch_map = {'fixture_player_stats': self.fixture_player_stats,\n 'fixture_stats' : self.fixture_stats,\n 'fixture_info': self.fixture_info,}", "def __init__(self,paramDict):\n self.pandeia_params = paramDict\n self.prep_and_run()", "def _prepare(self):", "def _prepare(self):", "def initpridict(cls):\n for i in range(len(clslist)):\n instcls = clslist[i]\n prilist = cls.pristage(instcls)\n configlist = cls.getConfigStages()\n tmpdict = dict()\n for j in range(len(configlist)):\n tmpdict.update(dict({configlist[j]: prilist[j]}))\n pridict.update(dict({instcls: tmpdict}))", "def __init__(self):\n self._inst = {}", "def __init__(self, samples_per_class=10, n_classes=10, n_features=1):\n self.samples_per_class = samples_per_class\n self.n_classes = n_classes\n self.n_features = n_features\n\n # Create a dataframe to be consistent with other Datasets\n self.df = pd.DataFrame({\n 'class_id': [i % self.n_classes for i in range(len(self))]\n })\n self.df = self.df.assign(id=self.df.index.values)", "def __init__(self):\n\n self._stationId = None # shortName like HTM or SE-NOR\n self._valid = False # if stationId is set and valid, return True\n self._name = None # longName\n self._theme = None # AS | ES | OS\n self._icosclass = None # 1 | 2 | Associated\n self._siteType = None # description of site\n\n # locations\n self._lat = None # latitude\n self._lon = None # longitude\n self._eas = None # elevation above sea level\n\n # pi information\n self._firstName = None # Station PI first name\n self._lastName = None # Station PI last name\n self._email = None # Station PI email\n\n # other information\n self._country = None\n self._project = None # list, project affiliation,\n self._uri = None # list, links to ressources, landing pages\n\n # data and products\n self._datacheck = False # check if data and products have been asked for already\n self._data = None # list of associated data objects\n self._products = None # list of available products", "def load(cls, data):\n return cls(**data)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def __init__(self, db_handler_class=DBhandler, normalizer=Normalizer.NONE):\n assert isinstance(db_handler_class, type)\n\n self.train_feats, self.train_labels, self.test_feats, self.test_labels = \\\n db_handler_class(normalizer)()", "def __init__(self, data_config):\n self._brands = self._load_from_directory(data_config['targeted_brands_dir'])\n self._keywords = self._load_from_directory(data_config['keywords_dir'])\n self._fqdn_keywords = 
self._load_from_directory(data_config['fqdn_keywords_dir'])\n self._similarity_words = self._load_from_directory(data_config['similarity_words_dir'])\n self._tlds = self._load_from_directory(data_config['tld_dir'])", "def __init__(self, maps):\n self._maps = maps", "def from_data(cls, data):\n # Validation\n if data.get(\"_Serializable_classname\") != cls.__name__:\n return None\n del data[\"_Serializable_classname\"]\n if data.get(\"_Serializable_version\") is not None:\n del data[\"_Serializable_version\"]\n\n this = cls(None)\n this.__dict__.update(data)\n return this", "def setup_class(cls):\n tensor1 = np.array([32.0, 4096.0], np.float32)\n name1 = \"Parameter.data-1.0.0.\"\n info1 = d.TensorInfo(node_name=\"Default--data-1\",\n slot=0, iteration=0, rank_id=0, root_graph_id=0, is_output=True)\n tensor_name = [name1]\n cls.tensor_info = [info1]\n tensor_list = [tensor1]\n cls.temp_dir = build_dump_structure_with_constant(tensor_name, tensor_list, \"Cst\", cls.tensor_info)", "def __init__(self, **kwargs):\n self.club_id = kwargs[\"club_id\"]\n self.club_name = kwargs[\"club_name\"]\n\n self.contact_name = kwargs[\"contact_name\"]\n self.address_1 = kwargs[\"address_1\"]\n self.address_2 = kwargs[\"address_2\"]\n self.city = kwargs[\"city\"]\n self.state = kwargs[\"state\"]\n self.zipcode = kwargs[\"zipcode\"]\n self.phone_number = kwargs[\"phone_number\"]\n\n self.division = kwargs[\"division\"]\n self.ncca_conf = kwargs[\"ncca_conf\"]", "def from_data(cls, data):\n return object.__new__(cls)", "def _construct_instance(cls, names, values):\r\n field_dict = dict((cls._db_map.get(k, k), v) for k, v in zip(names, values))\r\n if cls._is_polymorphic:\r\n poly_key = field_dict.get(cls._polymorphic_column_name)\r\n\r\n if poly_key is None:\r\n raise PolyMorphicModelException('polymorphic key was not found in values')\r\n\r\n poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base\r\n\r\n klass = poly_base._get_model_by_polymorphic_key(poly_key)\r\n if klass is None:\r\n poly_base._discover_polymorphic_submodels()\r\n klass = poly_base._get_model_by_polymorphic_key(poly_key)\r\n if klass is None:\r\n raise PolyMorphicModelException(\r\n 'unrecognized polymorphic key {} for class {}'.format(poly_key, poly_base.__name__)\r\n )\r\n\r\n if not issubclass(klass, cls):\r\n raise PolyMorphicModelException(\r\n '{} is not a subclass of {}'.format(klass.__name__, cls.__name__)\r\n )\r\n\r\n field_dict = {k: v for k, v in field_dict.items() if k in klass._columns.keys()}\r\n\r\n else:\r\n klass = cls\r\n\r\n instance = klass(**field_dict)\r\n instance._is_persisted = True\r\n return instance", "def __init_subclass__(cls) -> None:\n super().__init_subclass__()\n dataclass(cls)", "def from_dict(self, data):\n for field in [\"first_name\", \"last_name\", \"username\", \n \"email\", \"city\", \"state\", \"active_plan\"]:\n if field in data:\n setattr(self, field, data[field])", "def __init__(self, space):\n self.mapper = {}\n self.names = []\n\n for param in space:\n # Set the order for the parameters\n if param['name'] in self.names:\n raise ValueError(\"Duplicated name {}\".format(param['name']))\n self.names.append(param['name'])\n\n # Construct a mapper for conversion of discrete and categorical\n if param[\"type\"] == TYPE.CATEGORICAL or param[\"type\"] is TYPE.DISCRETE:\n self.mapper[param['name']] = sorted(set(param['values']))", "def __init__(self, **kwargs):\n cls = self.__class__\n\n # Initialize all configurables and input arguments\n for arg in cls.configurables():\n try: # Read 
from class constructor\n setattr(self, arg, kwargs[arg])\n except KeyError:\n try: # Set from default value defined in class\n default_value = getattr(self, arg).kwargs[\"default\"]\n setattr(self, arg, default_value)\n except KeyError: # if nothing is provided, fallbakcs to None\n setattr(self, arg, None)\n\n self.input_arguments = None\n if cls.input_configurables():\n self.input_arguments = [\n getattr(self, arg) for arg in cls.input_configurables()\n ]\n\n self.json_config = cfg.JsonConfig(self.config)\n self.output_objects = []\n self.file = None", "def __init__(self, klass, *args, **kwargs):\n self._klass = klass(*args, **kwargs)", "def make_receptor(cls, receptor_template_class, domain_of_values_synsets):\n receptor_mixin_instance = receptor_template_class()\n receptor_mixin_instance.CANONIC_DOMAIN = domain_of_values_synsets.keys()\n receptor_mixin_instance.synsets = domain_of_values_synsets\n receptor_mixin_instance.flat_norm = ReceptorFactory.synsets_to_flat_norm_index(domain_of_values_synsets)\n return receptor_mixin_instance", "def __init__(self, preston, base_url):\n self.data = {}\n self._preston = preston\n self.base_url = base_url", "def setUpClass(cls):\n values = {'A': 'a', 'B': 'b'}\n dummy_record = MetadataRecord(**values)\n cls.records = [dummy_record]", "def setUpClass(cls):\n v = {\n \"A[2,8]\": \"VC\",\n \"H[2,8]\": \"MS\",\n \"1-4[B,G]\": [\"MLR\", \"NT\", \"1.1\", \"1.2\"],\n \"E-G[8,10]\": [\"Val1\", \"Val2\", \"Val3\"],\n }\n v1 = {\n \"A[2,8]\": \"VC1\",\n \"H[2,8]\": \"MS1\",\n \"1-4[B,G]\": [\"MLR1\", \"NT1\", \"1.3\", \"1.4\"],\n \"E-G[8,10]\": [\"Val4\", \"Val5\", \"Val6\"],\n }\n v2 = {\n \"A[2,8]\": \"Top\",\n \"H[2,8]\": \"MS\",\n \"1-4[B,G]\": [\"MLR\", \"NT\", \"1.1\", \"1.2\"],\n \"E-G[8,10]\": [\"Val1\", \"Val2\", \"Val3\"],\n }\n v3 = {\n \"A[2,8]\": \"Bot\",\n \"H[2,8]\": \"MS1\",\n \"1-4[B,G]\": [\"MLR1\", \"NT1\", \"1.3\", \"1.4\"],\n \"E-G[8,10]\": [\"Val4\", \"Val5\", \"Val6\"],\n }\n cls.plt = BioPlate(12, 8)\n cls.plt.set(v)\n cls.plt1 = BioPlate(12, 8)\n cls.plt1.set(v1)\n cls.stack = cls.plt + cls.plt1\n cls.Inserts = BioPlate(12, 8, inserts=True)\n cls.Inserts.top.set(v)\n cls.Inserts.bot.set(v3)\n cls.Inserts1 = BioPlate(12, 8, inserts=True)\n cls.Inserts1.bot.set(v1)\n cls.Inserts1.top.set(v2)\n cls.stacki = cls.Inserts + cls.Inserts1", "def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])", "def create_class(self):\n temp_class = self.temp('separated.class')\n return temp_class.format(**self.__dict__)", "def __init__(self):\r\n self.unique_classes = []\r\n self.total_classes_number = 0\r\n self.class_number_dict = {}\r\n self.unique_word_number = 0\r\n self.class_word_number_dict = {}\r\n self.class_total_words_dict = {}", "def build(cls, **kwargs):\n new_object = cls()\n fields = 
get_fields(cls)\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(new_object, name, value)\n \n return new_object", "def __init__(self, raw_data: Dict) -> None:\n self.raw_data = raw_data\n self.__extract_common_attrs(raw_data)\n\n # Fetch data with DATA_KEY or simply use the initial data.\n # In some cases the DATA_KEY is the same as the object attribute.\n # For example:\n # \"comments\": [{\n # \"comment_id\": 44444,\n # \"comment\": \"Hello, world!\"\n # }]\n # This object has a `comment` attribute but its DATA_KEY is also `comment`:\n # \"comment\": {\"comment_id\": 44444,\n # \"key_id\": 12345,\n # \"comment\": \"This is a test.\"}\n # This is an edge case happening only twice, so to overcome it\n # just check the value type under the given key.\n if self.DATA_KEY in raw_data and \\\n (isinstance(raw_data[self.DATA_KEY], dict)):\n data = raw_data[self.DATA_KEY]\n else:\n data = raw_data\n\n for attr in self.ATTRS:\n setattr(self, attr, data.get(attr, None))", "def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def class_to_db(self):", "def __init__(self, dataset, device, tokenizer=None, max_support_size=128, subset_classes=True, temp_map=True):\n\n self.data = dataset\n self.device = device\n self.classes = list(self.data.keys())\n self.tokenizer = tokenizer\n\n self.subset_classes = subset_classes\n self.max_support_size = max_support_size\n self.temp_map = temp_map", "def __init__(self):\n\n data_extract=DataExtracter()\n self.data = tuple()", "def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()", "def __init__(self):\n self.map = [None] * 103", "def prepare(self, time_range=None):\n if self.ref == None:\n self.load()\n\n ########DO PREPARING###########", "def __init__(self, factory, *args, **kwargs):\n super(HelperDict, self).__init__(*args, **kwargs)\n self.factory = factory", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def __init__(self, data_dir, split, transform=None, target_transform=None, keep_difficult=False):\n self.data_dir = data_dir\n self.split = split\n self.transform = transform\n self.target_transform = target_transform\n image_sets_file = os.path.join(self.data_dir, \"ImageSets\", \"Main\", \"%s.txt\" % self.split)\n self.ids = VOCDataset._read_image_ids(image_sets_file)\n self.keep_difficult = keep_difficult\n\n self.class_dict = {class_name: i for i, class_name in enumerate(self.class_names)}", "def __init__(self):\n self.model = self._get_model()\n\n # NOTE: The order of this list hardcoded here, and needs to be changed when re-training the model!\n # When exporting the model in tflite format, the model_spec is lost, so we cannot do it like that:\n # classes = ['???'] * model.model_spec.config.num_classes\n # label_map = model.model_spec.config.label_map\n # for label_id, label_name in label_map.as_dict().items():\n # classes[label_id-1] = label_name\n self.classes = ['Baked Goods', 'Salad', 'Cheese', 'Seafood', 'Tomato']", "def from_dict(data_class: Type[T], data: Data, config: Optional[Config] = None) -> T:\n init_values: MutableMapping[str, Any] = {}\n post_init_values: MutableMapping[str, Any] = 
{}\n config = config or Config()\n try:\n data_class_hints = cache(get_type_hints)(data_class, localns=config.hashable_forward_references)\n except NameError as error:\n raise ForwardReferenceError(str(error))\n data_class_fields = cache(get_fields)(data_class)\n if config.strict:\n extra_fields = set(data.keys()) - {f.name for f in data_class_fields}\n if extra_fields:\n raise UnexpectedDataError(keys=extra_fields)\n for field in data_class_fields:\n field_type = data_class_hints[field.name]\n if field.name in data:\n try:\n field_data = data[field.name]\n value = _build_value(type_=field_type, data=field_data, config=config)\n except DaciteFieldError as error:\n error.update_path(field.name)\n raise\n if config.check_types and not is_instance(value, field_type):\n raise WrongTypeError(field_path=field.name, field_type=field_type, value=value)\n else:\n try:\n value = get_default_value_for_field(field, field_type)\n except DefaultValueNotFoundError:\n if not field.init:\n continue\n raise MissingValueError(field.name)\n if field.init:\n init_values[field.name] = value\n elif not is_frozen(data_class):\n post_init_values[field.name] = value\n instance = data_class(**init_values)\n for key, value in post_init_values.items():\n setattr(instance, key, value)\n return instance", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def _get_constructor_parameters(self) -> Dict[str, Any]:\n return dict(\n obs_space=self.obs_space,\n action_space=self.action_space,\n scale_imgs=self.scale_imgs,\n )", "def __init__(self, map):\n\n self.map = map\n\n self.ip = None\n self.name = None\n self.expiry = None\n self.expires = None\n self.created = None", "def _from_dict(self, data=None):\n for key in self.shopkeys:\n setattr(self, key, getattr(data, key))" ]
[ "0.70301276", "0.7003586", "0.6602486", "0.6218813", "0.61369634", "0.612211", "0.5974682", "0.5971825", "0.59364706", "0.59063816", "0.5905724", "0.5905724", "0.5896591", "0.58913803", "0.58463514", "0.5846305", "0.58299506", "0.57877094", "0.57877094", "0.5729577", "0.5678805", "0.567838", "0.56561744", "0.5649519", "0.5647338", "0.5646633", "0.5644483", "0.56113327", "0.56069493", "0.56000304", "0.55648565", "0.5559308", "0.5559014", "0.5551938", "0.5546657", "0.55411506", "0.55388635", "0.5535637", "0.5530203", "0.55300367", "0.5526318", "0.55224633", "0.55191433", "0.5517039", "0.551335", "0.54895246", "0.54810137", "0.5472936", "0.54725665", "0.54618615", "0.5459998", "0.5459998", "0.5456459", "0.5440151", "0.54316735", "0.54238296", "0.54224336", "0.5420791", "0.5420791", "0.5420791", "0.54168755", "0.5412648", "0.5410277", "0.5408634", "0.54075956", "0.5407035", "0.5403523", "0.5403483", "0.5401794", "0.53993756", "0.53967303", "0.5386533", "0.5376658", "0.5375714", "0.5371268", "0.5370676", "0.5370457", "0.53699857", "0.5369859", "0.53690165", "0.5365393", "0.5364539", "0.5364071", "0.5363748", "0.5363132", "0.5358209", "0.5354872", "0.53545076", "0.53531426", "0.5349313", "0.53465074", "0.53461385", "0.53446513", "0.53362066", "0.53353196", "0.53297013", "0.53290415", "0.5322215", "0.53145045", "0.5310175" ]
0.6651037
2
Evaluates the model based on classification accuracy. Receives the logits that are output from the network and saves the result in the given output directory file.
def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32): sampler = SequentialSampler(dataset) dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size) print("*** Evaluating ***") eval_loss = 0.0 num_steps = 0 preds = None out_label_ids = None for i, batch in enumerate(dataloader): if i % 200 == 199: print("=", end="") if i % 5000 == 4999: print("[Step " + str(i+1) + " / " + str(len(dataloader)) + "] " ) model.eval() batch = tuple(t.to(device) for t in batch) with torch.no_grad(): labels = batch[3] outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() num_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = labels.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / num_steps preds_label = np.argmax(preds, axis=1) accuracy = (preds_label == out_label_ids).mean() output_dir = os.path.dirname(output_test_file) if not os.path.exists(output_dir): os.makedirs(output_dir) with open(output_test_file, "w") as writer: all_logits = preds.tolist() for i, logit in enumerate(all_logits): line = '<CODESPLIT>'.join( [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]]) writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\n') print("Accuracy =", str(accuracy)) return accuracy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_model(self, model, testX_norm, testY_bin, batch_size, label_names, n_epochs, output_filename):\n # Predictions\n predictions = model.predict(testX_norm, batch_size=batch_size)\n \n # Classification report\n classification = classification_report(testY_bin.argmax(axis=1),\n predictions.argmax(axis=1),\n target_names=label_names)\n \n # Print classification report\n print(classification)\n \n # Save classification report\n output_path = os.path.join(\"..\", \"output\", output_filename)\n with open(output_path, 'w', encoding='utf-8') as f:\n f.write(f\"Below are the classification metrics for the trained model. Batch size = {batch_size} and number of epochs = {n_epochs}.\\n\\n {classification}\")", "def model_analysis(self, model_name: str, history) -> None:\n # probabilites\n y_pred_prob = self.recognizer.predict(self.X_test)\n # most likely class\n y_pred = np.argmax(y_pred_prob, axis=1)\n # compare true and predicted classes on test set\n\n # path handling for writing to file\n output_dir = Path(os.environ[\"MODEL_DATA\"]) / model_name\n out_name = \"classification_report.txt\"\n out_path = output_dir / out_name\n\n acc = history.history[\"accuracy\"]\n val_acc = history.history[\"val_accuracy\"]\n loss = history.history[\"loss\"]\n val_loss = history.history[\"val_loss\"]\n\n epochs = range(1, len(acc) + 1)\n\n # plot accuracies and losses with respect to epochs\n plt.plot(epochs, acc, \"r\", label=\"Train accuracy\")\n plt.plot(epochs, val_acc, \"b\", label=\"Val accuracy\")\n\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.legend()\n\n plt.savefig(output_dir / \"acc-plot\")\n\n plt.figure()\n plt.plot(epochs, loss, \"r\", label=\"Training loss\")\n plt.plot(epochs, val_loss, \"b\", label=\"Val loss\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend()\n\n plt.savefig(output_dir / \"loss-plot\")\n\n # create, print and write to file a sklearn classification report\n print(set(self.y_test) - set(y_pred))\n report = classification_report(self.y_test, y_pred)\n print(report)\n with open(out_path, \"w\") as f:\n f.write(report)\n\n self.make_heatmap(y_pred, output_dir)", "def train_and_evaluate(self) -> None:\n with tf.Session() as self.sess:\n # Initialize computation graph.\n self.create_model()\n\n # Initialize variables.\n tf.global_variables_initializer().run()\n\n # Initialize summary writer.\n self.writer = tf.summary.FileWriter(logdir='conv_vis')\n\n for epoch_no in range(self.nb_epochs):\n # Train model on next batch\n batch_x, batch_y = self.mnist.train.next_batch(self.mb_size)\n results = self.train_on_batch(batch_x, batch_y, global_step=epoch_no)\n\n if epoch_no > 0 and epoch_no % self.lr_decay_time == 0:\n # Test on all samples.\n self.test_on_all()\n # Perform learning rate decay.\n self.learning_rate /= 2\n if epoch_no % 100 == 0:\n self.logger.info(\"Epoch {0}: Loss: {1[0]}, accuracy: {1[1]}\".format(epoch_no, results))\n batch_x_t, batch_y_t = self.mnist.test.next_batch(self.mb_size)\n test_results = self.test_on_batch(batch_x_t, batch_y_t)\n self.logger.info(\"(Test(batch): Loss: {0[0]}, accuracy: {0[1]}\".format(test_results))\n self.test_on_all()\n\n # Save the trained model with all valuable variables.\n saver = tf.train.Saver()\n saver.save(sess=self.sess, save_path='./saved_model', global_step=epoch_no)", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = 
self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate(probs, y_test, output_folder, file_prefix='test', model_names=None):\n colours = ['b', 'g', 'm', 'c', 'y', 'r', 'k']\n\n if not os.path.isdir(output_folder):\n os.makedirs(output_folder)\n test_log = open(output_folder + '/' + file_prefix + '.log', 'w+')\n\n fprs, tprs, aucs = [], [], []\n for prob, model_name in zip(probs, model_names):\n test_log.write(model_name + \"\\n\\n\")\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n fpr, tpr, thr = roc_curve(y_test, prob[:, 1])\n ## find best threshold : http://www.medicalbiostatistics.com/roccurve.pdf\n dist = np.sqrt((1. - tpr) ** 2 + (fpr) ** 2)\n best_thr = thr[np.argmin(dist)]\n best_thr_pred = (prob[:,1] > best_thr) * 1\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"F1 score (thrs : {:.3f}) : \".format(best_thr) + str(f1_score(y_test, best_thr_pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n if len(probs) > 1:\n model_names.extend(['mean', 'geom_mean'])\n test_log.write(\"Ensemble (mean)\\n\\n\")\n prob = (np.array(probs).sum(axis=0) / 2)\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n fpr, tpr, _ = roc_curve(y_test, prob[:, 1])\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n test_log.write(\"Ensemble (geom. 
mean)\\n\\n\")\n prob = (np.array(probs).prod(axis=0) / np.array(probs).prod(axis=0).sum(axis=1)[:, np.newaxis])\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n fpr, tpr, _ = roc_curve(y_test, prob[:, 1])\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n #plt.figure(figsize=(15, 15))\n for fpr, tpr, roc_auc, col, name in zip(fprs, tprs, aucs, colours, model_names):\n plt.plot(fpr, tpr, col, label='[%s] AUC = %0.5f' % (name, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(output_folder + '/' + file_prefix + '_auc.png')\n plt.close()\n\n test_log.close()", "def run_classification_models(train,test,metric_file_path,classes):\n metric_names = ['accuracy','weightedRecall','weightedPrecision']\n f = open(metric_file_path,'w')\n f.write('model,'+','.join(metric_names)+'\\n')\n name = 'Logistic Regression'\n model = LogisticRegression()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.regParam,[0,.25,.5]) \\\n .addGrid(model.elasticNetParam,[0,.25,.5])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel\n print name\n print '\\t Best regParam (lambda): %.2f'%best_model._java_obj.getRegParam()\n print '\\t Best elasticNetparam (alpha): %.2f'%best_model._java_obj.getElasticNetParam()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Decision Tree'\n model = DecisionTreeClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.maxBins,[8,16,32])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best maxBins: %d'%best_model._java_obj.getMaxBins()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Random Forest'\n model = RandomForestClassifier(seed=7)\n param_grid = ParamGridBuilder()\\\n .addGrid(model.maxDepth,[5,10,15]) \\\n .addGrid(model.numTrees,[10,15,20])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best maxDepth: %d'%best_model._java_obj.getMaxDepth()\n print '\\t Best numTrees: %d'%best_model._java_obj.getNumTrees()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n name = 'One vs Rest'\n model = OneVsRest(classifier=LogisticRegression()).fit(train)\n print name\n 
eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Naive Bayes'\n model = NaiveBayes()\n param_grid = ParamGridBuilder()\\\n .addGrid(model.smoothing,[.5,1,2])\\\n .build()\n model_cv = CrossValidator(\n estimator = model,\n estimatorParamMaps = param_grid,\n evaluator = MulticlassClassificationEvaluator(),\n numFolds = 3,\n seed = 7).fit(train)\n best_model = model_cv.bestModel \n print name\n print '\\t Best smoothing: %.1f'%best_model._java_obj.getSmoothing()\n eval_model(f,name,model_cv,test,MulticlassClassificationEvaluator,metric_names)\n if classes == 2:\n name = 'Gradient Boosted Trees'\n model = GBTClassifier(seed=7).fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names)\n name = 'Linear Support Vector Machine'\n model = LinearSVC().fit(train)\n print name\n eval_model(f,name,model,test,MulticlassClassificationEvaluator,metric_names) \n f.close()", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def 
evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. 
'\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def test_classifier(self):\n \n files = 0\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n \n for testFile in os.listdir(self.testing_dir):\n if os.path.splitext(testFile)[1] in self.valid:\n\n files += 1\n fileName = self.testing_dir + \"/\" + testFile\n\n img = cv2.imread(fileName).astype(np.float32)\n self.Helpers.logger.info(\"Loaded test image \" + fileName)\n \n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n prediction = self.get_predictions(img)\n \n msg = \"\"\n if prediction == 1 and \"_1.\" in testFile:\n tp += 1\n msg = \"Acute Lymphoblastic Leukemia correctly detected (True Positive)\"\n elif prediction == 1 and \"_0.\" in testFile:\n fp += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly detected (False Positive)\"\n elif prediction == 0 and \"_0.\" in testFile:\n tn += 1\n msg = \"Acute Lymphoblastic Leukemia correctly not detected (True Negative)\"\n elif prediction == 0 and \"_1.\" in testFile:\n fn += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly not detected (False Negative)\"\n self.Helpers.logger.info(msg)\n \n self.Helpers.logger.info(\"Images Classifier: \" + str(files))\n self.Helpers.logger.info(\"True Positives: \" + str(tp))\n self.Helpers.logger.info(\"False Positives: \" + str(fp))\n self.Helpers.logger.info(\"True Negatives: \" + str(tn))\n self.Helpers.logger.info(\"False Negatives: \" + str(fn))", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n 
save_path=save_path,\n class_names=class_names,\n )", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred_grid = model.predict(X_test)\n print(\n classification_report(Y_test.values, y_pred_grid, target_names=category_names)\n )", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n display_results(Y_test, Y_pred)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n Y_pred = pd.DataFrame(Y_pred, columns=category_names)\n \n # calculate summary stats on test data\n results = pd.DataFrame()\n for column_name in Y_pred.columns:\n col_report = classification_report(y_true=Y_test[[column_name]], y_pred=Y_pred[[column_name]], output_dict=True)\n accuracy = col_report['accuracy']\n precision = col_report['macro avg']['precision']\n recall = col_report['macro avg']['recall']\n results[column_name] = [accuracy, precision, recall]\n results.index = ['accuracy', 'precision', 'recall']\n results.mean(axis=1) \n \n # save results to local csv file\n model_name = type(model.best_params_['clf']).__name__\n avg_accuracy = results.mean(axis=1)['accuracy']\n avg_precision = results.mean(axis=1)['precision']\n avg_recall = results.mean(axis=1)['recall']\n params = model.best_params_\n stored_results = pd.DataFrame({'Model': [model_name], 'Accuracy': [avg_accuracy], 'Precision': [avg_precision], \n 'Recall': [avg_recall], 'Parameters': [params]})\n\n add_header = not os.path.isfile('models/model_results.csv')\n with open('models/model_results.csv', 'a') as f:\n stored_results.to_csv(f, header=add_header, index=False)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=Y_test.keys()))", "def evaluate_model(model, X_test, Y_test, category_names):\n \n Y_pred = model.predict(X_test)\n \n print(classification_report(Y_test.values, Y_pred, target_names=category_names))", "def test_evaluate(self):\n tf.reset_default_graph()\n if os.path.isdir(self.training_dir):\n shutil.rmtree(self.training_dir)\n\n nas_trainer = DefaultNASTrainer(\n encoded_network=self.net_nsc,\n input_shape=infer_data_shape(self.train_data),\n n_classes=infer_n_classes(self.train_labels),\n batch_size=self.batch_size,\n log_path=self.training_dir,\n variable_scope=\"cnn\"\n )\n\n nas_trainer.train(\n train_data=self.train_data,\n train_labels=self.train_labels,\n train_input_fn=\"default\"\n )\n\n res = nas_trainer.evaluate(\n eval_data=self.eval_data,\n eval_labels=self.eval_labels,\n eval_input_fn=\"default\"\n )\n\n self.assertTrue(os.path.isdir(self.training_dir))\n self.assertTrue(\"accuracy\" in list(res.keys()))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=category_names))\n pass", "def evaluate_model(model, X_test, Y_test, category_names):\n\n logging.info(\"run evaluate_model\")\n\n # find current foler path for savings\n folder_path = os.path.dirname(__file__)\n\n # predict outputs on test data\n Y_pred = model.predict(X_test)\n\n # create classification report with precision, recall, and F1 score for each categories\n clf_report_df = 
pd.DataFrame(classification_report(Y_test, Y_pred,\n target_names=category_names, output_dict=True)).T\n clf_report_df.to_markdown(buf=os.path.join(folder_path,'test','classification_report.md'), mode='w')\n\n # calculate confusion matrix for each categories and save corresponding heatmap plots\n conf_matrix_df = multilabel_confusion_matrix(Y_test, Y_pred)\n plot_confusion_matrix(conf_matrix_df, category_names,\n os.path.join(folder_path,'test','confusion_matrix.png'))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n y_actu = Y_test.values\n\n results_dict = {}\n for i in range(1, 37):\n predicted = \"pred_\" + str(i)\n actual = \"actu_\" + str(i)\n pred_values = []\n actu_values = []\n for ii in range(len(y_pred)):\n\n pred_values.append(int(y_pred[ii][i-1]))\n actu_values.append(int(y_actu[ii][i-1]))\n\n results_dict[predicted] = pred_values\n results_dict[actual] = actu_values\n\n for i in range(1, 37):\n pred = results_dict['pred_' + str(i)]\n actu = results_dict['actu_' + str(i)]\n\n print(\"\\n### \" + category_names[i-1] + \" ###\\n\")\n print(classification_report(pred, actu))", "def evaluate(model, datagen, X_test, Y_test, batch_size, save_folder_path=None):\n\n print(\"[INFO] Evaluating model...\")\n\n scores = model.evaluate_generator(\n datagen.flow(X_test, Y_test, batch_size=batch_size),\n verbose=1)\n \n print(\"[INFO] Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n \n if save_folder_path is not None:\n # Write results to path\n assert os.path.isdir(save_folder_path) == True, \"Unable to save evaluation results, save_folder_path is not a folder\"\n eval_results_path = save_folder_path + \"/eval_results.txt\"\n eval_handle = open(eval_results_path, 'w')\n eval_handle.write(\"Model name: {}\\n\\n\".format(MODEL_NAME))\n eval_handle.write(\"Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n eval_handle.close()", "def evaluate_model(model, X_test, y_test, category_names):\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + 
self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate(self, model, y_test, y_pred, softmax_predictions,\n config_dict, y_paths):\n print('Model metrics: ', model.metrics_names)\n assert(y_test.shape == y_pred.shape)\n\n if len(y_pred.shape) > 2: # If sequential data\n y_pred, paths = get_majority_vote_3d(y_pred, y_paths)\n softmax_predictions, _ = get_majority_vote_3d(softmax_predictions, y_paths)\n y_test, _ = get_majority_vote_3d(y_test, y_paths)\n # self.look_at_classifications(y_test, y_pred, paths, softmax_predictions)\n nb_preds = len(y_pred)\n nb_tests = len(y_test)\n\n if nb_preds != nb_tests:\n print(\"Warning, number of predictions not the same as the length of the y_test vector.\")\n print(\"Y test length: \", nb_tests)\n print(\"Y pred length: \", nb_preds)\n if nb_preds < nb_tests:\n y_test = y_test[:nb_preds]\n else:\n y_pred = y_pred[:nb_tests]\n\n # Print labels and predictions.\n print('y_test.shape: ', y_test.shape)\n print('y_pred.shape: ', y_pred.shape)\n # print('y_test and y_test.shape: ', y_test, y_test.shape)\n # print('y_pred and y_pred.shape: ', y_pred, y_pred.shape)\n\n self.print_and_save_evaluations(y_test, y_pred, softmax_predictions, config_dict)", "def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()", "def score(self, experiment_path: str, result_file: str, **kwargs):\n # Update config parameters with new kwargs\n\n config = torch.load(glob.glob(\n \"{}/run_config*\".format(experiment_path))[0],\n map_location=lambda storage, loc: storage)\n config_parameters = dict(config, **kwargs)\n model = torch.load(glob.glob(\n \"{}/run_model*\".format(experiment_path))[0],\n map_location=lambda storage, loc: storage)\n encoder = torch.load(glob.glob(\n '{}/run_encoder*'.format(experiment_path))[0],\n map_location=lambda storage, loc: storage)\n testlabel = config_parameters['testlabel']\n testdata = config_parameters['testdata']\n # Only a single item to 
evaluate\n if isinstance(testlabel, list) and len(testlabel) == 1:\n testlabel = testlabel[0]\n if isinstance(testdata, list) and len(testdata) == 1:\n testdata = testdata[0]\n\n labels_df = pd.read_csv(testlabel, sep=' ')\n labels_df['encoded'], encoder = utils.encode_labels(\n labels=labels_df['bintype'], encoder=encoder)\n config_parameters.setdefault('colname', ('filename', 'encoded'))\n dataloader = dataset.getdataloader(\n data_frame=labels_df,\n data_file=testdata,\n num_workers=4,\n batch_size=1, # do not apply any padding\n colname=config_parameters[\n 'colname'] # For other datasets with different key names\n )\n model = model.to(DEVICE).eval()\n genuine_label_idx = encoder.transform(['genuine'])[0]\n\n with torch.no_grad(), open(result_file,\n 'w') as wp, tqdm(total=len(dataloader),\n unit='utts') as pbar:\n datawriter = csv.writer(wp, delimiter=' ')\n datawriter.writerow(['filename', 'score'])\n for batch in dataloader:\n inputs, _, filenames = batch\n inputs = inputs.float().to(DEVICE)\n preds = model(inputs)\n for pred, filename in zip(preds, filenames):\n # Single batchsize\n datawriter.writerow([filename, pred[0].item()])\n pbar.update()\n print(\"Score file can be found at {}\".format(result_file))", "def classification_evaluation(self, test_set, predicted_values, certainty):\r\n\r\n percent_accuracy = self.percent_accuracy(test_set, predicted_values)\r\n one_zero = self.one_zero_loss(test_set, predicted_values)\r\n log_loss = self.log_loss(test_set, predicted_values, certainty)\r\n print(f\"Percent correct:\\t{percent_accuracy * 100:.2f}%\")\r\n print(f\"1/0 Loss:\\t\\t\\t{one_zero:.2f}\")\r\n print(\"Log Loss: \", log_loss)", "def evaluate(self, X, y):\n\n inputs = np.concatenate((X,-np.ones((np.shape(X)[0],1))),axis=1)\n outputs = self.forwardPass(inputs)\n nclasses = np.shape(y)[1]\n\n # 1-of-N encoding\n outputs = np.argmax(outputs,1)\n targets = np.argmax(y,1)\n\n cm = np.zeros((nclasses,nclasses))\n for i in range(nclasses):\n for j in range(nclasses):\n cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))\n\n print(\"The confusion matrix is:\")\n print(cm)\n self.accuracy = np.trace(cm)/np.sum(cm)*100\n print(\"The accuracy is \",np.trace(cm)/np.sum(cm)*100)", "def train_and_evaluate(OUTPUT_DIR,do_train = True,do_eval=True):\n\n\t\n\tBATCH_SIZE = 32\n\tLEARNING_RATE = 2e-5\n\tNUM_TRAIN_EPOCHS = 5.0\n\n\t#in this steps lr will be low and training will be slow\n\tWARMUP_PROPORTION = 0.1\n\n\n\n\tif os.path.exists(OUTPUT_DIR) and os.listdir(OUTPUT_DIR) and do_train:\n\t\traise ValueError(\"Output directory ({}) already exists and is not empty.\".format(OUTPUT_DIR))\n\tif not os.path.exists(OUTPUT_DIR):\n\t\tos.makedirs(OUTPUT_DIR)\n\t\t\n\t#create train and test data\n\n\ttrain_sents,train_labels,test_sents,test_labels = create_train_test(\"ADE/DRUG-AE.rel\",\"ADE/negative_data_AE.rel\")\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\tif do_train:\n\n\t\ttrain_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(train_sents, train_labels)]\n\t\tnum_train_examples = len(train_examples)\n\n\t\tnum_train_steps = int(math.ceil(num_train_examples / BATCH_SIZE * NUM_TRAIN_EPOCHS))\n\t\tnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\n\t\tmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\",num_labels = 
num_labels)\n\t\tmodel.to(device)\n\n\t\tparam_optimizer = list(model.named_parameters())\n\t\tno_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\t\toptimizer_grouped_parameters = [\n\t\t\t{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n\t\t\t{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n\t\t\t]\n\n\t\toptimizer = BertAdam(optimizer_grouped_parameters,lr=LEARNING_RATE,warmup=WARMUP_PROPORTION,t_total=num_train_steps)\n\n\t\tglobal_step = 0\n\t\tnb_tr_steps = 0\n\t\ttr_loss = 0\n\n\t\ttrain_features = convert_examples_to_features(\n\t\t\ttrain_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\n\t\tlogger.info(\"***** Running training *****\")\n\t\tlogger.info(\" Num examples = %d\", num_train_examples)\n\t\tlogger.info(\" Batch size = %d\", BATCH_SIZE)\n\t\tlogger.info(\" Num steps = %d\", num_train_steps)\n\n\n\t\tall_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n\n\t\ttrain_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\t\ttrain_sampler = RandomSampler(train_data)\n\n\t\ttrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n\n\t\tmodel.train()\n\t\t# for name, param in model.named_parameters():\n\t\t# if param.requires_grad:\n\t\t# print(name)\n\t\t# return\n\t\tfor _ in trange(int(NUM_TRAIN_EPOCHS), desc=\"Epoch\"):\n\t\t\ttr_loss = 0\n\t\t\tnb_tr_examples, nb_tr_steps = 0, 0\n\t\t\tfor step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n\t\t\t\tbatch = tuple(t.to(device) for t in batch)\n\t\t\t\tinput_ids, input_mask, segment_ids, label_id = batch\n\t\t\t\tloss = model(input_ids, segment_ids, input_mask, label_id)\n\t\t\t\tloss.backward()\n\n\t\t\t\ttr_loss += loss.item()\n\t\t\t\tnb_tr_examples += input_ids.size(0)\n\t\t\t\tnb_tr_steps += 1\n\t\t\t\toptimizer.step()\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\tglobal_step += 1\n\t\t\tprint(tr_loss)\n\n\t\t# Save a trained model and the associated configuration\n\t\tmodel_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\ttorch.save(model_to_save.state_dict(), output_model_file)\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\twith open(output_config_file, 'w') as f:\n\t\t\tf.write(model_to_save.config.to_json_string())\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)} \n\t\tmodel_config = {\"bert_model\":\"bert-base-uncased\",\"do_lower\":True,\"max_seq_length\":MAX_SEQ_LENGTH,\"num_labels\":num_labels,\"label_map\":label_map}\n\t\tjson.dump(model_config,open(os.path.join(OUTPUT_DIR,\"model_config.json\"),\"w\"))\n\n\telse:\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\tconfig = BertConfig(output_config_file)\n\t\tmodel = BertForSequenceClassification(config, num_labels=num_labels)\n\t\tmodel.load_state_dict(torch.load(output_model_file))\n\n\tmodel.to(device)\n\n\tif do_eval:\n\n\t\tEVAL_BATCH_SIZE = 32\n\n\t\teval_examples = 
[InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(test_sents, test_labels)]\n\t\tnum_eval_examples = len(eval_examples)\n\n\t\teval_features = convert_examples_to_features(\n\t\t\teval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\t\tlogger.info(\"***** Running evaluation *****\")\n\t\tlogger.info(\" Num examples = %d\", num_eval_examples)\n\t\tlogger.info(\" Batch size = %d\", EVAL_BATCH_SIZE)\n\t\tall_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\t\teval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) \n\t\t# # Run prediction for full data\n\t\teval_sampler = SequentialSampler(eval_data)\n\t\teval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE)\n\t\tmodel.eval()\n\n\t\teval_loss, eval_accuracy = 0, 0\n\t\tnb_eval_steps, nb_eval_examples = 0, 0\n\t\ty_true = []\n\t\ty_pred = []\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)}\n\t\tfor input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n\t\t\tinput_ids = input_ids.to(device)\n\t\t\tinput_mask = input_mask.to(device)\n\t\t\tsegment_ids = segment_ids.to(device)\n\t\t\tlabel_ids = label_ids.to(device)\n\n\t\t\twith torch.no_grad():\n\t\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\t\t\t\t\n\t\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\tlabel_ids = label_ids.to('cpu').numpy()\n\t\t\ty_pred.extend(logits)\n\t\t\ty_true.extend(label_ids)\n\t\tprint(len(y_pred))\n\t\tprint(len(y_true))\n\t\treport = classification_report(y_true, y_pred)\n\t\toutput_eval_file = os.path.join(OUTPUT_DIR, \"eval_results.txt\")\n\t\twith open(output_eval_file, \"w\") as writer:\n\t\t\tlogger.info(\"***** Eval results *****\")\n\t\t\tlogger.info(\"\\n%s\", report)\n\t\t\twriter.write(report)", "def class_accuracy(model, dataset, filename, batchsize=2):\n labels = ['yes','no','up','down','left','right','on','off','stop','go','unknown','silence']\n class_correct = list(0. for i in range(12))\n class_total = list(0. 
for i in range(12))\n model.eval()\n dataloader = DataLoader(dataset, batch_size = batchsize, drop_last = False)\n with torch.no_grad():\n for i_batch, batch in enumerate(dataloader):\n outputs = model(batch['audio'])\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == batch['label'].to(DEVICE)).squeeze()\n\n for i in range(batchsize):\n label = batch['label'][i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n with open(filename, 'w') as myFile:\n for i in range(12): \n myFile.write('Accuracy of %5s : %2d %%' % (\n labels[i], 100 * class_correct[i] / class_total[i])+'\\n')\n model.train()", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def evaluate_model(self, predictions, expected, bypass_data_to_eval):\n coco_api = COCO(annotation_file=self._args.annotation_path)\n image_ids = coco_api.getImgIds()\n\n coco_detections = []\n for i, image_id in enumerate(image_ids):\n coco_img = coco_api.imgs[image_id]\n image_width = coco_img['width']\n image_height = coco_img['height']\n\n for j in range(int(predictions['num_detections'][i])):\n bbox = predictions['boxes'][i][j]\n y1, x1, y2, x2 = list(bbox)\n bbox_coco_fmt = [\n x1 * image_width, # x0\n y1 * image_height, # x1\n (x2-x1) * image_width, # width\n (y2-y1) * image_height, # height\n ]\n coco_detection = {\n 'image_id': image_id,\n 'category_id': int(predictions['classes'][i][j]),\n 'bbox': [int(coord) for coord in bbox_coco_fmt],\n 'score': float(predictions['scores'][i][j])\n }\n coco_detections.append(coco_detection)\n\n # write coco detections to file\n tmp_dir = \"/tmp/tmp_detection_results\"\n\n try:\n shutil.rmtree(tmp_dir)\n except FileNotFoundError:\n pass\n\n os.makedirs(tmp_dir)\n\n coco_detections_path = os.path.join(tmp_dir, 'coco_detections.json')\n with open(coco_detections_path, 'w') as f:\n json.dump(coco_detections, f)\n\n cocoDt = coco_api.loadRes(coco_detections_path)\n\n shutil.rmtree(tmp_dir)\n\n # compute coco metrics\n eval = COCOeval(coco_api, cocoDt, 'bbox')\n eval.params.imgIds = image_ids\n\n eval.evaluate()\n eval.accumulate()\n eval.summarize()\n\n return eval.stats[0] * 100, \"mAP %\"", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n for x in range(0, len(category_names)):\n print(category_names[x])\n print(classification_report(Y_test[:,x], y_pred[:,x]))\n print(\"Accuracy: \" + str(accuracy_score(Y_test[:, x], y_pred[:, x])))", "def evaluate_model(model, X_test, Y_test, category_names):\n\n # Predict labels using model\n Y_pred = model.predict(X_test)\n\n # Generate accuracy report\n report = pd.DataFrame.from_dict(classification_report(Y_test, Y_pred,\n target_names=category_names, output_dict=True))\n report = pd.DataFrame.transpose(report)\n\n print(report)", "def _evaluate(self,\n logits,\n predictions,\n data, evaluation_fn,\n max_eval_batches=None,\n calculate_scores=True,\n write_results=False):\n # counting the evaluation batches\n num_eval_batches = 0\n # logits and predictions from the model\n all_logits = []\n all_predictions = []\n # fetched data that led to the predictions\n # dictionary of {seq_1: [], seq_2: [], target: []}\n all_fetched_data = collections.defaultdict(list)\n try:\n while True:\n # sample predictions\n (fetched_logits,\n fetched_predictions,\n fetched_data) = self._fetch_data_batch(\n 
logits=logits, predictions=predictions, data=data)\n\n # Cache the data\n all_logits += fetched_logits\n all_predictions += fetched_predictions\n all_fetched_data[\"target\"] += fetched_data[\"target\"]\n\n # break the loop if max_eval_batches is set\n num_eval_batches += 1\n if (max_eval_batches and\n num_eval_batches >= max_eval_batches):\n break\n\n except tf.errors.OutOfRangeError:\n pass\n\n # Evaluate\n scores = None\n if calculate_scores:\n scores = evaluation_fn(\n all_predictions,\n all_fetched_data[\"seq_1\"], # Should be empty\n all_fetched_data[\"seq_2\"], # Should be empty\n all_fetched_data[\"target\"])\n\n if write_results:\n _write_results_to_csv(\n all_logits,\n all_predictions,\n all_fetched_data,\n output_dir=os.path.join(\n self._logdir, RESULTS_CSV_FNAME))\n\n return len(all_predictions), scores", "def evaluate(epoch_number):\r\n model.eval() # turn on the eval() switch to disable dropout\r\n total_loss = 0\r\n total_correct = 0\r\n total_spl = 0\r\n total_xrl = 0\r\n total_Xrl = 0\r\n total_Yrl = 0\r\n total_cl = 0\r\n total_ol = 0\r\n Ysave = []\r\n for batch, i in enumerate(range(0, len(data_val), args.batch_size)):\r\n data, targets, lenth = package(data_val[i:min(len(data_val), i+args.batch_size)], volatile=True)\r\n if args.cuda:\r\n data = data.cuda()\r\n targets = targets.cuda()\r\n hidden = model.init_hidden(data.size(1))\r\n x, y, x_re, X, Y, Y_fromX, X_fromY, pred, outp, outp_fromY = model.forward(data, hidden,lenth, \"eval\",epoch_number)\r\n Ysave.append( (Y.cpu(), pred.cpu(), targets.cpu()) )\r\n output_flat = pred.view(data.size(1), -1)\r\n loss, sparse_loss, x_re_loss, X_re_loss, Y_re_loss, class_loss, outp_loss= \\\r\n criterion(x, y, x_re, X, Y, Y_fromX, X_fromY, pred, targets, data.size(1), outp, outp_fromY, lenth, epoch_number)\r\n total_loss += loss.data\r\n total_spl += sparse_loss.data\r\n total_xrl += x_re_loss.data\r\n total_Xrl += X_re_loss.data\r\n total_Yrl += Y_re_loss.data\r\n total_cl += class_loss.data\r\n total_ol += outp_loss.data\r\n\r\n prediction = torch.max(output_flat, 1)[1]\r\n total_correct += torch.sum((prediction == targets).float())\r\n\r\n ave_loss = total_loss / (len(data_val) // args.batch_size)\r\n ave_spl = total_spl / (len(data_val) // args.batch_size)\r\n ave_xrl = total_xrl / (len(data_val) // args.batch_size)\r\n ave_Xrl = total_Xrl / (len(data_val) // args.batch_size)\r\n ave_Yrl = total_Yrl / (len(data_val) // args.batch_size)\r\n ave_cl = total_cl / (len(data_val) // args.batch_size)\r\n ave_ol = total_ol / (len(data_val) // args.batch_size)\r\n\r\n if epoch_number is 15:\r\n f = open(\"../Y.pkl\",\"wb\")\r\n pkl.dump(Ysave, f)\r\n f.close()\r\n return ave_loss, total_correct.data[0] / len(data_val), ave_spl, ave_xrl, ave_Xrl,ave_Yrl, ave_cl, ave_ol", "def classification(features, scores, n_classes, model_type=0, save_path='results/',\n lr=.01, batch_size=10, n_epochs=20, test_size=.3,\n verbose=False, save_results=False, normalize=True):\n # features, scores = read_data_from_csv()\n verbose_opc = 0\n if verbose:\n print(\"[INFO] Shuffle Data\")\n verbose_opc = 1\n\n features, scores = shuffle(features, scores, random_state=0)\n\n if normalize:\n if verbose:\n print(\"[INFO] Normalizing Data\")\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n\n if verbose:\n print(\"[INFO] Splitting data into train and test sets\")\n x_train, x_test, y_train, y_test = train_test_split(features, scores, test_size=test_size)\n\n\n\n if verbose:\n print(\"[INFO] Creating the machine 
learning model\")\n\n model = None\n if model_type == 0:\n model = res_model(x_train.shape[1:], n_classes)\n elif model_type == 1:\n model = simple_model(x_train.shape[1:], n_classes)\n elif model_type == 2:\n model = sklearn.svm.SVC(gamma='auto')\n elif model_type == 3:\n model = RandomForestClassifier()\n elif model_type == 4:\n model = AdaBoostClassifier()\n elif model_type == 5:\n model = xgb.XGBClassifier(objective=\"multi:softprob\", random_state=42)\n\n h = None\n if model_type >= 0 and model_type <= 1:\n # classes 0.0 ,0.5, 1.0, 1.5, 2.0\n y_cat_train = to_categorical(y_train, n_classes)\n y_cat_test = to_categorical(y_test, n_classes)\n\n model.compile(loss=\"logcosh\",\n #optimizer=keras.optimizers.SGD(lr=lr, momentum=.3),\n optimizer=\"adamax\",\n metrics=['accuracy'])\n\n h = model.fit(x_train, y_cat_train,\n batch_size=batch_size,\n epochs=n_epochs,\n validation_data=(x_test, y_cat_test),\n verbose=verbose_opc)\n\n evaluate_model(x_test, y_cat_test, batch_size, model, n_epochs, h, n_classes, folder_name=save_path,\n save_results=save_results, is_rna=True)\n else:\n model.fit(x_train, y_train)\n\n evaluate_model(x_test, y_test, batch_size, model, n_epochs, h, n_classes, folder_name=save_path,\n save_results=save_results)\n\n return model", "def evaluate(model, optimizer, loss_function, loader, device, labels, log_every_n=10):\n\n model.eval()\n\n batch_wise_true_labels = []\n batch_wise_predictions = []\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n with torch.no_grad(): # Disable gradient computation - required only during training\n for i, batch in tqdm(enumerate(loader)):\n\n logits = model(batch[0].to(device), batch[1]).squeeze()\n loss = loss_function(logits, batch[2].to(device))\n loss_history.append(loss.item())\n\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n running_loss_history.append(running_loss)\n\n predictions = torch.sigmoid(logits)\n\n batch_wise_true_labels.append(batch[2].view(-1).tolist())\n batch_wise_predictions.append(predictions.view(-1).tolist())\n\n # flatten the list of predictions using itertools\n all_true_labels = list(chain.from_iterable(batch_wise_true_labels))\n all_predictions = list(chain.from_iterable(batch_wise_predictions))\n all_predictions = [1 if p > 0.5 else 0 for p in all_predictions]\n\n\n print(\"Evaluation Loss: \", running_loss)\n # Now we can generate a classification report\n print(\"Classification report after epoch:\")\n print(f1_score(all_true_labels, all_predictions, average='micro'))\n print(classification_report(all_true_labels, all_predictions, labels=labels))\n\n return loss_history, running_loss_history", "def evaluate_individual(predictions, test_files, models):\n\n print(\"\\nAccuracy for individual models\\n\")\n \n # Fix Location\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_location\"] == prediction[\"predicted_location\"]:\n correct_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = correct_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n if prediction[\"predicted_location\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"correct_data\"][\"correct_type\"]].value] = total_predictions[FixType[\n prediction[\"correct_data\"][\"correct_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 
0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Location accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Location accuracy overall is {accuracy * 100} %\")\n \n # Fix type\n correct_predictions = [0, 0, 0]\n total_predictions = [0, 0, 0]\n num_failed_predictions = 0\n\n for prediction in predictions:\n if prediction[\"correct_data\"][\"correct_type\"] == prediction[\"predicted_type\"]:\n correct_predictions[FixType[prediction[\"predicted_type\"]].value] = correct_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n if prediction[\"predicted_type\"] is None:\n num_failed_predictions = num_failed_predictions + 1\n total_predictions[FixType[prediction[\"predicted_type\"]].value] = total_predictions[FixType[\n prediction[\"predicted_type\"]].value] + 1\n\n for i in range(3):\n if total_predictions[i] == 0: # If the type was never predicted\n accuracy = 0\n else:\n accuracy = correct_predictions[i] / total_predictions[i]\n print(f\"Fix Type accuracy for class {FixType(i).name}: {accuracy * 100} %\")\n\n accuracy = sum(correct_predictions) / (len(predictions) - num_failed_predictions)\n print(f\"Fix Type accuracy overall is {accuracy * 100} %\")\n \n # We repeat the predictions to evaluate the insert and modify models individually, regardless of the predicted fix type \n\n raw_training_samples = []\n\n if test_files.endswith(\".json\"): # Single JSON file\n with open(test_files) as file:\n logging.info(\"Source ending in .json. Predicting on single JSON file.\")\n raw_training_samples = json.load(file)\n else: # Folder path\n for filename in listdir(test_files):\n with open(test_files + filename) as file:\n raw_training_samples.extend(json.load(file))\n \n correct_predictions_insert = 0\n total_predictions_insert = 0\n correct_predictions_modify = 0\n total_predictions_modify = 0\n insert_tokens = []\n modify_tokens = []\n\n for sample in raw_training_samples:\n # Insert\n if sample[\"metadata\"][\"fix_type\"] == \"insert\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[2])\n token = IOProcessor.postprocess(pred, 2)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_insert = correct_predictions_insert + 1\n else: # Incorrect prediction\n insert_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_insert = total_predictions_insert + 1\n # Modify\n if sample[\"metadata\"][\"fix_type\"] == \"modify\":\n actual_sample, tokens = IOProcessor.preprocess(sample[\"wrong_code\"])\n pred = predict_single(actual_sample, models[3])\n token = IOProcessor.postprocess(pred, 3)\n if token == sample[\"metadata\"][\"fix_token\"]: # Correct Prediction\n correct_predictions_modify = correct_predictions_modify + 1\n else: # Incorrect prediction\n modify_tokens.append([token, sample[\"metadata\"][\"fix_token\"]])\n total_predictions_modify = total_predictions_modify + 1\n\n insert_accuracy = correct_predictions_insert / total_predictions_insert\n modify_accuracy = correct_predictions_modify / total_predictions_modify\n print(f\"Fix Token accuracy for insert is {insert_accuracy * 100} %\")\n print(f\"Fix Token accuracy for modify is {modify_accuracy * 100} %\")\n\n # The following code may be used to create a swarm plot of the erroneous predictions for fix locations\n 
# This does, however, require the installation of the pandas, seaborn, and matplotlib libraries.\n \n # import seaborn as sns\n # import matplotlib.pyplot as plt\n # import pandas as pd\n # location_distance_array = []\n # for prediction in predictions:\n # actual_sample, tokens = IOProcessor.preprocess(prediction[\"correct_data\"][\"wrong_code\"])\n # label = get_token_index(prediction[\"correct_data\"][\"wrong_code\"], tokens, prediction[\"correct_data\"][\"correct_location\"])\n # if prediction[\"predicted_token_location\"] - label == 0:\n # pass\n # else:\n # location_distance_array.append([prediction[\"predicted_token_location\"] - label, prediction[\"correct_data\"][\"correct_type\"]])\n \n # df = pd.DataFrame(data=location_distance_array)\n # sns.set_theme(style=\"whitegrid\")\n # f, ax = plt.subplots(figsize=(6, 4))\n # sns.despine(bottom=True, left=True)\n # sns.swarmplot(y=0, x=1, data=df, palette=\"dark\", size=6)\n # ax.set_xlabel('')\n # ax.set_ylabel('')\n # plt.ylim([-15, 16])\n \n # plt.savefig('line_plot.pdf', bbox_inches='tight', pad_inches=0)", "def evaluate_model(model, train_input, train_target, test_input, test_target, loss, save_plot, mname=None):\n # Evalute Model in train set\n epochs_number = len(loss)\n output = model.forward(train_input)\n train_loss = model.compute_loss(output, train_target).item()\n train_error = compute_number_error(output, train_target).item()\n\n print(\"\\nTraining Loss: \", train_loss)\n print(\"Training Number of errors: \", train_error)\n\n id_class_train = output.argmax(dim=1)\n if save_plot:\n plot_result(train_input, train_target, id_class_train, fname=mname)\n plot_loss(range(0, epochs_number), loss, fname=mname)\n\n # Deactivate dropout to test models\n model.enable_dropout(False)\n \n # Evaluate Model in test set\n output = model.forward(test_input)\n test_loss = model.compute_loss(output, test_target).item()\n test_error = compute_number_error(output, test_target).item()\n\n print(\"\\nTest Loss: \", test_loss)\n print(\"Test Number of errors: \", test_error)\n\n\n id_class_test = output.argmax(dim=1)\n if save_plot:\n plot_result(test_input, test_target, id_class_test, train=False, fname=mname)\n \n return [train_loss, train_error, test_loss, test_error]", "def _eval_during_training(\r\n self, evaluator, output_path, save_best_model, epoch, steps):\r\n if evaluator is not None:\r\n score = evaluator(\r\n self, output_path=output_path, epoch=epoch, steps=steps)\r\n if score > self.best_score and save_best_model:\r\n self.save(output_path)\r\n self.best_score = score", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # 
\"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions", "def evaluate():\n\n # seed provides the mechanism to control the shuffling which takes place reading input\n seed = tf.placeholder(tf.int64, shape=())\n \n # Generate placeholders for the images and labels.\n iterator = input_data.input_pipeline_binary(FLAGS.data_dir,\n FLAGS.batch_size,\n fake_data=FLAGS.fake_data,\n num_epochs=1,\n read_threads=FLAGS.read_threads,\n shuffle_size=FLAGS.shuffle_size,\n num_expected_examples=FLAGS.num_examples,\n seed=seed)\n image_path, label_path, images, labels = iterator.get_next()\n\n if FLAGS.verbose:\n print_op = tf.print(\"images and labels this batch: \", \n image_path, label_path, labels)\n else:\n print_op = tf.constant('No printing')\n\n if FLAGS.random_rotation:\n images, labels = harmonics.apply_random_rotation(images, labels)\n\n # Build a Graph that computes predictions from the inference model.\n logits = topology.inference(images, FLAGS.network_pattern)\n \n # Add to the Graph the Ops for loss calculation.\n loss = topology.binary_loss(logits, labels)\n \n # Set up some prediction statistics\n predicted = tf.round(tf.nn.sigmoid(logits))\n correct_pred = tf.equal(predicted, labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n \n while True:\n eval_once(sess, iterator, saver, seed, labels, loss, accuracy, predicted)\n if FLAGS.run_once:\n break\n time.sleep(FLAGS.eval_interval_secs)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_prediction = model.predict(X_test)\n Y_prediction_df = pd.DataFrame(Y_prediction, columns=category_names)\n \n for col in category_names:\n print(f\"category:{col}\")\n print(classification_report(Y_test[col], Y_prediction_df[col]))\n print('------------------------------------------------------')\n \n accuracy = 
np.mean(Y_prediction == Y_test.values)\n print(f\"Accuracy: {accuracy:.2%}\")", "def train_epoch(self, epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard):\n\n # Train an epoch\n self.model.train()\n print('Start epoch', epoch)\n train_itr = iter(self.loader_train)\n total_err = 0\n total_acc = 0\n\n for index, (data_pixel, data_labels) in enumerate(train_itr):\n\n # compute\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n\n # Use the model the produce the classification\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n\n # produce evaluator results\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n\n # set optimizer to zero.\n optimizer.zero_grad()\n\n # back propogate the evaluation results.\n eval_result['loss'].backward()\n\n # optimizer take step forward.\n optimizer.step()\n\n # tabulate the steps from the evaluation\n eval_result = {k: eval_result[k].item() for k in eval_result}\n\n # update every hundreds' of\n if index % 100 == 0:\n print(index, eval_result['loss'], eval_result['acc'])\n train_result = evaluator.evalulate_on_cache()\n train_total_err = train_result['loss']\n writer_tensorboard.add_scalar('Loss/Train', train_total_err, global_step=epoch)\n # log_metric('loss', train_total_err)\n train_total_acc = train_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Train', train_total_acc, global_step=epoch)\n # log_metric('acc', train_total_acc)\n train_kaggle_score = train_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Train', train_kaggle_score, global_step=epoch)\n # log_metric('kaggle_score', train_kaggle_score)\n dict_metrics_train = {\n 'Loss/Train': train_total_err,\n 'Accuracy/Train': train_total_acc,\n 'Kaggle_Score/Train': train_kaggle_score,\n }\n log_metrics(dict_metrics_train, step=epoch)\n print(f\"Epoch {epoch} Training, Loss {train_total_err}, Acc {train_total_acc}\")\n evaluator.clear_cache()\n # compute validation error\n self.model.eval()\n val_itr = iter(self.loader_val)\n with torch.no_grad():\n for index, (data_pixel, data_labels) in enumerate(val_itr):\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n eval_result = {k: eval_result[k].item() for k in eval_result}\n total_err += eval_result['loss']\n total_acc += eval_result['acc']\n # print(total_err / (1 + input_index), total_acc / (1 + input_index))\n val_result = evaluator.evalulate_on_cache()\n val_total_err = val_result['loss']\n writer_tensorboard.add_scalar('Loss/Val', val_total_err, global_step=epoch)\n val_total_acc = val_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Val', val_total_acc, global_step=epoch)\n val_kaggle_score = val_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Val', val_kaggle_score, global_step=epoch)\n dict_metrics_val = {\n 'Loss/Validation': val_total_err,\n 'Accuracy/Validation': val_total_acc,\n 'Kaggle_Score/Validation': val_kaggle_score,\n }\n log_metrics(dict_metrics_val, step=epoch)\n # Write to disk.\n writer_tensorboard.flush()\n print(f\"Epoch {epoch} Eval, Loss {val_total_err}, Acc {val_total_acc}\")\n evaluator.clear_cache()\n print(\"Saving the model (epoch %d)\" % epoch)\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": 
optimizer.state_dict(),\n }, state_fpath)\n print(f\"Making a backup (step {epoch})\")\n backup_fpath = os.path.join(self.backup_dir, f\"model_bak_{epoch}.pt\")\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, backup_fpath)\n # Dump the traces\n perf_trace.append(\n {\n 'epoch': epoch,\n 'train_err': train_total_err,\n 'train_acc': train_total_acc,\n 'train_kaggle_score': train_kaggle_score,\n 'val_err': val_total_err,\n 'val_acc': val_total_acc,\n 'val_kaggle_score': val_kaggle_score\n }\n )\n pickle.dump(perf_trace, open(perf_path, 'wb'))\n # store epoch full result separately\n epoch_result = {\n 'epoch': epoch,\n 'train_result': train_result,\n 'val_result': val_result\n }\n pickle.dump(epoch_result, open(os.path.join(self.results_dir, 'result_epoch_{0}.p'.format(epoch)), 'wb'))", "def evaluate_detector(self, batch_size, travel_mode_detector, classes, features, output_file_path):\n predictions, prediction_time = self.get_predictions(\n batch_size=batch_size,\n tmd_detector=travel_mode_detector,\n input_features=features\n )\n accuracy, cohen_kappa_score, f1_score, precision, recall = self.get_classification_metrics(\n classes=classes,\n predictions=predictions\n )\n self.store_classes_and_predictions(output_file_path, classes, predictions)\n return accuracy, cohen_kappa_score, f1_score, precision, prediction_time, recall", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n return classification_report(Y_test, y_pred, target_names = category_names)", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def evaluate_model(X_train, X_test, y_train, y_test, batch_size, nb_epoch):\n model = Sequential()\n model.add(Dense(512, input_shape=(784,)))\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.2))\n model.add(Dense(10))\n model.add(Activation(\"softmax\"))\n model.compile(loss=\"categorical_crossentropy\",\n optimizer=RMSprop(),\n metrics=[\"accuracy\"])\n model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,\n verbose=1, validation_data=(X_test, y_test))\n results = model.evaluate(X_test, y_test, verbose=0)\n return results, model", "def evaluate_nlp(model: ContinualModel, dataset: ContinualDataset, last=False) -> Tuple[list, list]:\n status = model.net.training\n model.net.eval()\n accs, accs_mask_classes = [], []\n # todo: change the mask recorder\n for k, test_loader in enumerate(dataset.test_loaders):\n if last and k < len(dataset.test_loaders) - 1:\n continue\n correct, correct_mask_classes, total = 0.0, 0.0, 0.0\n for data in test_loader:\n xs, ys, x_token_idxs, x_token_masks, y_token_idxs, y_token_masks, y_idxs = data\n \n x_token_idxs = x_token_idxs.to(model.device)\n x_token_masks = x_token_masks.to(model.device)\n y_token_idxs = y_token_idxs.to(model.device)\n y_token_masks = y_token_masks.to(model.device)\n y_idxs = y_idxs.to(model.device)\n \n task_id = torch.tensor(k, dtype=torch.int64)\n task_id = task_id.to(model.device)\n \n # todo: change the label recorder\n if 'class-il' not in model.COMPATIBILITY:\n outputs = model(x_token_idxs, x_token_masks, task_id)\n else:\n outputs = model.forward_nlp(x_token_idxs, 
x_token_masks, task_id)\n \n _, pred = torch.max(outputs.data, 1)\n \n correct += torch.sum(pred == y_idxs).item()\n total += y_idxs.shape[0]\n \n if dataset.SETTING == 'class-il':\n mask_classes(outputs, dataset, k)\n _, pred = torch.max(outputs.data, 1)\n correct_mask_classes += torch.sum(pred == y_idxs).item()\n \n accs.append(correct / total * 100\n if 'class-il' in model.COMPATIBILITY else 0)\n accs_mask_classes.append(correct_mask_classes / total * 100)\n \n model.net.train(status)\n return accs, accs_mask_classes", "def eval(self, val_batch_size: int = 32):\n\n val_generator = DataGenerator(\n batch_size=val_batch_size,\n split=\"test\",\n layers=self.n_blocks,\n train_mode=\"classifier\",\n )\n if self.train_mode == \"combined\":\n model = KM.Model(\n inputs=self.combined.input,\n outputs=self.combined.get_layer(\"logits\").output,\n )\n elif self.train_mode == \"classifier\":\n model = KM.Model(\n inputs=self.classifier.input,\n outputs=self.classifier.get_layer(\"logits\").output,\n )\n\n # initialize the array to store preds for each label\n accuracy = np.zeros((10, 10), dtype=int)\n\n for input, true_logits in val_generator():\n pred_logits = model.predict(input)\n\n true_logits = tf.split(\n true_logits, num_or_size_splits=self.n_blocks, axis=-1\n )\n true_logits = true_logits[0]\n\n # Split the logits from different levels\n pred_logits = tf.split(\n tf.expand_dims(pred_logits, axis=-1),\n num_or_size_splits=self.n_blocks,\n axis=1,\n )\n # Predicted label by taking an elementwise maximum across all layers\n pred_logits = tf.reduce_max(tf.concat(pred_logits, axis=2), axis=2)\n\n # Get true and pred labels\n true_labels = tf.argmax(true_logits, axis=-1)\n pred_labels = tf.argmax(pred_logits, axis=-1)\n for i, gt_label in enumerate(true_labels):\n pred_label = int(pred_labels[i])\n accuracy[int(gt_label)][pred_label] += 1\n\n import matplotlib.pyplot as plt\n import seaborn as sn\n\n plt.figure(figsize=(10, 7))\n sn.heatmap(accuracy / np.sum(accuracy, axis=-1), annot=True)\n plt.show()\n # metrics = self.combined.evaluate(\n # val_generator(),\n # )\n # print(metrics)", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def save_results(output_dir,\n check_file,\n results,\n exp_string,\n identifier,\n shuffle_labels,\n model_options,\n predictor='classify',\n fold_no=None,\n titration_ratio=None):\n\n signal = 'shuffled' if shuffle_labels else 'signal'\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n if isinstance(model_options.n_dim, list):\n n_dim = '.'.join(map(str, model_options.n_dim))\n else:\n n_dim = model_options.n_dim\n\n if predictor == 'classify':\n auc_df = pd.concat(results[\n '{}_auc'.format(exp_string)\n ])\n output_file = construct_filename(output_dir,\n 'auc_threshold_metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n auc_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n aupr_df = pd.concat(results[\n '{}_aupr'.format(exp_string)\n ])\n output_file = construct_filename(output_dir,\n 'aupr_threshold_metrics',\n '.tsv.gz',\n identifier,\n training_data,\n 
model_options.model,\n signal,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n aupr_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n if '{}_coef'.format(exp_string) in results:\n coef_df = pd.concat(results[\n '{}_coef'.format(exp_string)\n ])\n coef_df.to_csv(\n check_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n metrics_df = pd.concat(results[\n '{}_metrics'.format(exp_string)\n ])\n\n if '{}_preds'.format(exp_string) in results:\n preds_df = pd.concat(results[\n '{}_preds'.format(exp_string)\n ])\n else:\n preds_df = None\n\n if '{}_param_grid'.format(exp_string) in results:\n params_df = pd.concat(results[\n '{}_param_grid'.format(exp_string)\n ])\n else:\n params_df = None\n\n output_file = construct_filename(output_dir,\n 'metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n metrics_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n if preds_df is not None:\n output_file = construct_filename(output_dir,\n 'preds',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n preds_df.to_csv(\n output_file, sep=\"\\t\", float_format=\"%.5g\"\n )\n\n if params_df is not None:\n output_file = construct_filename(output_dir,\n 'param_grid',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no)\n\n params_df.to_csv(output_file, sep=\"\\t\")", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred=model.predict(X_test)\n acc=[]\n for i,c in enumerate(Y_test.columns):\n print(c)\n print(classification_report(Y_test[c], Y_pred[:,i]))\n acc.append(accuracy_score(Y_test[c], Y_pred[:,i]))\n print('Accuracy :',np.mean(acc))\n\n pass", "def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n\n for i, column in enumerate(category_names):\n y_true = Y_test.values[:, i]\n y_pred = Y_pred[:, i]\n target_names = ['not {}'.format(column), '{}'.format(column)]\n print(classification_report(\n y_true, y_pred, target_names=target_names))", "def save_classifier_results(classifier_name, out_dir, allConfMats, allTotalErrs):\r\n\r\n # convert confusion matrix and total errors into numpy array\r\n tmpAllConfMats = np.array(allConfMats)\r\n tmpAllTotalErrs = np.array(allTotalErrs)\r\n # initialize mean and std variables\r\n TN_means = np.zeros(tmpAllConfMats.shape[1])\r\n TN_stds = np.zeros(tmpAllConfMats.shape[1])\r\n FN_means = np.zeros(tmpAllConfMats.shape[1])\r\n FN_stds = np.zeros(tmpAllConfMats.shape[1])\r\n total_means = np.zeros(tmpAllConfMats.shape[1])\r\n total_stds = np.zeros(tmpAllConfMats.shape[1])\r\n\r\n for j in range(tmpAllConfMats.shape[1]):\r\n tmpData = tmpAllConfMats[:, j, 0, 0]\r\n TN_means[j] = np.mean(tmpData[np.invert(np.isnan(tmpData))])\r\n TN_stds[j] = np.std(tmpData[np.invert(np.isnan(tmpData))])\r\n tmpData = tmpAllConfMats[:, j, 1, 0]\r\n FN_means[j] = np.mean(tmpData[np.invert(np.isnan(tmpData))])\r\n FN_stds[j] = np.std(tmpData[np.invert(np.isnan(tmpData))])\r\n tmpData = tmpAllTotalErrs[:, j]\r\n # Compute mean of 
std of non-Nan values\r\n total_means[j] = np.mean(tmpData[np.invert(np.isnan(tmpData))])\r\n total_stds[j] = np.std(tmpData[np.invert(np.isnan(tmpData))])\r\n with open(path.join(out_dir, classifier_name+'_errors.mat'),'wb') as f:\r\n scipy.io.savemat(f, {'TN_means': TN_means,\r\n 'TN_stds': TN_stds,\r\n 'FN_means': FN_means,\r\n 'FN_stds': FN_stds,\r\n 'total_means': total_means,\r\n 'total_stds': total_stds,\r\n })", "def evaluate_model(\n self,\n val_loader,\n additional_gpu=None,\n metrics=None,\n inputs_key=\"image\",\n labels_key=\"label\"\n ):\n # predict on the validation set\n all_preds = []\n all_labels = []\n\n self.model.eval()\n\n if additional_gpu is not None:\n device = additional_gpu\n else:\n device = self.device\n\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n inputs, labels = data[inputs_key], data[labels_key]\n inputs = inputs.to(device)\n labels = labels.to(device)\n # forward + backward + optimize\n outputs = self.model(inputs)\n # run inference\n all_preds, all_labels = predict(\n outputs,\n labels,\n all_preds,\n all_labels,\n self.prediction_type,\n self.criterion,\n class_threshold=self.class_threshold\n )\n\n # compute confusion matrix\n cm = confusion_matrix(all_labels, all_preds)\n plt.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\n # Visualize the confusion matrix\n classes = [\"control\", \"patient\"]\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \"d\"\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n plt.title(\"Confusion Matrix\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n plt.show()\n\n # print metrics\n if metrics is not None:\n for metric in metrics:\n if isinstance(all_preds[0], list):\n print(\"{}: {}\".format(metric.__name__, np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])))\n else:\n print(\"{}: {}\".format(metric.__name__, metric(all_labels, all_preds)))\n\n\n self.model.train()", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def evaluate_ensemble(hparams, model_type, eval_size, data_dir, num_targets,\n dataset, checkpoint, num_trials):\n checkpoints = []\n for i in range(num_trials):\n file_name = checkpoint.format(i)\n if tf.train.checkpoint_exists(file_name):\n checkpoints.append(file_name)\n\n with tf.Graph().as_default():\n batch_size = 100\n features = get_features('test', batch_size, 1, data_dir, num_targets,\n dataset)[0]\n model = models[model_type](hparams)\n\n session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=session, coord=coord)\n num_steps = eval_size // batch_size\n data, targets = get_placeholder_data(num_steps, batch_size, features,\n session)\n logits = infer_ensemble_logits(features, model, checkpoints, session,\n num_steps, data)\n coord.request_stop()\n coord.join(threads)\n session.close()\n\n logits = np.reshape(logits, (num_trials, num_steps, batch_size, -1))\n logits = np.sum(logits, axis=0)\n predictions = np.argmax(logits, axis=2)\n total_wrong = np.sum(np.not_equal(predictions, targets))\n print('Total wrong predictions: {}, wrong percent: 
{}%'.format(\n total_wrong, total_wrong / eval_size * 100))", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def evaluate(self, output_dir, test_data, device, verbose_logging=False):\r\n tokenizer = self.tokenizer\r\n # device = torch.device(\"cuda:0\")\r\n model = self.model\r\n model.to(device)\r\n args = self.args\r\n\r\n # # reassgin unique_id for features to keep order for federated learning situation\r\n # unique_id = 1000000000\r\n # for feature in self.test_dl.features:\r\n # feature.unique_id = unique_id\r\n # unique_id += 1\r\n\r\n examples = test_data.examples\r\n features = test_data.features\r\n\r\n eval_loss = 0.0\r\n nb_eval_steps = 0\r\n model.eval()\r\n\r\n # if args.n_gpu > 1:\r\n # model = torch.nn.DataParallel(model)\r\n\r\n if self.args.fp16:\r\n from torch.cuda import amp\r\n\r\n all_results = []\r\n for batch in tqdm(test_data, disable=args.silent, desc=\"Running Evaluation\"):\r\n batch = tuple(t.to(device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {\r\n \"input_ids\": batch[1],\r\n \"attention_mask\": batch[2],\r\n \"token_type_ids\": batch[3],\r\n }\r\n\r\n if self.args.model_type in [\r\n \"xlm\",\r\n \"roberta\",\r\n \"distilbert\",\r\n \"camembert\",\r\n \"electra\",\r\n \"xlmroberta\",\r\n \"bart\",\r\n ]:\r\n del inputs[\"token_type_ids\"]\r\n\r\n example_indices = batch[4]\r\n\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n inputs.update({\"cls_index\": batch[5], \"p_mask\": batch[6]})\r\n\r\n if self.args.fp16:\r\n with amp.autocast():\r\n outputs = model(**inputs)\r\n eval_loss += outputs[0].mean().item()\r\n else:\r\n outputs = model(**inputs)\r\n eval_loss += outputs[0].mean().item()\r\n begin_idx = len(all_results)\r\n for i, _ in enumerate(example_indices):\r\n eval_feature = features[begin_idx + i]\r\n unique_id = int(eval_feature.unique_id)\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n # XLNet uses a more complex post-processing procedure\r\n result = RawResultExtended(\r\n unique_id=unique_id,\r\n start_top_log_probs=to_list(outputs[0][i]),\r\n start_top_index=to_list(outputs[1][i]),\r\n end_top_log_probs=to_list(outputs[2][i]),\r\n end_top_index=to_list(outputs[3][i]),\r\n cls_logits=to_list(outputs[4][i]),\r\n )\r\n else:\r\n result = RawResult(\r\n unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i]),\r\n )\r\n all_results.append(result)\r\n\r\n nb_eval_steps += 1\r\n\r\n eval_loss = eval_loss / nb_eval_steps\r\n\r\n prefix = \"test\"\r\n os.makedirs(output_dir, exist_ok=True)\r\n\r\n output_prediction_file = os.path.join(output_dir, \"predictions_{}.json\".format(prefix))\r\n output_nbest_file = os.path.join(output_dir, \"nbest_predictions_{}.json\".format(prefix))\r\n output_null_log_odds_file = os.path.join(output_dir, \"null_odds_{}.json\".format(prefix))\r\n\r\n if args.model_type in [\"xlnet\", \"xlm\"]:\r\n # XLNet uses a more complex post-processing procedure\r\n (all_predictions, all_nbest_json, scores_diff_json, out_eval,) = write_predictions_extended(\r\n examples,\r\n features,\r\n all_results,\r\n 
args.n_best_size,\r\n args.max_answer_length,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n None,\r\n model.config.start_n_top,\r\n model.config.end_n_top,\r\n True,\r\n tokenizer,\r\n verbose_logging,\r\n )\r\n else:\r\n all_predictions, all_nbest_json, scores_diff_json = write_predictions(\r\n examples,\r\n features,\r\n all_results,\r\n args.n_best_size,\r\n args.max_answer_length,\r\n args.do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n verbose_logging,\r\n True,\r\n args.null_score_diff_threshold,\r\n )\r\n\r\n return all_predictions, all_nbest_json, scores_diff_json, eval_loss", "def evaluate():\n with tf.Graph().as_default() as g:\n # Get images and labels for CIFAR-10.\n # eval_data = FLAGS.eval_data\n # images, labels = model.inputs(eval_data=eval_data, batch_size=FLAGS.eval_batch_size)\n data_obj = model.inputs()\n values = data_obj.value\n data = tf.concat(values,0)\n # print(data)\n\n data = tf.reshape(data, [data_obj.height,data_obj.width])\n # print(data)\n # Build a Graph that computes the logits predictions from the\n # inference model.\n representation, reconstruct = model.inference_fconn(data)\n # print(representation)\n # print(reconstruct)\n # print(data)\n # Calculate predictions.\n # representation_reshape = tf.reshape(representation, [FLAGS.eval_batch_size, -1])\n\n # Restore the moving average version of the learned variables for eval.\n variable_averages = tf.train.ExponentialMovingAverage(\n model.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n # images_reconstruct = tf.concat([data, tf.transpose(reconstruct)],1)\n # print(images_reconstruct)\n # tf.summary.image('original_reconstruct', images_reconstruct)\n # tf.image_summary('original', images, max_images=20)\n # tf.image_summary('reconstruct', reconstruct, max_images=20)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.summary.merge_all()\n\n summary_writer = tf.summary.FileWriter(eval_dir, g)\n\n\n while True:\n eval_once(saver, summary_writer, representation, summary_op, reconstruct, data)\n if FLAGS.run_once:\n break\n time.sleep(FLAGS.eval_interval_secs)", "def evaluate(self, test_dir='data/dev', target='real'):\n test_data = {c: os.path.join(test_dir, c) for c in self.classes}\n if not target in test_data:\n print('Error: target class does not exist in test data.')\n return\n outcomes = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}\n # >>> YOUR ANSWER HERE\n data = []\n for c in test_data:\n docs = open(test_data[c]).readlines()\n for doc in docs:\n preprocess_doc = doc.strip()\n data.append((c, preprocess_doc))\n for item in data:\n predict_ans = self.predict(item[1])\n if item[0] == 'real':\n if predict_ans == 'real':\n outcomes['TP'] += 1\n else:\n outcomes['FN'] += 1\n else:\n if predict_ans == 'real':\n outcomes['FP'] += 1\n else:\n outcomes['TN'] += 1\n precision = outcomes['TP'] / (outcomes['TP'] + outcomes['FP']) # replace with equation for precision\n recall = outcomes['TP'] / (outcomes['TP'] + outcomes['FN']) # replace with equation for recall\n f1_score = 2 * ((precision * recall) / (precision + recall)) # replace with equation for f1\n # >>> END YOUR ANSWER\n return precision, recall, f1_score", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in 
range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def evaluate(model, dataset, append_submission, dataset_root):\n\n with open(os.path.join(dataset_root, dataset + '.json'), 'r') as f:\n image_list = json.load(f)\n\n print('Running evaluation on {} set...'.format(dataset))\n\n count_img=0\n for img in image_list:\n img_path = os.path.join(dataset_root, 'images', dataset, img['filename'])\n pil_img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(pil_img)\n x = preprocess_input(x)\n x = np.expand_dims(x, 0)\n output = model.predict(x)\n sys.stdout.write('\\r'+str(count_img/len(image_list))+' ')\n sys.stdout.flush()\n append_submission(img['filename'], output[0, :4], output[0, 4:])\n count_img+=1", "def evaluate_training(net_name, dobj, dir_path, t_start, batch_size=32, generator=g.DataGeneratorMultInput, **kwargs):\n opt_arg, kwargs = filter_keys(evaluate_net_defaults(), kwargs)\n \n wiki_data = {}\n for k, v in opt_arg.items():\n wiki_data[k] = str(v)\n \n t_string = date_to_file_string(t_start)\n \n net_last = keras.models.load_model(os.path.join(dir_path, net_name + '.hf5'), custom_objects=custom_layers.get_custom_objects())\n \n #Load networks\n if not opt_arg['best_epoch'] == 0:\n net_best = keras.models.load_model(os.path.join(dir_path, net_name + '_epoch_' + str(opt_arg['best_epoch']) + '.hf5'), custom_objects=custom_layers.get_custom_objects())\n else:\n net_best = None\n \n #Run predict generator on the test data for each net.\n prediction_path_last = os.path.join(dir_path, net_name + '_predictions_last_epoch_' + t_string + '.hf5')\n \n store_test_results(net_last, dobj, prediction_path_last, batch_size=batch_size, generator=generator)\n \n prediction_path_best = ''\n \n if not net_best == None:\n prediction_path_best = os.path.join(dir_path, net_name + '_predictions_best_epoch_' + t_string + '.hf5')\n \n store_test_results(net_best, dobj, prediction_path_best, batch_size=batch_size, generator=generator)\n \n try:\n #Create loss plot\n if opt_arg['make_loss_plot']:\n loss_plot_path = 
os.path.join(dir_path, net_name + '_loss_plot.png')\n make_loss_plot(os.path.join(dir_path, net_name + \"_results.json\"), loss_plot_path)\n else:\n loss_plot_path = 'N/A'\n except:\n print(\"Something went wrong while trying to make the loss plot.\")\n traceback.print_exc()\n print(\"Continuing...\")\n pass\n \n try:\n #Make SNR plots\n SNR_plot_path_last = os.path.join(dir_path, net_name + '_snr_plot_last_epoch_' + t_string + '.png')\n \n plot_true_from_pred_file(prediction_path_last, SNR_plot_path_last, show=opt_arg['show_snr_plot'], net_name=net_name + ' last epoch')\n \n SNR_plot_path_best = ''\n \n if not net_best == None:\n SNR_plot_path_best = os.path.join(dir_path, net_name + '_snr_plot_best_epoch_' + t_string + '.png')\n \n plot_true_from_pred_file(prediction_path_best, SNR_plot_path_best, show=opt_arg['show_snr_plot'], net_name=net_name + ' best epoch')\n except:\n print(\"Something went wrong while trying to make the SNR plot.\")\n traceback.print_exc()\n print(\"Continuing...\")\n pass\n \n try:\n #Make false alarm plots\n false_alarm_plot_path_last = os.path.join(dir_path, net_name + '_false_alarm_plot_last_epoch_' + t_string + '.png')\n \n tmp_false_alarm_path_last = m.plot_false_alarm_from_pred_file(prediction_path_last, false_alarm_plot_path_last, show=opt_arg['show_false_alarm'])\n \n false_alarm_plot_prob_path_last = os.path.join(dir_path, net_name + '_false_alarm_plot_prob_last_epoch_' + t_string + '.png')\n \n tmp_false_alarm_prob_path_last = m.plot_false_alarm_prob_from_pred_file(prediction_path_last, false_alarm_plot_prob_path_last, show=opt_arg['show_false_alarm'])\n \n false_alarm_plot_path_best = ''\n \n false_alarm_plot_prob_path_best = ''\n \n tmp_false_alarm_path_best = ''\n \n tmp_false_alarm_prob_path_best = ''\n \n if not net_best == None:\n false_alarm_plot_path_best = os.path.join(dir_path, net_name + '_false_alarm_plot_best_epoch_' + t_string + '.png')\n \n false_alarm_plot_prob_path_best = os.path.join(dir_path, net_name + '_false_alarm_plot_prob_best_epoch_' + t_string + '.png')\n \n tmp_false_alarm_path_best = m.plot_false_alarm_from_pred_file(prediction_path_best, false_alarm_plot_path_best, show=opt_arg['show_false_alarm'])\n \n tmp_false_alarm_prob_path_best = m.plot_false_alarm_prob_from_pred_file(prediction_path_best, false_alarm_plot_prob_path_best, show=opt_arg['show_false_alarm'])\n except:\n print(\"Something went wrong while trying to make the false alarm plots.\")\n traceback.print_exc()\n print(\"Continuing...\")\n pass\n \n try:\n #Make sensitivity plots\n snr_range = dobj.get_file_properties()['snr']\n \n sensitivity_plot_path_last = os.path.join(dir_path, net_name + '_sensitivity_plot_last_epoch_' + t_string + '.png')\n \n sensitivity_plot_prob_path_last = os.path.join(dir_path, net_name + '_sensitivity_plot_prob_last_epoch_' + t_string + '.png')\n \n m.plot_sensitivity_from_pred_file(prediction_path_last, sensitivity_plot_path_last, bins=(snr_range[0]+1, snr_range[1], 1), show=opt_arg['show_sensitivity_plot'])\n \n m.plot_sensitivity_prob_from_pred_file(prediction_path_last, sensitivity_plot_prob_path_last, bins=(snr_range[0]+1, snr_range[1], 1))\n \n #m.plot_sensitivity_prob(dobj, prediction_path_last, tmp_false_alarm_prob_path_last, sensitivity_plot_prob_path_last, show=opt_arg['show_sensitivity_plot'])\n \n sensitivity_plot_path_best = ''\n \n sensitivity_plot_prob_path_best = ''\n \n if not net_best == None:\n sensitivity_plot_path_best = os.path.join(dir_path, net_name + '_sensitivity_plot_best_epoch_' + t_string + '.png')\n \n 
sensitivity_plot_prob_path_best = os.path.join(dir_path, net_name + '_sensitivity_plot_prob_best_epoch_' + t_string + '.png')\n \n m.plot_sensitivity_from_pred_file(prediction_path_best, sensitivity_plot_path_best, bins=(snr_range[0]+1, snr_range[1], 1), show=opt_arg['show_sensitivity_plot'])\n \n m.plot_sensitivity_prob_from_pred_file(prediction_path_best, sensitivity_plot_prob_path_best, bins=(snr_range[0]+1, snr_range[1], 1))\n \n #m.plot_sensitivity_prob(dobj, prediction_path_best, tmp_false_alarm_prob_path_best, sensitivity_plot_prob_path_best, show=opt_arg['show_sensitivity_plot'])\n except:\n print(\"Something went wrong while trying to make the sensitivity plots.\")\n traceback.print_exc()\n print(\"Continuing...\")\n pass\n \n #Make p-value plots\n try:\n p_val_dist_path_last = os.path.join(dir_path, 'p_value_distribution_plot_last.png')\n \n p_val_dist_path_best = ''\n \n m.plot_p_val_dist(prediction_path_last, p_val_dist_path_last, title_prefix='Last')\n \n if not net_best == None:\n p_val_dist_path_best = os.path.join(dir_path, 'p_value_distribution_plot_best.png')\n \n m.plot_p_val_dist(prediction_path_best, p_val_dist_path_best, title_prefix='Best')\n except:\n print(\"Something went wrong while trying to make the probability distribution plot.\")\n traceback.print_exc()\n print(\"Continuing...\")\n pass\n \n return((loss_plot_path, SNR_plot_path_last, false_alarm_plot_path_last, false_alarm_plot_prob_path_last, sensitivity_plot_path_last, sensitivity_plot_prob_path_last, SNR_plot_path_best, false_alarm_plot_path_best, false_alarm_plot_prob_path_best, sensitivity_plot_path_best, sensitivity_plot_prob_path_best, p_val_dist_path_last, p_val_dist_path_best, wiki_data))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n # print the metrics\n for i, col in enumerate(category_names):\n print('{} category metrics: '.format(col))\n print(classification_report(Y_test.iloc[:,i], y_pred[:,i]))", "def evaluate(model, file_path, criterion):\n\n n_batches = 0\n \n epoch_loss = 0\n epoch_acc = 0\n epoch_r = 0\n epoch_p = 0\n epoch_f1 = 0\n \n model.eval()\n \n examples = []\n \n for example_name, example_body, example_length in file_iterator(file_path):\n\n examples.append((example_name, example_body, example_length))\n\n if len(examples) >= (BATCH_SIZE * CHUNKS):\n\n random.shuffle(examples)\n\n for tensor_n, tensor_l, tensor_p, tensor_r, mask in numericalize(examples, CHUNKS):\n\n #place on gpu\n\n tensor_n = tensor_n.to(device)\n tensor_l = tensor_l.to(device)\n tensor_p = tensor_p.to(device)\n tensor_r = tensor_r.to(device)\n\n #put into model\n with torch.no_grad():\n loss, acc, p, r, f1 = get_metrics(tensor_n, tensor_l, tensor_p, tensor_r, model, criterion)\n\n epoch_loss += loss\n epoch_acc += acc\n epoch_p += p\n epoch_r += r\n epoch_f1 += f1\n \n n_batches += 1\n \n if n_batches % LOG_EVERY == 0:\n \n loss = epoch_loss / n_batches\n acc = epoch_acc / n_batches\n precision = epoch_p / n_batches\n recall = epoch_r / n_batches\n f1 = epoch_f1 / n_batches\n\n log = f'\\t| Batches: {n_batches} |\\n'\n log += f'\\t| Loss: {loss:02.3f} | Acc.: {acc:.3f} | P: {precision:.3f} | R: {recall:.3f} | F1: {f1:.3f}'\n with open(LOG_PATH, 'a+') as f:\n f.write(log+'\\n')\n print(log)\n\n examples = []\n \n else:\n pass\n \n #outside of for line in f, but will still have some examples left over\n\n random.shuffle(examples)\n\n n = len(examples)//BATCH_SIZE\n\n for tensor_n, tensor_l, tensor_p, tensor_r, mask in numericalize(examples, n):\n \n #place on 
gpu\n\n tensor_n = tensor_n.to(device)\n tensor_l = tensor_l.to(device)\n tensor_p = tensor_p.to(device)\n tensor_r = tensor_r.to(device)\n \n #put into model\n with torch.no_grad():\n loss, acc, p, r, f1 = get_metrics(tensor_n, tensor_l, tensor_p, tensor_r, model, criterion)\n\n epoch_loss += loss\n epoch_acc += acc\n epoch_p += p\n epoch_r += r\n epoch_f1 += f1\n \n n_batches += 1\n\n return epoch_loss / n_batches, epoch_acc / n_batches, epoch_p / n_batches, epoch_r / n_batches, epoch_f1 / n_batches", "def run_evaluation(forecast_probabilities, observed_labels, output_dir_name):\n\n file_system_utils.mkdir_recursive_if_necessary(\n directory_name=output_dir_name)\n\n # TODO(thunderhoser): Make binarization threshold an input argument to this\n # method.\n (binarization_threshold, best_csi\n ) = model_eval.find_best_binarization_threshold(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n threshold_arg=model_eval.THRESHOLD_ARG_FOR_UNIQUE_FORECASTS,\n criterion_function=model_eval.get_csi,\n optimization_direction=model_eval.MAX_OPTIMIZATION_DIRECTION,\n unique_forecast_precision=FORECAST_PRECISION_FOR_THRESHOLDS)\n\n print (\n 'Best binarization threshold = {0:.4f} ... corresponding CSI = {1:.4f}'\n ).format(binarization_threshold, best_csi)\n\n print 'Binarizing forecast probabilities...'\n forecast_labels = model_eval.binarize_forecast_probs(\n forecast_probabilities=forecast_probabilities,\n binarization_threshold=binarization_threshold)\n\n print 'Creating contingency table...'\n contingency_table_as_dict = model_eval.get_contingency_table(\n forecast_labels=forecast_labels, observed_labels=observed_labels)\n print '{0:s}\\n'.format(str(contingency_table_as_dict))\n\n print 'Computing performance metrics...'\n pod = model_eval.get_pod(contingency_table_as_dict)\n pofd = model_eval.get_pofd(contingency_table_as_dict)\n success_ratio = model_eval.get_success_ratio(contingency_table_as_dict)\n focn = model_eval.get_focn(contingency_table_as_dict)\n accuracy = model_eval.get_accuracy(contingency_table_as_dict)\n csi = model_eval.get_csi(contingency_table_as_dict)\n frequency_bias = model_eval.get_frequency_bias(contingency_table_as_dict)\n peirce_score = model_eval.get_peirce_score(contingency_table_as_dict)\n heidke_score = model_eval.get_heidke_score(contingency_table_as_dict)\n\n print (\n 'POD = {0:.4f} ... POFD = {1:.4f} ... success ratio = {2:.4f} ... '\n 'FOCN = {3:.4f} ... accuracy = {4:.4f} ... CSI = {5:.4f} ... frequency '\n 'bias = {6:.4f} ... Peirce score = {7:.4f} ... 
Heidke score = {8:.4f}\\n'\n ).format(pod, pofd, success_ratio, focn, accuracy, csi, frequency_bias,\n peirce_score, heidke_score)\n\n auc, scikit_learn_auc = _create_roc_curve(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n bss_dict = _create_attributes_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n aupd = _create_performance_diagram(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels, output_dir_name=output_dir_name)\n print '\\n'\n\n evaluation_file_name = '{0:s}/model_evaluation.p'.format(output_dir_name)\n print 'Writing results to: \"{0:s}\"...'.format(evaluation_file_name)\n model_eval.write_results(\n forecast_probabilities=forecast_probabilities,\n observed_labels=observed_labels,\n binarization_threshold=binarization_threshold, pod=pod, pofd=pofd,\n success_ratio=success_ratio, focn=focn, accuracy=accuracy, csi=csi,\n frequency_bias=frequency_bias, peirce_score=peirce_score,\n heidke_score=heidke_score, auc=auc, scikit_learn_auc=scikit_learn_auc,\n aupd=aupd, bss_dict=bss_dict, pickle_file_name=evaluation_file_name)", "def eval_experiment(session, model, result, writer, last_step, max_steps,\n saver, features, eval_set, output_dir, unsupervised,\n num_gpus, **kwargs):\n del kwargs\n\n all_images = []\n all_labels = []\n all_output = []\n all_route = []\n\n total_correct = 0\n total_almost = 0\n\n global_step = last_step\n last_step = None\n\n batch_features = features\n for i in range(len(batch_features)):\n batch_features[i].pop('height', None)\n batch_features[i].pop('depth', None)\n batch_features[i].pop('num_targets', None)\n batch_features[i].pop('num_classes', None)\n batch_features[i].pop('recons_image', None)\n batch_features[i].pop('recons_label', None)\n\n for i in range(max_steps):\n last_step = i\n summary, correct, almost, inferred, export_data = session.run(\n [result.summary, result.correct, result.almost,\n result.inferred, batch_features])\n\n if unsupervised:\n writer.add_summary(summary, last_step)\n\n total_correct += correct\n total_almost += almost\n\n for j in range(num_gpus):\n all_images.append(export_data[j]['images'])\n all_labels.append(export_data[j]['labels'])\n all_output.append(inferred[j].capsule_output)\n all_route.append(inferred[j].route)\n\n # Export Experiment\n export_experiment(session, saver, last_step, global_step, output_dir,\n eval_set, features=all_output, labels=all_labels,\n images=all_images, route=all_route)\n\n if not unsupervised:\n summary = summarise_predictions(summary, max_steps, total_correct,\n total_almost)\n writer.add_summary(summary, last_step)", "def main(args):\n try:\n rec_path = project_path + \"/\" + args.rec\n test_data_path = project_path + \"/\" + args.test\n output_data_path = project_path + \"/\" + args.output\n\n rec = read_csv(rec_path)\n test = read_csv(test_data_path)\n\n accuracy = accuracy_calculator(rec, test)\n # Write to output file\n save_csv(accuracy, output_data_path)\n except Exception as e:\n logger.error(\"Unexpected error occurred when evaluation: \" + str(e))", "def train_and_evaluate(name, model, train, test, evaluation, final_eval, output_dir):\n\n print(\"---\" * 5)\n print(\"Running pipeline for {}\".format(name))\n\n plot_dir = os.path.join(output_dir, \"plots\")\n\n pipeline = make_pipeline(model)\n\n X_train, y_train = train.drop(\n [\"PM10\"], axis=1).values, 
train[\"PM10\"].values\n X_test, y_test = test.drop([\"PM10\"], axis=1).values, test[\"PM10\"].values\n X_eval, y_eval = evaluation.drop(\n [\"PM10\"], axis=1).values, evaluation[\"PM10\"].values\n X_final, y_final = final_eval.drop(\n [\"PM10\"], axis=1), final_eval[\"PM10\"].values\n\n # first round - fit on train, predict on test\n print(\"Fitting pipeline on train data\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_test, yhat, title=\"{} - Predicted vs. Actual on Test\".format(name), output_dir=plot_dir)\n\n # second round - fit on train + test, predict on evaluation\n X_train = np.concatenate([X_train, X_test])\n y_train = np.concatenate([y_train, y_test])\n print(\"Fitting pipeline on train + test data\")\n pipeline.fit(X_train,y_train)\n yhat = pipeline.predict(X_eval)\n mae = mean_absolute_error(y_eval,yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(y_eval,yhat,title=\"{} - Predicted vs. Actual on Evaluation\".format(name),output_dir=plot_dir)\n\n # final round - fit on last X hours, by which the actual score will be measured\n X_train = np.concatenate([X_train, X_eval])\n y_train = np.concatenate([y_train, y_eval])\n print(\"Fitting pipeline on all \\\"all available data\\\"\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_final)\n mae = mean_absolute_error(y_final, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_final, yhat, title=\"{} - Predicted vs. Actual\".format(name), output_dir=plot_dir)\n\n # save the model\n joblib.dump(model, os.path.join(\n output_dir, \"models\", \"{}.joblib\".format(name)))\n\n return yhat, mae", "def evaluate_model(y_pred, y_true, X_test, y_test, clf, target_names, X_train, y_train, print_scores = False, document=None, fname=None):\n if print_scores:\n ######################################################\n # accuracy\n print(\"Accuracy: \", accuracy_score(y_true, y_pred))\n ###################################################\n # balanced accuracy\n print(\"Balanced accuracy score: \", balanced_accuracy_score(y_true, y_pred))\n #########################\n # cohen_kappa_score\n \"\"\"\n The kappa score is a number between -1 and 1. 
Scores above .8 are generally considered good agreement; zero or lower means no agreement (practically random labels)\n \"\"\"\n print(\"cohen kappa score: \",cohen_kappa_score(y_true, y_pred), \"above 0.8 is good agreement\")\n ##############################\n # plot confusion matrix\n plot_confusion(clf, X_test, y_test, [\"HD\", \"WT\"])\n ####################################\n # classification report\n\n print(\"classification report: \\n\", classification_report(y_true, y_pred, target_names=target_names))\n #########################################\n # general metrics\n print(\"Precision: \",metrics.precision_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\"))\n\n print(\"Recall:\", metrics.recall_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\"))\n\n print(\"F1:\",metrics.f1_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\"))\n\n print(\"F beta, beta-0.5\", metrics.fbeta_score(y_true, y_pred, beta=0.5,average=\"binary\", pos_label=\"HD\"))\n\n print(\"F beta, beta-1\",metrics.fbeta_score(y_true, y_pred, beta=1,average=\"binary\", pos_label=\"HD\"))\n\n print(\"F beta, beta-2\",metrics.fbeta_score(y_true, y_pred, beta=2,average=\"binary\", pos_label=\"HD\"))\n\n print(\"precision recall fscore support\", metrics.precision_recall_fscore_support(y_true, y_pred, beta=0.5,average=\"binary\", pos_label=\"HD\"))\n\n\n # ROC curve\n y_scores = clf.predict_proba(X_test)[:, 1]\n precision, recall, threshold = precision_recall_curve(y_true, y_scores, pos_label=\"HD\")\n\n\n print(\"Average precision score: \", average_precision_score(y_true, y_scores, pos_label=\"HD\"))\n\n if document is not None:\n if fname is None:\n raise NameError(\"Provide a filename to save this document\")\n document.add_heading(\"Test Metrics\", level=2)\n document.add_paragraph((\"Accuracy: {}\".format(accuracy_score(y_true, y_pred))), style = \"List Bullet\")\n document.add_paragraph((\"Balanced accuracy score: {}\".format(balanced_accuracy_score(y_true, y_pred))), style = \"List Bullet\")\n document.add_paragraph((\"Cohen kappa score: {} \".format(accuracy_score(y_true, y_pred))), style = \"List Bullet\")\n p=document.add_paragraph(\"\", style = \"List Bullet\")\n p.add_run('(The kappa score is a number between -1 and 1. 
Scores above .8 are generally considered good agreement; zero or lower means no agreement (practically random labels)).').italic = True\n\n\n # confusion matricies\n document.add_heading(\"Confusion Matrices\", level=2)\n\n np.set_printoptions(precision=2)\n\n # Plot confusion matrices\n titles_options = [(\"Confusion matrix, without normalization\", None),\n (\"Normalized confusion matrix\", 'true')]\n for title, normalize in titles_options:\n memfile = io.BytesIO()\n disp = plot_confusion_matrix(clf, X_test, y_test,\n display_labels=[\"HD\", \"WT\"],\n cmap=plt.cm.Blues,\n normalize=normalize)\n disp.ax_.set_title(title)\n\n plt.savefig(memfile)\n document.add_picture(memfile, width=Inches(5))\n memfile.close()\n\n # classification report\n document.add_heading(\"Classification report\", level=2)\n document.add_paragraph(\"{}\".format(classification_report(y_true, y_pred, target_names=target_names)))\n\n # Precision/recall\n document.add_heading(\"Precision/Recall Scores\", level=2)\n document.add_paragraph(\"Precision: {}\".format(metrics.precision_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\")), style= \"List Bullet\")\n document.add_paragraph(\"Recall: {}\".format(metrics.recall_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\")), style= \"List Bullet\")\n document.add_paragraph(\"F1 {}\".format(metrics.f1_score(y_true, y_pred, average=\"binary\", pos_label=\"HD\")), style= \"List Bullet\")\n\n # Decision boundaries plot\n\n document.add_heading(\"Decision Surface of model - training\")\n memfile = io.BytesIO()\n m = clf\n pca_clf = plot_decision_boundaries.DecisionBoundaries(model=m, name=fname).plot(X_train, y_train, memfile)\n plt.savefig(memfile)\n document.add_picture(memfile, width=Inches(5))\n memfile.close()\n\n \"\"\"\n # todo - Krutik, I can't imagine I will have time to finish this section. 
If you want to create figures to show the test data on the decision surface, i think you need to adjust the dimensions of the plot within plot_decision_boundaries.DecisionBoundaries(), so they are the same as on the first plot, thus, the decision surface will be comparable for both plots \n \n document.add_heading(\"Decision Surface of model - testing\")\n memfile2 = io.BytesIO()\n plot_decision_boundaries.DecisionBoundaries(model=pca_clf, name=fname).test_plot(pca_clf, X_test, y_test, memfile2, X_train, y_train)\n plt.savefig(memfile2)\n document.add_picture(memfile2, width=Inches(5))\n memfile2.close()\n\n \"\"\"\n\n # feature importance\n\n memfile = io.BytesIO()\n y = permutation_based_feature_importance(clf, X_test, y_test, X_train.columns, X_train, y_train, save=True, filename = memfile)\n document.add_picture(memfile, width=Inches(5))\n memfile.close()\n\n document.save(r'../../ML/Classifiers/{}.docx'.format(fname))\n print(\"Saved {}.docx\".format(fname), \"in ../../ML/Classifiers/\")", "def eval(self, test_file_path: str) -> Tuple[List[Dict[str, float]], classification_report]:\n # TODO write code to extract features from test_file_path and \n # test the model\n pass", "def _run_evaluation(\n sess, experiment, eval_config, output_dir, min_range, max_range, num_bins,\n torsion_bins):\n tf.io.gfile.makedirs(os.path.join(output_dir, 'pickle_files'))\n\n logging.info('Eval config is %s\\nnum_bins: %d', eval_config, num_bins)\n num_examples = 0\n num_crops = 0\n start_all_time = time.time()\n\n # Either do the whole test set, or up to a specified limit.\n max_examples = experiment.num_eval_examples\n if eval_config.max_num_examples > 0:\n max_examples = min(max_examples, eval_config.max_num_examples)\n\n while num_examples < max_examples:\n one_prediction = compute_one_prediction(\n num_examples, experiment, sess, eval_config, num_bins, torsion_bins)\n\n single_message = one_prediction.single_message\n num_crops_local = one_prediction.num_crops_local\n sequence = one_prediction.sequence\n filebase = one_prediction.filebase\n softmax_probs = one_prediction.softmax_probs\n ss = one_prediction.ss\n asa = one_prediction.asa\n torsions = one_prediction.torsions\n\n num_examples += 1\n num_crops += num_crops_local\n\n # Save the output files.\n filename = os.path.join(output_dir,\n 'pickle_files', '%s.pickle' % filebase)\n distogram_io.save_distance_histogram(\n filename, softmax_probs, filebase, sequence,\n min_range=min_range, max_range=max_range, num_bins=num_bins)\n\n if experiment.model.torsion_multiplier > 0:\n torsions_dir = os.path.join(output_dir, 'torsions')\n tf.io.gfile.makedirs(torsions_dir)\n distogram_io.save_torsions(torsions_dir, filebase, sequence, torsions)\n\n if experiment.model.secstruct_multiplier > 0:\n ss_dir = os.path.join(output_dir, 'secstruct')\n tf.io.gfile.makedirs(ss_dir)\n secstruct.save_secstructs(ss_dir, filebase, None, sequence, ss)\n\n if experiment.model.asa_multiplier > 0:\n asa_dir = os.path.join(output_dir, 'asa')\n tf.io.gfile.makedirs(asa_dir)\n secstruct.save_secstructs(asa_dir, filebase, None, sequence,\n np.expand_dims(asa, 1), label='Deepmind 2D ASA')\n\n time_spent = time.time() - start_all_time\n logging.info(\n 'Evaluate %d examples, %d crops %.1f crops/ex. 
'\n 'Took %.1fs, %.3f s/example %.3f crops/s\\n%s',\n num_examples, num_crops, num_crops / float(num_examples), time_spent,\n time_spent / num_examples, num_crops / time_spent, single_message)\n\n logging.info('Tested on %d', num_examples)", "def plot_eval_3(trained_model, X_val, y_val, image_name):\n # FOR EACH CLASS\n # val_pred = trained_model.predict_proba(X_val, num_iteration=iteration)\n \n iterations = trained_model.booster_.current_iteration()\n# results = np.zeros((2, iterations))\n results = np.zeros((iterations,))\n for pos in range(iterations):\n \n # Calculate the current iteration (from 1 to iterations)\n iteration = pos + 1\n \n # Predict validation set for the current iteration\n# start_time = timeit.default_timer()\n val_pred = trained_model.predict(X_val, num_iteration=iteration)\n# end_time = timeit.default_timer()\n# time = end_time - start_time\n# speed = int(X_val.shape[0] / time)\n \n # Number of hits\n val_ok = (val_pred == y_val)\n \n # Percentage of hits\n val_acc = val_ok.sum() / val_ok.size\n \n # Actualize data for plotting results\n# results[0][pos] = time\n# results[1][pos] = val_acc\n results[pos] = val_acc\n \n # Generate accuracy plot\n plt.figure()\n# plt.plot(results[0], results[1], 'b')\n plt.plot(results, 'b')\n plt.title('Validation accuracy')\n plt.xlabel('iterations')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save validation plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_val_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def evaluate(network, loss_function, softmax_function, test_loader, test_set_size):\n running_loss = 0.0\n confusion_matrix = { # Of shape [predicted value][real value]\n 0: {0: 0, 1: 0, 2: 0},\n 1: {0: 0, 1: 0, 2: 0},\n 2: {0: 0, 1: 0, 2: 0},\n }\n batch_size = -1\n network.eval()\n with torch.no_grad():\n correct = 0\n for graph_batch, label_batch in test_loader:\n if batch_size == -1:\n batch_size = label_batch.size(0)\n logits = network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0)\n running_loss += loss_function(logits, label_batch).detach().item()\n predicted_classes = torch.argmax(logits, dim=1).detach()\n correct += (predicted_classes == label_batch).sum().item()\n for predicted_class, label in zip(predicted_classes, label_batch):\n confusion_matrix[predicted_class.item()][label.item()] += 1\n\n if batch_size <= 0:\n print(\"Error : batch size is {}\".format(batch_size))\n exit(1)\n\n return correct / test_set_size, running_loss / len(test_loader), confusion_matrix", "def evaluate(model, iterations, use_cuda=False):\n\n logger.debug(\"Allocating input and target tensors on GPU : %r\", use_cuda)\n\n # create the instance of data loader\n data_loader = DataLoaderMnist(cuda=use_cuda, seed=1, shuffle=False, train_batch_size=64, test_batch_size=100)\n\n model.eval()\n total = 0\n correct = 0\n current_iterations = 0\n\n with torch.no_grad():\n for inputs, labels in data_loader.test_loader:\n inputs, labels = inputs.to(data_loader.device), labels.to(data_loader.device)\n output = model(inputs)\n current_iterations += 1\n _, predicted = torch.max(output.data, dim=1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if iterations is not None:\n if current_iterations >= iterations:\n break\n\n accuracy = correct / total\n return accuracy", "def evaluate(self, data, labels, batch_size=32, max_seq_len=128):\n test_dataloader = setup_dataloader(data, labels, max_seq_len, batch_size)\n accuracy = 0\n \n for batch in 
tqdm(test_dataloader, desc=\"Iteration\"):\n with torch.no_grad():\n labels = batch[\"labels\"]\n batch = {k: t.to(self.device) for k, t in batch.items() if k != \"labels\"}\n outputs = self.model(**batch)\n logits = outputs[0]\n accuracy += calculate_accuracy(logits, labels)\n \n batch = {k: t.detach().cpu() for k, t in batch.items()}\n del batch\n torch.cuda.empty_cache()\n\n accuracy = accuracy / len(test_dataloader)\n return accuracy", "def learn(self, input_dir: str) -> float:\n if self.is_default:\n LOGGER.error(\"Cannot learn using default model\")\n raise GuesslangError('Cannot learn using default \"readonly\" model')\n\n languages = self.languages\n\n LOGGER.info(\"Extract training data\")\n extensions = [ext for exts in languages.values() for ext in exts]\n files = search_files(input_dir, extensions)\n nb_files = len(files)\n chunk_size = min(int(CHUNK_PROPORTION * nb_files), CHUNK_SIZE)\n\n LOGGER.debug(\"Evaluation files count: %d\", chunk_size)\n LOGGER.debug(\"Training files count: %d\", nb_files - chunk_size)\n batches = _pop_many(files, chunk_size)\n\n LOGGER.debug(\"Prepare evaluation data\")\n evaluation_data = extract_from_files(next(batches), languages)\n LOGGER.debug(\"Evaluation data count: %d\", len(evaluation_data[0]))\n\n accuracy = 0\n total = ceil(nb_files / chunk_size) - 1\n LOGGER.info(\"Start learning\")\n for pos, training_files in enumerate(batches, 1):\n LOGGER.info(\"Step %.2f%%\", 100 * pos / total)\n\n LOGGER.debug(\"Training data extraction\")\n training_data = extract_from_files(training_files, languages)\n LOGGER.debug(\"Training data count: %d\", len(training_data[0]))\n\n steps = int(FITTING_FACTOR * len(training_data[0]) / 100)\n LOGGER.debug(\"Fitting, steps count: %d\", steps)\n self._classifier.fit(input_fn=_to_func(training_data), steps=steps)\n\n LOGGER.debug(\"Evaluation\")\n accuracy = self._classifier.evaluate(\n input_fn=_to_func(evaluation_data), steps=1)['accuracy']\n _comment(accuracy)\n\n return accuracy", "def prediction():\r\n\r\n loaded_model = load_model('imageTrainedModel.h5')\r\n print(loaded_model.summary())\r\n\r\n # retrieve history also:\r\n f = open('history.pckl', 'rb')\r\n history = pickle.load(f)\r\n f.close()\r\n\r\n print(history.keys())\r\n print(history)\r\n\r\n epochs = len(history['loss']) # length of the list stored at 'loss'\r\n # Plot losses for train and validation\r\n plt.figure()\r\n plt.title('Loss as training progresses')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.plot(history['loss'], label='Train Error')\r\n plt.plot(history['val_loss'], label='Val Error')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Plot metrics\r\n plt.plot(history['acc']) # use same metric that was used for training. 
'history' is a dictionary.\r\n plt.title('Accuracy as training progresses')\r\n plt.ylabel('Accuracy (%)')\r\n plt.xlabel('Epoch')\r\n ymax = max(history['acc'])\r\n xpos = history['acc'].index(ymax)\r\n xmax = xpos\r\n plt.annotate('Maximum accuracy: %s' % round(ymax, 3),\r\n xy=(xmax, ymax), xycoords='data',\r\n xytext=(0.5, 0.5), textcoords='axes fraction',\r\n fontsize=12)\r\n plt.show()\r\n\r\n # make predictions using x_test\r\n test_y_predictions = loaded_model.predict(x_test, batch_size=None, verbose=1, steps=None)\r\n test_y_predictions = np.around(test_y_predictions, decimals=0) # round to whole integers\r\n true_false_array = np.equal(y_test, test_y_predictions) # test of equality.\r\n true_count = np.sum(true_false_array) # number of correctly categorised images\r\n false_count = true_false_array.shape[0] - true_count # number of images not correctly categorised\r\n\r\n # Plot predicted and actual image categories\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111)\r\n plt.title('Classification of Image Categories')\r\n plt.ylabel('Number of Images')\r\n plt.xlabel('Image Classification')\r\n label = ['Correct', 'Incorrect']\r\n index = np.arange(len(label))\r\n plt.xticks(index, label, fontsize=10, rotation=0)\r\n ax1.bar(index, [true_count, false_count])\r\n plt.show()", "def evaluate(path_to_config, path_to_model):\n\n config, paths, session_id = setup(path_to_config, 1)\n assert isinstance(config, ExperimentConfig)\n logger = logging.getLogger(\"%s.main\" % config.name)\n\n logger.info(\"Evaluating network on test data\")\n\n network = Network(config, paths, session_id, 0)\n network.build()\n network.evaluate(DATA_TYPE_TEST, model_path=path_to_model)", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def evaluate_nn(sess, batch_size, get_batches_fn, cross_entropy_loss, input_image,\n correct_label, logits, keep_prob, num_classes):\n\n print()\n print(\"Evaluating model...\")\n\n start_time = time.time()\n\n # create confusion matrix\n predictions = tf.argmax(logits, 1)\n labels = tf.argmax(tf.reshape(correct_label, (-1, num_classes)), 1)\n\n confusion_matrix = tf.confusion_matrix(labels=labels, predictions=predictions)\n\n print(\"Start time: {}\".format(time.ctime(start_time)))\n\n sample_count = 0\n\n total_background_recall = 0\n total_road_recall = 0\n\n min_background_recall = 1\n min_road_recall = 1\n\n # for all the images calculate road recall and background recall, then find average values\n for image, label in get_batches_fn(batch_size):\n loss, conf = sess.run([cross_entropy_loss, confusion_matrix],\n feed_dict={input_image: image,\n correct_label: label,\n keep_prob: 1.})\n cur_batch_size = image.shape[0]\n sample_count += cur_batch_size\n\n background_recall = conf[0, 0] / np.sum(conf[0, :])\n road_recall = conf[1, 1] / np.sum(conf[1, :])\n\n total_background_recall += background_recall * cur_batch_size\n total_road_recall += road_recall * cur_batch_size\n\n min_background_recall = min(min_background_recall, background_recall)\n min_road_recall = min(min_road_recall, road_recall)\n\n print(\"Average values: background recall: {:.3f}, road recall: {:.3f}\".\n format(total_background_recall/sample_count, total_road_recall/sample_count))\n print(\"Min values: background recall: {:.3f}, road recall: {:.3f}\".\n 
format(min_background_recall, min_road_recall))\n print(\"Overall time: {:.3f}\\n\".format(time.time() - start_time))", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def evaluate(model, data):\n n_targets = 0\n n_correct_predictions = 0\n\n # Set the model on evaluatio mode.\n model.eval()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[evaluate] batch accuracy: 0.000',\n leave=False)\n\n # Loop through validation batches.\n for inputs, targets in data:\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n\n # Choose the class with maximum probability.\n _, predictions = torch.max(outputs, 1)\n\n accuracy = (predictions == targets).sum().item() / len(targets)\n progress_bar.update(1)\n progress_bar.set_description(\n '[evaluate] batch accuracy: {accuracy:.3f}'.format(\n accuracy=accuracy))\n\n # Accumulate targets and correct predictions count.\n n_targets += len(targets)\n n_correct_predictions += (predictions == targets).sum().item()\n\n # Close progress bar.\n progress_bar.close()\n\n return n_correct_predictions / n_targets", "def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')", "def eval(self):\n target_truth_labels = self.get_target_labels()\n for key in self.id_uncertainty_measures.keys():\n # deep copy needed as we mutate confidence values later on\n decision_fn_value = np.concatenate((copy.deepcopy(self.id_uncertainty_measures[key]),\n copy.deepcopy(self.ood_uncertainty_measures[key])),\n axis=0)\n # negation needed for confidence, as confidence is indicator of label=0 samples\n # i.e for correct classified samples.\n # But we need scores for label=1 samples i.e misclassified samples\n # to be higher, so we negate.\n if key == UncertaintyMeasuresEnum.CONFIDENCE or key == UncertaintyMeasuresEnum.PRECISION:\n decision_fn_value *= -1.0\n\n aupr, auroc = ClassifierPredictionEvaluator.compute_pr_roc_curves(\n decision_fn_value, target_truth_labels, self.result_dir, key._value_)\n\n with open(os.path.join(self.result_dir, 'results.txt'), 'a') as f:\n f.write('AUPR using ' + key._value_ + \": \" +\n str(np.round(aupr * 100.0, 1)) + '\\n')\n f.write('AUROC using ' + key._value_ + \": \" +\n str(np.round(auroc * 100.0, 1)) + '\\n')", "def evaluate_classifiers(list_of_models, model_names, data_matrix, actual_values, find_features, \\\n output_file, is_distance_matrix=False):\n test_sets = ml.get_test_sets(actual_values, 10)\n predictions, timers = ml.get_cross_validation_results(list_of_models, model_names, data_matrix, \\\n actual_values, test_sets, find_features, is_distance_matrix)\n ml_eval.evaluate_classification_results(model_names, predictions, list(set(actual_values)), timers, output_file)\n return predictions", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, 
config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def evaluate_detections(self, all_boxes, output_dir=None):\n raise NotImplementedError", "def _evaluate_model(model, val_loader, criterion, epoch, num_epochs, writer, current_lr, log_every=20):\n\n # Set to eval mode\n model.eval()\n\n y_probs = []\n y_gt = []\n losses = []\n\n for i, (images, label) in enumerate(val_loader):\n\n if torch.cuda.is_available():\n images = [image.cuda() for image in images]\n label = label.cuda()\n\n output = model(images)\n\n loss = criterion(output, label)\n\n loss_value = loss.item()\n losses.append(loss_value)\n\n probas = torch.sigmoid(output)\n\n y_gt.append(int(label.item()))\n y_probs.append(probas.item())\n\n try:\n auc = metrics.roc_auc_score(y_gt, y_probs)\n except:\n auc = 0.5\n\n writer.add_scalar('Val/Loss', loss_value, epoch * len(val_loader) + i)\n writer.add_scalar('Val/AUC', auc, epoch * len(val_loader) + i)\n\n if (i % log_every == 0) & (i > 0):\n print('''[Epoch: {0} / {1} | Batch : {2} / {3} ]| Avg Val Loss {4} | Val AUC : {5} | lr : {6}'''.\n format(\n epoch + 1,\n num_epochs,\n i,\n len(val_loader),\n np.round(np.mean(losses), 4),\n np.round(auc, 4),\n current_lr\n )\n )\n\n writer.add_scalar('Val/AUC_epoch', auc, epoch + i)\n\n val_loss_epoch = np.round(np.mean(losses), 4)\n val_auc_epoch = np.round(auc, 4)\n\n return val_loss_epoch, val_auc_epoch", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = 
src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate(net, dev, batcher): \n def accuracy(outputs, labels):\n correct = 0\n total = 0\n misclassified = []\n for (i, output) in enumerate(outputs):\n total += 1\n if labels[i] == output.argmax():\n correct += 1 \n return correct, total, misclassified\n val_loader = batcher(dev, 128)\n total_val_loss = 0\n correct = 0\n total = 0\n misclassified = []\n loss = torch.nn.CrossEntropyLoss() \n for data in val_loader:\n inputs = data[:,1:]\n labels = torch.clamp(data[:,0], min=0).long()\n\n val_outputs = net(inputs) \n val_loss_size = loss(val_outputs, labels)\n\n correct_inc, total_inc, misclassified_inc = accuracy(val_outputs, \n labels)\n correct += correct_inc\n total += total_inc\n misclassified += misclassified_inc\n total_val_loss += val_loss_size.data.item()\n return correct/total, misclassified", "def run(self, config, **kwargs):\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n experiment_path = self.train(config, **kwargs)\n evaluation_logger = utils.getfile_outlogger(\n Path(experiment_path, 'evaluation.log'))\n for testdata, testlabel in zip(config_parameters['testdata'],\n config_parameters['testlabel']):\n evaluation_logger.info(\n f'Evaluting {testdata} with {testlabel} in {experiment_path}')\n # Scores for later evaluation\n scores_file = Path(experiment_path,\n 'scores_' + Path(testdata).stem + '.tsv')\n evaluation_result_file = Path(\n experiment_path) / 'evaluation_{}.txt'.format(\n Path(testdata).stem)\n self.score(experiment_path,\n result_file=scores_file,\n label=testlabel,\n data=testdata)\n self.evaluate_eer(scores_file,\n ground_truth_file=testlabel,\n evaluation_res_file=evaluation_result_file)" ]
[ "0.7298459", "0.6623584", "0.6488723", "0.6457983", "0.6437241", "0.6381797", "0.6377442", "0.6340089", "0.6337451", "0.63357455", "0.63341504", "0.6327298", "0.6297221", "0.62892634", "0.62823796", "0.62763405", "0.6274694", "0.6269606", "0.6268752", "0.6253955", "0.6247242", "0.62461805", "0.6244833", "0.62428856", "0.6241179", "0.62390554", "0.6230034", "0.62136745", "0.6198779", "0.6189779", "0.6177726", "0.6168928", "0.6159611", "0.6158887", "0.615714", "0.61473006", "0.614728", "0.6146219", "0.6143336", "0.6135524", "0.6131384", "0.61284053", "0.6127831", "0.6126591", "0.61230296", "0.6121124", "0.6106213", "0.60952526", "0.6091589", "0.60898244", "0.6071381", "0.6060525", "0.6059573", "0.60482347", "0.603458", "0.60341275", "0.6031671", "0.60243654", "0.6021665", "0.6016936", "0.60129", "0.6011866", "0.6008761", "0.60084", "0.6003886", "0.5998187", "0.5995987", "0.5989816", "0.5981645", "0.59785837", "0.59770495", "0.59750545", "0.5973867", "0.5969159", "0.59643906", "0.5961157", "0.5959398", "0.59591264", "0.595862", "0.59553856", "0.59493554", "0.59452677", "0.5932916", "0.59327614", "0.59278786", "0.59271663", "0.5927047", "0.59264493", "0.59262705", "0.5925751", "0.5925034", "0.5922497", "0.5922065", "0.5921601", "0.59178436", "0.59111077", "0.5903343", "0.5901621", "0.58941126", "0.5891944" ]
0.65427065
2
custom function to remove the stopwords
def remove_stopwords(self,text):
    return " ".join([word for word in str(text).split() if word not in self.STOPWORDS])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)", "def remove_stopwords(text):\n stopwords = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\", \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\", \"he\", \"him\", \"his\", \"himself\", \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\", \"itself\", \"they\", \"them\", \"their\", \"theirs\", \"themselves\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"these\", \"those\", \"am\", \"is\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \"have\", \"has\", \"had\", \"having\", \"do\", \"does\", \"did\", \"doing\", \"a\", \"an\", \"the\", \"and\", \"but\", \"if\", \"or\", \"because\", \"as\", \"until\", \"while\", \"of\", \"at\", \"by\", \"for\", \"with\", \"about\", \"against\", \"between\", \"into\", \"through\", \"during\", \"before\", \"after\", \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\", \"over\", \"under\", \"again\", \"further\", \"then\", \"once\", \"here\", \"there\", \"when\", \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"most\", \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\", \"own\", \"same\", \"so\", \"than\", \"too\", \"very\", \"s\", \"t\", \"can\", \"will\", \"just\", \"don\", \"should\", \"now\"]\n return \" \".join([word for word in text.split() if word not in stopwords])", "def remove_stopwords(words):\n new_words = []\n for word in words:\n # print(word)\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\r\n new_words = []\r\n for word in words:\r\n if word not in stopwords.words('english'):\r\n new_words.append(word)\r\n return new_words", "def _remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def removeStopwords(self, words):\n\t\twordList = [w.strip() for w in words.split(' ')]\n\t\trtnWords = []\n\t\tfor word in wordList:\n\t\t\tif word.lower() not in self._stopwords:\n\t\t\t\trtnWords.append(word)\n\t\treturn \" \".join(rtnWords)", "def remove_stopwords(text):\n text = \" \"+text\n text = text.upper()\n for word in STOP_WORDS:\n text = text.replace(word.upper(),\" \")\n return text", "def remove_stopwords(self, text):\n stopwords_list = stopwords.words('english')\n whitelist = [\"n't\", \"not\", \"no\"]\n words = text.split()\n clean_words = [word for word in words if (word not in stopwords_list or word in whitelist) and len(word) > 1]\n return \" \".join(clean_words)", "def remove_stopwords(data):\n stop_words = stopwords.words('english')\n words = word_tokenize(str(data))\n new = \"\"\n for word in 
words:\n if word not in stop_words and len(word) > 1:\n new = new + \" \" + word\n return new", "def remove_stop_words(document):\n\n stop_words = stopwords.words(\"english\")\n stop_words = set(stop_words + EXTENDED_STOP_WORDS)\n return [token for token in document if token not in stop_words]", "def _remove_stopwords(self, words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def _remove_stopwords(self, doc: str):\n processed_tweet = [word for word in doc.split() if\n word not in stopwords.words('spanish') and\n len(word) > 1]\n return ' '.join(processed_tweet)", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopword_list:\n new_words.append(word)\n return new_words", "def remove_stopwords(text):\n return \" \".join([word for word in str(text).split() if word not in STOPWORDS])", "def stopword_removal(words):\n stops = set(stopwords.words('english'))\n words = [w for w in words if w not in stops]\n return words", "def remove_stopwords(words: list) -> list:\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('spanish'):\n new_words.append(word)\n return new_words", "def remove_stopwords(word: str) -> str:\n stop_words = stopwords.words('english')\n if not word in stop_words:\n return word\n else:\n return '0'", "def remove_stopwords(words):\n removed_stopwords = []\n for word in words:\n if word not in stopwords.words('english'):\n removed_stopwords.append(word)\n return removed_stopwords", "def remove_stopwords(text):\n tokens = word_tokenize(text)\n filtered = [word for word in tokens if word not in stop_words]\n filtered = ' '.join(filtered)\n return filtered", "def remove_stopwords(tweet, stopwords):\n\n\timport re\n\n\tstopwords_set = set(stopwords)\n\tsplit_tweet = [word for word in re.split('\\W+', tweet) if word \\\n\t\t\t\t\t\t\t\t\t\t\t not in stopwords_set]\n\treturn ' '.join(split_tweet)", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in nltk.corpus.stopwords.words('french'):\n new_words.append(word)\n return new_words", "def remove_stop_words(tweet):\n tokens_without_sw = \"\"\n for word in tweet.split():\n if not word.lower() in STOPWORDS:\n tokens_without_sw += word.lower() + \" \"\n return tokens_without_sw", "def clean_stopwords(text):\n tokens = tokenize(text)\n tokens = stopwordsRem(tokens)\n return tokens", "def remove_stopwords(document):\n return list(filter(lambda x: x not in BasicNL.words, document))", "def remove_stopwords(text):\r\n text_split=text.split()\r\n text_split=[word for word in text_split if word not in stopwords.words('spanish')]\r\n return text_split", "def _remove_stopwords(self, text: str) -> str:\n pattern = r\"\"\"\n (?x) # Set flag to allow verbose regexps\n \\w+(?:-\\w+)* # Words with optional internal hyphens \n | \\s* # Any space\n | [][!\"#$%&'*+,-./:;<=>?@\\\\^():_`{|}~] # Any symbol \n \"\"\"\n symbol = \" \"\n return \"\".join(\n t if t not in self.stop_words else symbol for t in re.findall(pattern, text)\n )", "def remove_stopwords(setence):\n sent = setence.lower().strip()\n stopwords = load_stopwords(stopword_path)\n tokens = tokenizer.tokenize(sent)\n tokens_filter_stopwords = [word for word in tokens if word not in stopwords]\n string = \" \".join(word for word in tokens_filter_stopwords)\n return 
string", "def removeStopword(text, stop_words=stop_words):\n word_tokens = word_tokenize(text)\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n return ' '.join(filtered_sentence)", "def removeStopwords(self, words):\n\t\tnewWords = []\n\t\tfor word in words:\n\t\t\tif word not in stopwords.words('english'):\n\t\t\t\tnewWords.append(word)\n\t\treturn newWords", "def remove_stopwords(tokens):\n\n return [t for t in tokens if t not in stopwords.words('english')]", "def removeCustomStopwords(self, words, stopWords):\n\t\tremoved = [word for word in words if word not in stopWords]\t\t\n\t\treturn removed", "def remove_stop_words(text):\n return ' '.join(\n [word for word in text.split(' ') if word not in final_stop_words])", "def remove_stopwords(lista,stopwords):\n lista_out = list()\n for idx, text in enumerate(lista):\n text = ' '.join([word for word in text.split() if word not in stopwords])\n text = text.strip()\n lista_out.append(text)\n #print(\"Len original: {} - Len processed stopwords: {}\".format(len(lista),len(lista_out)))\n return lista_out", "def remove_stopwords(ingredient, stopwords):\n ingredient = ingredient.lower() # normalizes to lower case\n no_stops = [gram for gram in ingredient.split(\" \") if gram not in stopwords]\n new_ingredient = \" \".join(no_stops)\n return new_ingredient", "def remove_stopwords(words):\n stopwords = nltk.corpus.stopwords.words('english')\n return [w for w in words if w not in stopwords]", "def stopwordsRem(tokens):\n no_sw = [t for t in tokens if not t in stopwords.words('english')]\n return no_sw", "def removeStopWords(input_str, rm_words=[]): \n filtered_msg = []\n #check if string, and split on spaces\n if isinstance(input_str, basestring):\n input_str = tokenize(input_str)\n #check each word against nltk stopwords and specified input list\n for word in input_str:\n if word.lower() not in stopwords.words('english') and word.lower() not in rm_words:\n filtered_msg.append(word)\n return \" \".join(filtered_msg)", "def rm_stop_words(self, words):\n return [word for word in words if word.lower() not in self.stopwords]", "def remove_stopwords(string):\n swords = set(stopwords.words(\"english\"))\n return \" \".join([w for w in word_tokenize(string) if w not in swords])", "def _remove_stopwords(data, settings):\n column = settings['input_col']\n output_col = settings['output_col']\n frag = settings['id_frag']\n\n stopwords = settings['news_stops_words']\n stopwords += settings['stopwords']\n stopwords = np.unique(stopwords)\n\n tmp = []\n if data.shape[0] > 0:\n if settings['case_sensitive']:\n stopwords = set(stopwords)\n for tokens in data[column].values:\n tmp.append(list(set(tokens).difference(stopwords)))\n\n else:\n stopwords = set([tok.lower() for tok in stopwords])\n\n for tokens in data[column].values:\n entry = [tok.lower() for tok in tokens]\n tmp.append(list(set(entry).difference(stopwords)))\n\n else:\n tmp = np.nan\n\n if output_col in data.columns:\n data.drop([output_col], axis=1, inplace=True)\n\n data[output_col] = tmp\n\n info = generate_info(data, frag)\n return data, info", "def stopword_filter(words):\n new_words = []\n for w in words:\n if w in stopwords.words(\"german\"): continue\n else: new_words += [w]\n return new_words", "def remove_stopwords(data: pd.Series) -> pd.Series:\n pattern = r'\\b(?:{})\\b'.format('|'.join(stopwords.words('english')))\n return data.str.replace(pattern, '')", "def removestopwords(query):\n wordlist = [word for word in query.split() if word not in 
stopwords.words('english')]\n return \" \".join(wordlist)", "def remove_stop_words(self):\n self.word_list = [word for word in self.word_list if len(word) > 1 and word not in STOP_WORDS] #The len(word) check is here because there's still one piece of white space I haven't pinned down in each file. I haven't taken the time to figure out a quick way to look at all the whitespace characters yet, but none of the ones I included takes care of that one lonely space. Will keep on it.\n self.word_list.sort()", "def remove_stopwords(tokens):\n stop_words = set(stopwords.words('english'))\n\n tokens = [w for w in tokens if w not in stop_words]\n\n return tokens", "def remove_stop_words(self, query):\n ans=\"\"\n words= query.split()\n for word in words:\n if word.lower() not in self.stop_words:\n ans+=word+\" \"\n return ans", "def remove_stopwords_set(sentence: str, stop_words: Collection[str]) -> str:\n return \" \".join([w for w in word_tokenize(sentence) if w not in stop_words])", "def removeStopWords(self, text=None, sort=True, lc=False):\n\n\t\tif type(text) == type(str()):\n\t\t\ttext = text.split()\n\n\t\ttextWithStopWords = text\n\t\ttextWithoutStopWords = list()\n\n\t\tif sort:\n\t\t\ttextWithStopWords = sorted(textWithStopWords)\n\n\t\tappend = textWithoutStopWords.append\n\t\tlower = str.lower\n\n\t\t# Loop through all the words in the text\n\t\tfor word in textWithStopWords:\n\n\t\t\t# If the word is not a stop word, add it to textWithoutStopWords\n\t\t\tif lower(word) not in self.stop_words:\n\t\t\t\tif lc==True:\n\t\t\t\t\tappend(lower(word))\n\t\t\t\telse:\n\t\t\t\t\tappend(word)\n\n\t\treturn textWithoutStopWords", "def stopwords_removal(text_vector):\n\n text_vector = [\n i for i in text_vector if i not in stopwords.words('english')]\n return text_vector", "def remove_stopwords(text: Iterable[str]) -> Generator[str, None, None]:\n stop_words = set(stopwords.words(\"english\"))\n return (\n word\n for word in text\n if word not in stop_words\n )", "def removeStopWords(self,phrase):\n if(\"stopWord\" in self._classes):\n return self._stopWord.removeStopWord(phrase)", "def stopword_removal_from_taggedwords(tagged_words):\n stops = set(stopwords.words('english'))\n tagged_words = [w for w in tagged_words if not w[0] in stops]\n return tagged_words", "def remove_stop_words(text_tokens):\n\n return [words for words in text_tokens if words not in stop_words]", "def remove_stopwords(texts):\n return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]", "def stop_words_remover(tokenized_sent):\n # Convert string back to list\n\n filtered_sentence = []\n stop_words = set(stopwords.words(\"english\"))\n for word in literal_eval(tokenized_sent):\n if word not in stop_words:\n filtered_sentence.append(word)\n return filtered_sentence", "def removeOwnStopWords(self, sort=True, lc=False):\n\t\tself.textFile = self.removeStopWords(text=self.textFile, sort=sort, lc=lc)", "def remove_stopwords(tokens):\n stopword_list = make_stopwords()\n no_stop_tokens = [token for token in tokens if token not in stopword_list]\n return no_stop_tokens", "def stop_words():\n return get_stop_words('es') + get_stop_words('ca') + get_stop_words('en')", "def stopWord_removal(list_of_words):\n curated_list = [w for w in list_of_words if not w in STOP_WORDS]\n return curated_list", "def remove_stopwords(text, is_lower_case=False):\n tokens = tokenizer.tokenize(text)\n tokens = [token.strip() for token in tokens]\n if is_lower_case:\n filtered_tokens = [token for token in tokens if 
token not in stopword_list]\n else:\n filtered_tokens = [token for token in tokens if token.lower() not in stopword_list]\n filtered_text = ' '.join(filtered_tokens)\n return filtered_text", "def test_stopwords():\n assert TextNormalizer().transform([[\"a b\"]])[\"corpus\"][0] == [\"b\"]", "def remove_stop_words(tokens, language):\n if not remove_stop_words_activated or language.__eq__(\"Catalan\"):\n return tokens\n output = []\n stop = stopwords.words(language.lower())\n for token in tokens:\n if token not in stop:\n output.append(token)\n return output", "def remove_stop_words(self, content):\n stop_words = set(stopwords.words('english'))\n words = nltk.word_tokenize(content)\n words = [word.lower() for word in words if word.isalpha()]\n return [w for w in words if not w in stop_words]", "def remove_stop(text):\n STOPWORDS = ['a' ,'b', 'c','d','e','f' ,'g' ,'h','i','j','k','l','m','n','o' ,'p' ,'q','r','s','t','u' ,'v' ,'w','x','y','z']\n return \" \".join([word for word in str(text).split() if word not in STOPWORDS])", "def remove_stop_words(stop_list, tokens):\n return [t for t in tokens if len(t) > 2 and not t in stop_list]", "def article_stop_words_removal(article,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n stop_words = set(stopwords.words('english'))\n article_words = []\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word, preprocess_type)\n if preprocessed_word not in stop_words:\n article_words.append(preprocessed_word)\n return article_words", "def stopWord(text):\r\n\r\n\ttext = removePunc(text) # memanggil fungsi untuk menghapus tanda baca\r\n\t\r\n\tfile = open('TP2-stopword.txt', 'r')\t\t\t\t # membuka file stopword\r\n\tstopWord = file.read().split()\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghapus stopword dalam teks\r\n\t\tif word in stopWord:\r\n\t\t\ttext.remove(word)\r\n\r\n\tfile.close()\r\n\r\n\treturn text", "def stop_words_remover(df):\n stop_words = stop_words_dict['stopwords']\n\n df['Without Stop Words'] = [' '.join([w for w in x.lower().split()\n if w not in stop_words])\n for x in df['Tweets'].tolist()\n ]\n result = []\n l1 = df['Without Stop Words']\n for tweet in l1:\n result.append(tweet.split(' '))\n df['Without Stop Words'] = result\n return df", "def stop_words_remover(df):\n \n df['Without Stop Words'] = df['Tweets'].apply(str.lower).apply(str.split)\n\n for i in range(len(twitter_df)):\n df['Without Stop Words'][i] = [x for x in df['Without Stop Words'][i] if x not in stop_words_dict['stopwords']]\n return df\n pass", "def removeStopWords(self, words):\n line = []\n for w in words:\n if w not in self.stop_words:\n line.append(w)\n return line", "def stop_words_remover(df):\n\n # dictionary of english stopwords\n stop_words_dict = {\n 'stopwords':[\n 'where', 'done', 'if', 'before', 'll', 'very', 'keep', 'something', 'nothing', 'thereupon', \n 'may', 'why', '’s', 'therefore', 'you', 'with', 'towards', 'make', 'really', 'few', 'former', \n 'during', 'mine', 'do', 'would', 'of', 'off', 'six', 'yourself', 'becoming', 'through', \n 'seeming', 'hence', 'us', 'anywhere', 'regarding', 'whole', 'down', 'seem', 'whereas', 'to', \n 'their', 'various', 'thereafter', '‘d', 'above', 'put', 'sometime', 'moreover', 'whoever', 'although', \n 'at', 'four', 'each', 'among', 'whatever', 'any', 'anyhow', 'herein', 'become', 'last', 'between', 'still', \n 'was', 'almost', 'twelve', 'used', 'who', 'go', 'not', 'enough', 'well', '’ve', 'might', 'see', 'whose', \n 'everywhere', 'yourselves', 'across', 'myself', 'further', 'did', 
'then', 'is', 'except', 'up', 'take', \n 'became', 'however', 'many', 'thence', 'onto', '‘m', 'my', 'own', 'must', 'wherein', 'elsewhere', 'behind', \n 'becomes', 'alone', 'due', 'being', 'neither', 'a', 'over', 'beside', 'fifteen', 'meanwhile', 'upon', 'next', \n 'forty', 'what', 'less', 'and', 'please', 'toward', 'about', 'below', 'hereafter', 'whether', 'yet', 'nor', \n 'against', 'whereupon', 'top', 'first', 'three', 'show', 'per', 'five', 'two', 'ourselves', 'whenever', \n 'get', 'thereby', 'noone', 'had', 'now', 'everyone', 'everything', 'nowhere', 'ca', 'though', 'least', \n 'so', 'both', 'otherwise', 'whereby', 'unless', 'somewhere', 'give', 'formerly', '’d', 'under', \n 'while', 'empty', 'doing', 'besides', 'thus', 'this', 'anyone', 'its', 'after', 'bottom', 'call', \n 'n’t', 'name', 'even', 'eleven', 'by', 'from', 'when', 'or', 'anyway', 'how', 'the', 'all', \n 'much', 'another', 'since', 'hundred', 'serious', '‘ve', 'ever', 'out', 'full', 'themselves', \n 'been', 'in', \"'d\", 'wherever', 'part', 'someone', 'therein', 'can', 'seemed', 'hereby', 'others', \n \"'s\", \"'re\", 'most', 'one', \"n't\", 'into', 'some', 'will', 'these', 'twenty', 'here', 'as', 'nobody', \n 'also', 'along', 'than', 'anything', 'he', 'there', 'does', 'we', '’ll', 'latterly', 'are', 'ten', \n 'hers', 'should', 'they', '‘s', 'either', 'am', 'be', 'perhaps', '’re', 'only', 'namely', 'sixty', \n 'made', \"'m\", 'always', 'those', 'have', 'again', 'her', 'once', 'ours', 'herself', 'else', 'has', 'nine', \n 'more', 'sometimes', 'your', 'yours', 'that', 'around', 'his', 'indeed', 'mostly', 'cannot', '‘ll', 'too', \n 'seems', '’m', 'himself', 'latter', 'whither', 'amount', 'other', 'nevertheless', 'whom', 'for', 'somehow', \n 'beforehand', 'just', 'an', 'beyond', 'amongst', 'none', \"'ve\", 'say', 'via', 'but', 'often', 're', 'our', \n 'because', 'rather', 'using', 'without', 'throughout', 'on', 'she', 'never', 'eight', 'no', 'hereupon', \n 'them', 'whereafter', 'quite', 'which', 'move', 'thru', 'until', 'afterwards', 'fifty', 'i', 'itself', 'n‘t',\n 'him', 'could', 'front', 'within', '‘re', 'back', 'such', 'already', 'several', 'side', 'whence', 'me', \n 'same', 'were', 'it', 'every', 'third', 'together'\n ]\n }\n \n # Create 'Without Stop Words' column: Mikael\n df['Without Stop Words'] = df['Tweets'].str.lower().str.split()\n\n # Extract stop words from 'Without Stop Words' column: Monica\n for row in df['Without Stop Words']:\n for word in row:\n #find stop word in stop word dictionary\n for stop_word in stop_words_dict['stopwords']:\n if word == stop_word:\n #remove stop word from current row\n row.remove(word)\n \n #loop again in case dictionary missed a word\n for row in df['Without Stop Words']:\n for word in row:\n #find stop word in stop word dictionary\n for stop_word in stop_words_dict['stopwords']:\n if word == stop_word:\n #remove stop word from current row\n row.remove(word)\n return df", "def remove_stop_words(tokenized_word_list):\n stop_words = set(nltk.corpus.stopwords.words(\"english\"))\n filtered_tokens = [word for word in tokenized_word_list if word not in stop_words]\n return filtered_tokens", "def filter_stop_words(self, word_list):\n punctuation = list(string.punctuation)\n file = open(\"stopwords.txt\")\n stopwords = []\n strippables = string.punctuation + string.whitespace\n for line in file:\n stopwords.append(line.strip(strippables))\n stopwords.extend(punctuation)\n\n terms_without_stop = [word for word in word_list if word not in stopwords]\n\n return terms_without_stop", "def 
cut_words(self, doc):\n return [word for word in jieba.cut(doc) if not word in self.stopwords]", "def remove_stop_words(dataset):\n for n in range(len(dataset)):\n try:\n # concatenate the title and keywords\n current_title = dataset.iloc[n][\"Title of Post\"]\n current_description = dataset.iloc[n][\"Post Description\"]\n\n token_title = word_tokenize(current_title)\n token_description = word_tokenize(current_description)\n filtered_title = []\n filtered_description = []\n\n for word in token_description:\n if word not in stop_words:\n filtered_description.append(word)\n\n filtered_description = listToString(filtered_description)\n\n for word in token_title:\n if word not in stop_words:\n filtered_title.append(word)\n\n filtered_title = listToString(filtered_title)\n\n dataset.iloc[n][\"Title of Post\"] = filtered_title\n dataset.iloc[n][\"Post Description\"] = filtered_description\n\n except:\n pass\n\n return dataset", "def test_stopwords_custom():\n normalizer = TextNormalizer(stopwords=False, custom_stopwords=[\"b\"])\n assert normalizer.transform([[\"a b\"]])[\"corpus\"][0] == [\"a\"]", "def remove_stopwords(self, value):\n with open(\"grandpy/stop_words.json\", encoding=\"utf-8\") as json_file:\n stopwords = json.load(json_file)\n key_words = [word for word in value if word not in stopwords[\"stop_words\"]]\n return key_words", "def getTerms(s): \n cleaned = [clean.sub('', t.lower()) for t in s.split()]\n return [t for t in cleaned if t not in stop_words]", "def filter_stop_words(self, content, stop_words):\n content = re.sub(r\"[^\\w\\s]\", \"\", content)\n content = re.sub(r\"[0-9]+\", \"\", content)\n new_sent = [\n Word(word).singularize()\n for word in content.lower().split()\n if Word(word).singularize() not in stop_words\n ]\n new_cont = \" \".join(new_sent)\n return new_cont", "def remove_stopwords(text, use_stopwords = None, df = True, exclude_number = True):\n from nltk.corpus import stopwords\n from nltk.tokenize import word_tokenize\n \n if use_stopwords is None:\n use_stopwords = set(stopwords.words(\"english\"))\n \n if df:\n new_text = word_tokenize(text)\n if exclude_number:\n new_text = [word for word in new_text if not word.isnumeric()]\n new_text = \" \".join([word for word in new_text if word not in use_stopwords])\n else:\n new_text = \"\"\n for word in text:\n if word not in use_stopwords:\n new_text += word + \" \"\n\n return new_text", "def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word", "def remove_stopwords(text: str, basic_stopwords: Set[str] = None, additional_stopwords=True) -> str:\n if basic_stopwords is None:\n basic_stopwords = _BASIC_STOPWORDS\n\n _stopwords = basic_stopwords\n\n if additional_stopwords:\n with open(_ADDITIONAL_STOPWORDS_PATH) as f:\n additional_stopwords = set(line.strip() for line in f)\n _stopwords = _stopwords.union(additional_stopwords)\n\n return ' '.join(word for word in text.split()\n if word not in _stopwords)", "def remove_stopwords(dataset_path: str) -> str:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n if \"train\" in dataset_path:\n dtypes[\"target\"] = int\n\n def _rm_stopwords(tokens: List[str]):\n return [w for w in tokens\n if w not 
in nltk.corpus.stopwords.words('english')]\n\n new_path = _make_new_filepath(dataset_path, \"nostopwords\")\n df = pd.read_csv(\n f\"/data/{dataset_path}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df[\"tokens\"] = df[\"tokens\"].apply(_rm_stopwords)\n df.to_csv(f\"/data/{new_path}\")\n return new_path", "def remove_punc_sw(self, docs):\n \n new_docs = []\n \n for text in docs:\n \n for p in punc:\n text = text.replace(p,' ')\n text = text.replace('-', '')\n text = text.replace(\"’\", ' ')\n text = text.lower()\n tokens = word_tokenize(text)\n filtered_tokens = list(filter(lambda token: token not in stopwords, tokens))\n \n new_text = \" \".join(filtered_tokens)\n new_docs.append(new_text)\n \n return pd.Series(new_docs)", "def remove_stopwords(self, *args):\n if self.remove_stopwords is False:\n raise Exception(\"Error - enable stopword removal functionality\")\n if type(args) != list:\n raise Exception(\"Error - expected a list\")\n if args == []:\n raise Exception(\"Error - no items to remove from stopword list\")\n for arg in args:\n if arg in self.stopword_list:\n self.stopword_list.remove(arg)\n else:\n raise Exception(arg+\" not in list\")", "def delete_words(self, words=None):\n\n if words is None:\n words = self.stopwords\n\n self.__corpora = [\n sub(r' ({0}) '.format('|'.join(words)), ' ', string) for string in self.__corpora\n ]", "def df_remove_stopwords(df, col_name):\n stopwords = getStopWords()\n\n # Create column 'stopwords_removed' on df\n df['stopwords_removed'] = list(map(lambda doc: [word for word in doc if word not in stopwords], df[col_name]))", "def filter_stopwords(tagged_records):\r\n print('Filtering stopwords')\r\n stop_words = list(stopwords.words('english'))\r\n stop_words.extend(string.punctuation)\r\n stop_words.extend(constants.CONTRACTIONS)\r\n stop_words.extend(constants.MYSQL_STOPWORDS)\r\n dictionary_words = set(nltk_words.words())\r\n\r\n def not_dictionary_word(word): \r\n return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']\r\n\r\n filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]\r\n filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]\r\n filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)\r\n for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608\r\n filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))\r\n for record in filtered_records]\r\n return filtered_records", "def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\tif load_stopwords():\r\n\t\treturn\r\n\r\n\tglobal stopwords\r\n\tstopwords = nltk.corpus.stopwords.words('english')\r\n\tfor f in os.listdir(paths.path_data_stopwords):\r\n\t\tpath_stopwords = paths.path_data_stopwords + '/' + f\r\n\t\twith open(path_stopwords,'r') as f:\r\n\t\t\tfor l in f:\r\n\t\t\t\tw = l.strip()\r\n\t\t\t\tw = re.sub(r\"[\\x80-\\xff]\",\" \",w)\r\n\t\t\t\tif (w not in stopwords):\r\n\t\t\t\t\tstopwords.append(w)\r\n\t\r\n\t# wip improve with POS and remove numbers\r\n\twith open(paths.path_data_stopwords_txt,'w') as outf:\r\n\t\toutf.write('\\n'.join(stopwords))\r\n\t\r\n\tprint('\\nstopword count : ' + str(len(stopwords)))", "def clean_stopwords_lemmatize(text):\n tokens = clean_stopwords(text)\n tokens = lemmatize_tokens(tokens)\n # count = Counter(tokens)\n # c = count.most_common(15)\n # b = [str(i[0]) for i in c]\n # keywords 
= [t for t in tokens if t in b]\n news = ['ESPN', 'espn', 'foxsports', 'fox', 'cnn', 'yahoo', '•', '-', '●']\n keywords = [k for k in tokens if not k in news]\n return keywords", "def handle_stop_words(self,\n text: str,\n stop_words: Set[str]\n ) -> Union[str, List[str]]:\n if not self.tokenise:\n return ' '.join(\n w for w in word_tokenize(text) if w not in stop_words\n )\n return [w for w in text if w not in stop_words]", "def preprocess(text,stopwords):\r\n result=[]\r\n for token in gensim.utils.simple_preprocess(text) :\r\n if token not in stopwords and len(token) > 3:\r\n result.append(token)\r\n \r\n return result", "def text_process(mess):\n nopunc= [char for char in mess if char not in string.punctuation]\n nopunc=''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english') and len(word)>2]", "def remove_stop_words(self, document_tokens=None, sentences=None):\n if sentences is not None or (\n sentences is not None and document_tokens is not None):\n sentences_ = []\n for sentence in sentences:\n sentences_.append(\n [word for word in sentence if word not in self.stop_words])\n return sentences_\n elif document_tokens is not None:\n return [word for word in document_tokens if\n word not in self.stop_words]\n else:\n er_msg = 'Wrong parameters for this methods'\n logging.error(er_msg)\n raise Exception(er_msg)", "def remove_stop_words(raw_corpus, doc_freq=0.75):\n vectorizer = TfidfVectorizer()\n vectors = vectorizer.fit_transform([doc.lower() for doc in raw_corpus])\n feature_names = vectorizer.get_feature_names()\n dense = vectors.todense()\n denselist = dense.tolist()\n words_tfidf = pd.DataFrame(denselist, columns=feature_names)\n\n new_stopwords = dict.fromkeys(feature_names, 0)\n for (word, data) in words_tfidf.iteritems():\n for num in data.values:\n if num > 0:\n new_stopwords[word] +=1\n\n new_sw = []\n for word, count in new_stopwords.items():\n if count > doc_freq*len(raw_corpus):\n new_sw.append(word)\n stopw = stopwords.words('english')\n stopw = [*stopw, *new_sw]\n text_nostop = []\n for doc in raw_corpus:\n doc_bag = make_bag(doc, stopw)\n text_nostop.append(\" \".join(doc_bag))\n return(text_nostop)" ]
[ "0.8823565", "0.8401623", "0.8289207", "0.8228253", "0.82197374", "0.8181915", "0.8181915", "0.8181915", "0.8181915", "0.8164255", "0.81541234", "0.81183577", "0.8115647", "0.81121063", "0.8105464", "0.8080783", "0.8072988", "0.806302", "0.80382043", "0.80332667", "0.7997704", "0.7997644", "0.79901284", "0.79758364", "0.79099524", "0.78871113", "0.78837425", "0.7874739", "0.7868765", "0.7868395", "0.78669", "0.7864404", "0.7863408", "0.7857749", "0.7854658", "0.7835874", "0.7827151", "0.78064954", "0.778062", "0.7779177", "0.7750752", "0.7743046", "0.7681973", "0.767216", "0.7665923", "0.7662385", "0.76598006", "0.7656556", "0.7655705", "0.76536524", "0.7653102", "0.76422143", "0.763834", "0.76277715", "0.7616752", "0.761085", "0.7607839", "0.7590062", "0.75861734", "0.75657505", "0.7556644", "0.7534908", "0.7505213", "0.7498205", "0.74875826", "0.74775696", "0.7461338", "0.7441604", "0.7410538", "0.7396186", "0.73659545", "0.7365883", "0.7359356", "0.7355837", "0.73439145", "0.73021126", "0.72976047", "0.7275817", "0.7274874", "0.7252782", "0.7244812", "0.7242535", "0.7177161", "0.7164191", "0.7158421", "0.71580106", "0.7117153", "0.70908827", "0.7080324", "0.70250905", "0.70210224", "0.7014622", "0.6976864", "0.69614077", "0.6945028", "0.6891079", "0.68859565", "0.6881987", "0.6876031", "0.68746215" ]
0.8248563
3
Default b64_encode adds padding, jwt spec removes padding
def base64url_encode(msg):
    encoded_input = base64.urlsafe_b64encode(to_bytes(msg))
    stripped_input = to_bytes(to_string(encoded_input).replace('=', ''))
    return stripped_input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def b64_json_enc(data):\n json_str = json.dumps(data)\n return base64.b64encode(json_str.encode()).decode()", "def encode_payload(payload):\n jwt_secret = app.config['SECRET_KEY']\n # expiry = 60 * 60 * 24 * 100 # 100 days\n # payload['exp'] = datetime.datetime.utcnow() + datetime.timedelta(seconds=expiry)\n encoded_jwt = jwt.encode(payload, jwt_secret, algorithm='HS256')\n\n return (encoded_jwt).decode()", "def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, '=')", "def encode(data):\n return jwt.encode(data, app.config[\"JWT_SECRET\"], algorithm=\"HS256\")", "def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")", "def b64_encode(value: bytes) -> bytes:\n return base64.urlsafe_b64encode(value).strip(b\"=\")", "def encode_auth_token(user_id: int, user_name:str, user_login:str, perfil_nome:str) -> bytes:\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'uid': user_id,\n 'name': user_name,\n 'login': user_login,\n 'perfil': perfil_nome,\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def b64encode(s: str) -> str:\n return base64.b64encode(s.encode()).decode()", "def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'uid': userdata['uid'],\n 'pwd':userdata['pwd'],\n 'role': userdata['role']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def base64_encode(data):\n return base64.encodestring(data);", "def base64(s):\n return b64encode(s,'[]').replace('=','_')", "def encode(self, payload):\n jwt_payload = payload.copy()\n if self.audience is not None:\n jwt_payload['aud'] = self.audience\n if self.issuer is not None:\n jwt_payload['iss'] = self.issuer\n\n token = jwt.encode(jwt_payload, self.signing_key, algorithm=self.algorithm)\n if isinstance(token, bytes):\n # For PyJWT <= 1.7.1\n return token.decode('utf-8')\n # For PyJWT >= 2.0.0a1\n return token", "def encryptB64(self, key, value):\n return base64.b64encode(self.encrypt(key, value))", "def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'username': userdata['username'],\n 'password':userdata['password']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def b64_string(input_string):\n return b64encode(input_string.encode(\"utf-8\")).decode(\"utf-8\")", "async def encode(self, payload: dict) -> str:\n delta_seconds = 
self.duration\n jwt_data = {\n **payload,\n \"exp\": datetime.utcnow() + timedelta(seconds=delta_seconds),\n }\n\n jwt_token = jwt.encode(jwt_data, self.jwt_secret, self.jwt_algorithm)\n token = jwt_token.decode(\"utf-8\")\n\n return token", "def _b64(b):\n return base64.urlsafe_b64encode(b).decode('utf8').replace(\"=\", \"\")", "def _encode(dictionary):\n # Strip ugly base64 padding.\n byteStr = bytearray(json.dumps(dictionary).encode())\n encodedStr = base64.urlsafe_b64encode(byteStr)\n return encodedStr.rstrip('='.encode())", "def test_pad_b64(self):\n test1 = {\"value\": b\"any carnal pleasure.\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4=\"}\n test2 = {\"value\": b\"any carnal pleasure\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ==\"}\n test3 = {\"value\": b\"any carnal pleasur\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\"}\n\n for test in [test1, test2, test3]:\n padded = oidc._pad_b64(test[\"unpadded\"])\n self.assertEqual(test[\"padded\"], padded)\n value = base64.b64decode(padded)\n self.assertEqual(test[\"value\"], value)", "def b64encode(value, *args, **kwargs):\n return base64.b64encode(encode(value, *args, **kwargs).encode('ascii'))", "def base64_string(self) -> global___Expression:", "def jws_b64encode(source):\n return urlsafe_b64encode(source).decode('ascii').rstrip('=')", "def encode_auth_token(self, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=0),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def encode_auth_token(user_data, config):\n ttl_days = config.get('JWT_TTL_DAYS', 0)\n ttl_seconds = config.get('JWT_TTL_SECONDS', 0)\n secret_key = config['JWT_SECRET_KEY']\n\n now = dt.datetime.utcnow()\n try:\n payload = {\n 'exp': now + dt.timedelta(days=ttl_days, seconds=ttl_seconds),\n 'iat': now,\n 'sub': user_data\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception:\n raise", "def base64encode(self, item: str) -> bytes:\n b = self[item]\n b = b if isinstance(b, bytes) else b.encode()\n return base64.b64encode(b)", "def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring", "def _encode_and_sign(self, dict_payload, encoding=\"ascii\"):\n payload_bytes = json.dumps(dict_payload).encode(encoding)\n b64 = base64.b64encode(payload_bytes)\n creds = self._api_credentials\n secret_bytes = creds.api_secret.encode(encoding)\n signature = hmac.new(secret_bytes, b64, sha384).hexdigest()\n return b64, signature", "def base64Encode(input, addNewlines = False):\n base64Str = base64.b64encode(input)\n if not type(base64Str) is str:\n base64Str = \"\".join(map(chr, base64Str))\n \n if not addNewlines:\n return base64Str\n\n result = \"\"\n i = 0\n while i < len(base64Str):\n result += base64Str[i:i + 64] + \"\\n\"\n i += 64\n return result", "def b64_of_bytes(data: bytes) -> str:\n return base64.b64encode(data).decode()", "def encode_auth_token(user_id):\n rfexp = datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5)\n exp = int(time.time()+600)\n try:\n payload = {\n 'exp': exp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n RFpayload = {\n 'exp': rfexp,\n 'iat': datetime.datetime.utcnow(),\n 'sub': 
user_id\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n ), jwt.encode(\n RFpayload,\n key,\n algorithm='HS512'\n )\n except Exception as e:\n return e", "def _create_token(self, payload, key):\n return jwt.encode(payload, key, algorithm='RS256')", "def encode(uuid_):\n return base64.urlsafe_b64encode(uuid_.bytes)[:-2] # Drop '==' padding", "def encode_auth_token(secret_key, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def _encode_base64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9\\+]\"\n regex = re.compile(pattern)\n while True:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad chars\n data = data + \" \"\n\n return estring", "def _hash_encoder(data: bytes) -> str:\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode('ascii')", "def serialize(obj):\n result = base64.urlsafe_b64encode(obj)\n # this workaround is needed because in case of python 3 the\n # urlsafe_b64encode method returns string of 'bytes' class.\n result = result.decode()\n return result", "def base64_encode(text):\n if not isinstance(text, (bytes, bytearray)):\n text = bytes(text.encode())\n encode = base64.b64encode(text)\n return encode.decode('ascii')", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def encode_transaction(value):\n\n return base64.b64encode(json.dumps(value).encode('utf8')).decode('utf8')", "def encoded_jwt(private_key, user):\n kid = JWT_KEYPAIR_FILES.keys()[0]\n scopes = ['openid']\n return generate_signed_access_token(\n kid, private_key, user, 3600, scopes, forced_exp_time=None)", "def _encode_encrypted_part(self, value):\n\n return encoding_utils.bytes_to_base64(value)", "def fn_base64(self, value):\n if isinstance(value, str):\n value = value.encode()\n return base64.b64encode(value).decode()", "def encode_token(userId):\n token = jwt.encode({'userId': userId, 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=20)},\n secret_key).decode('utf-8')\n return token", "def test_kms_encrypt_returns_b64(self):\n encrypted_secret = ef_utils.kms_encrypt(self.mock_kms, self.service, self.env, self.secret)\n b64_return = base64.b64encode(self.bytes_return)\n self.assertEqual(b64_return, encrypted_secret)", "def encode(text: str) -> str:\n b: bytes = text.encode()\n encoded: bytes = base64.b64encode(b)\n return encoded.decode()", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def encode_auth_token(self,user_id): \n try: \n exp = datetime.utcnow() + timedelta(days=1)\n \n payload = {\n 'exp': exp, \n 'iat': datetime.utcnow(), \n 'sub': user_id\n }\n \n encoded_auth_token = jwt.encode(\n payload, \n getattr(settings, \"SECRET_KEY\",\"\"),\n algorithm='HS256'\n )\n return encoded_auth_token\n except Exception as e: \n print_exception(e)\n return e", "def craft(b64_msg, nkey=key):\n byte_msg = base64.b64decode(b64_msg)\n pad = 8-(len(byte_msg)%8)\n byte_msg += b\"\\x00\"*pad\n enc_msg = encrypt_faces(byte_msg)\n hm = craft_hmac(enc_msg)\n payload = enc_msg+hm\n return base64.b64encode(payload).decode()", "def get_prep_value(self, value):\n if value is not None:\n return 'base64:' + 
base64.encodestring(json.dumps(value))", "def _encode_partitial_parameter(data):\n return base64.b64encode(data.encode(\"utf-8\")).decode()", "def encode(msg: str) -> str:\n\n msg_bytes = msg.encode(\"ascii\")\n b64_bytes = base64.b64encode(msg_bytes)\n return b64_bytes.decode(\"ascii\")", "def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str", "def build_jwt(payload: dict) -> str:\n if 'sub' not in payload.keys():\n raise ValueError('sub not in payload keys')\n jwt_fields = {\n 'iss': JWT_DOMAIN,\n 'sub': None,\n 'iat': datetime.datetime.utcnow(),\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=JWT_EXPIRATION_MINUTES),\n **payload\n }\n return jwt.encode(jwt_fields, key=SECRET_KEY, json_encoder=JSONDataEncoder).decode(encoding='UTF-8')", "def base64encode(self, value):\n\n return value.encode(\"base64\")[:-1].replace(\"\\n\", \"\")", "def encode_auth_token(user_id, email):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=100, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'sub': email + ' ' + str(user_id)\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def base64_encode_string(string):\n # type: (str or bytes) -> str\n if on_python2():\n return base64.b64encode(string)\n else:\n return str(base64.b64encode(string), 'ascii')", "def encode_u_id(u_id):\n return jwt.encode({\n \"u_id\": u_id,\n \"datetime\": json_time_translator.datetime_to_json(datetime.utcnow())\n }, '1$Arh\"1bWa/7+OS', algorithm='HS256').decode('utf-8')", "def hasher(cls, data, base_64_encode=False):\n salt_key = cls.base_configuration().get_section_option('app', 'key')\n salt_key = base64.b64decode(salt_key)\n result = hashpw(data.encode('utf-8'), salt_key)\n return base64.b64encode(result) if base_64_encode else result", "def encode_data( key, data ):\n text = json.to_json_string(data)\n text = base64.urlsafe_b64encode(text)\n digest = hmac.new(key, text).hexdigest()\n return text, digest", "def base64_encode_array(inArray):\n return base64.b64encode(inArray)", "def encode(self):\n if not self.verify():\n return None\n\n try:\n s = json.dumps(self.auth_dict)\n return encode(APP.config['SECRET_KEY'], s)\n except Exception as err:\n LOGGER.error('Error encoding auth: %s' % str(err))\n raise err", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def base64ify(bytes_or_str):\n if isinstance(bytes_or_str, str):\n input_bytes = bytes_or_str.encode(\"utf8\")\n else:\n input_bytes = bytes_or_str\n\n output_bytes = base64.urlsafe_b64encode(input_bytes)\n return output_bytes.decode(\"ascii\")", "def _encode_key(self, key):\n return key.encode() if isinstance(key, str) else key", "def b64encode(s, altchars=None):\n if not isinstance(s, bytes_types):\n raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__)\n # Strip off the trailing newline\n encoded = binascii.b2a_base64(s)[:-1]\n if altchars is not None:\n if not isinstance(altchars, bytes_types):\n raise TypeError(\"expected bytes, not %s\"\n % altchars.__class__.__name__)\n assert len(altchars) == 2, repr(altchars)\n return encoded.translate(bytes.maketrans(b'+/', altchars))\n return encoded", "def _encode_2xbase64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9]\"\n regex = re.compile(pattern)\n while True:\n # 
First run\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n # Second run\n ebytes = base64.b64encode(estring.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad/special chars\n data = data + \" \"\n\n return estring", "def serialize_key(key: str) -> bytes:\n return key.encode(\"utf-8\")", "def generate_token(self):\n\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=45),\n 'iat': datetime.utcnow(),\n 'sub': self.id\n }\n # create the byte string token using the payload and the SECRET key\n jwt_string = jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n return jwt_string\n\n except Exception as exception:\n # return an error in string format if an exception occurs\n return str(exception)", "def encode_bin_as_b64(s):\n if isinstance(s, dict):\n ret = {}\n for k,v in s.items():\n ret[k] = encode_bin_as_b64(v)\n return ret\n elif isinstance(s, list) or isinstance(s, tuple):\n ret = []\n for v in s:\n ret.append(encode_bin_as_b64(v))\n return ret\n elif isinstance(s, bytes):\n return '\\0{}'.format(b64encode(s).decode('ascii'))\n elif isinstance(s, Enum):\n return encode_bin_as_b64(s.value)\n else:\n return s", "def encode_data(data):\n bytes = json.dumps(data).encode('utf-8').encode('base64').replace('\\n', '')\n assert len(bytes) < 250 * 1024\n return bytes", "def itob64(n):\n c = hex(n)\n c = c[2:-1] if c[-1] == 'L' else c[2:]\n if len(c)%2:\n c = '0'+c\n x = base64.urlsafe_b64encode(c.decode('hex'))\n return re.sub(r'=*$','',x)", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def gen_sig(key, data):\n signature = hmac.new(key.encode('utf-8'), data.encode('utf-8'), hashlib.sha1)\n\n sig = signature.digest()\n # base64 encode\n b64 = base64.b64encode( sig)\n # url encode\n return b64", "def make_id_jwt(sub, tenant=None):\n payload = {\"sub\": sub}\n if tenant is not None:\n payload[\"mender.tenant\"] = tenant\n payload = json.dumps(payload)\n payloadb64 = b64encode(payload.encode(\"utf-8\"))\n return \"bogus_header.\" + payloadb64.decode() + \".bogus_sign\"", "def get_encoded_token(expiration_from_now_s=600):\n token_payload = {'exp': int(time.time() + expiration_from_now_s)}\n token_payload.update(TOKEN_PAYLOAD_TEMPLATE)\n\n token = jwt.encode(token_payload, 'secret', algorithm='HS256')\n\n return token, token_payload", "def encode_payload(payload, key):\n encoded_payload = b''\n for b in payload:\n encoded_payload += bytes([b ^ key])\n\n return encoded_payload", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'STULOGINID': self.STULOGINID, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def rc4_encode(data, key, encode=base64.b64encode, salt_length=16):\n salt = ''\n for n in range(salt_length):\n salt += chr(random.randrange(256))\n data = salt + crypt(data, sha1(key + salt).digest())\n if encode:\n data = encode(data)\n return data", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def encode(\n self,\n key: KeyInterface,\n payload: Union[bytes, str, dict],\n footer: 
Union[bytes, str, dict] = b\"\",\n implicit_assertion: Union[bytes, str] = b\"\",\n nonce: bytes = b\"\",\n serializer: Any = json,\n exp: int = 0,\n ) -> bytes:\n\n if not isinstance(payload, (bytes, str, dict)):\n raise ValueError(\"payload should be bytes, str or dict.\")\n\n res: Union[bytes, str]\n bp: bytes\n if isinstance(payload, dict):\n if not serializer:\n raise ValueError(\"serializer should be specified for the payload object.\")\n try:\n if not callable(serializer.dumps):\n raise ValueError(\"serializer should have dumps().\")\n except AttributeError:\n raise ValueError(\"serializer should have dumps().\")\n except Exception:\n raise\n try:\n payload = self._set_registered_claims(payload, exp)\n res = serializer.dumps(payload)\n bp = res if isinstance(res, bytes) else res.encode(\"utf-8\")\n except Exception as err:\n raise ValueError(\"Failed to serialize the payload.\") from err\n else:\n bp = payload if isinstance(payload, bytes) else payload.encode(\"utf-8\")\n\n bf: bytes\n if isinstance(footer, dict):\n if not serializer:\n raise ValueError(\"serializer should be specified for the footer object.\")\n try:\n if not callable(serializer.dumps):\n raise ValueError(\"serializer should have dumps().\")\n except AttributeError:\n raise ValueError(\"serializer should have dumps().\")\n except Exception:\n raise\n try:\n res = serializer.dumps(footer)\n bf = res if isinstance(res, bytes) else res.encode(\"utf-8\")\n except Exception as err:\n raise ValueError(\"Failed to serialize the footer.\") from err\n else:\n bf = footer if isinstance(footer, bytes) else footer.encode(\"utf-8\")\n\n bi = implicit_assertion if isinstance(implicit_assertion, bytes) else implicit_assertion.encode(\"utf-8\")\n\n if key.purpose == \"local\":\n return key.encrypt(bp, bf, bi, nonce)\n\n sig = key.sign(bp, bf, bi)\n token = key.header + base64url_encode(bp + sig)\n if bf:\n token += b\".\" + base64url_encode(bf)\n return token", "def base64sign(plaintext, private_key):\n shahash = SHA256.new(plaintext.encode('utf8'))\n signer = PKCS1_v1_5.new(private_key)\n signature_bytes = signer.sign(shahash)\n return base64.b64encode(signature_bytes)", "def padding_encryption():\n return padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )", "def portable_b64encode(thing):\n if is_py3():\n try:\n some_bits = bytes(thing, 'utf-8')\n except TypeError:\n some_bits = thing\n\n return polite_string(b64encode(some_bits).decode('utf-8'))\n\n return polite_string(b64encode(thing))", "def generate_token(self, user_id):\n try:\n # set up a payload with an expiration time\n payload = {\n 'exp': datetime.utcnow() + timedelta(minutes=10),\n 'iat': datetime.utcnow(),\n 'sub': user_id\n }\n # create the byte string encoded token using payload and SECRET key\n jwt_string = jwt.encode(\n payload,\n SECRET_KEY,\n algorithm='HS256'\n )\n return jwt_string\n except Exception as e:\n # return an error in string format if an exception occurs\n return str(e)", "def generate_signed_token(self, extra_payload: dict[str, Any]) -> str:\n jwt_dict = {\n \"aud\": self._audience,\n \"iat\": datetime.utcnow(),\n \"nbf\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(seconds=self._expiration_time_in_seconds),\n }\n jwt_dict.update(extra_payload)\n token = jwt.encode(\n jwt_dict,\n self._secret_key,\n algorithm=self._algorithm,\n )\n return token", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = 
len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def adobe_base64_encode(cls, to_encode):\n if isinstance(to_encode, unicode):\n to_encode = to_encode.encode(\"utf8\")\n encoded = base64.encodestring(to_encode)\n return encoded.replace(b\"+\", b\":\").replace(b\"/\", b\";\").replace(b\"=\", b\"@\").strip()", "def generate_auth_token(self, expires_in=600):\n return jwt.encode(\n {'loginid': self.loginid, 'exp': time.time() + expires_in},\n app.config['SECRET_KEY'], algorithm='HS256')", "def base64_pub_encode(self, key):\n (y, g, p, q) = (str(key.y), str(key.g), str(key.p), str(key.q))\n return base64.b64encode((y + \",\" + g + \",\" + p + \",\" + q).encode('utf-8')).decode('utf-8')", "def encrypt_data(self, params):\n from django.core.signing import dumps\n return dumps(params, salt=self.salt_namespace)", "def encrypted(data: str) -> str:\n return b64encode(data.encode('ascii')).decode('ascii')", "def encode_password(jid, password):\n n = int(jid[:2], 16)\n key = jid[2+n:]\n encoded = encrypt(key, password)\n \n b64 = ''.join(map(chr, encoded)).encode('base64')\n return b64.strip() # Remove /n", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def sign_id_token(payload):\n signing_key = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(TESTING_JWT_KEYSET))\n return jwt_encode(\n payload,\n signing_key,\n algorithm=\"RS256\",\n headers={\"kid\": TESTING_JWT_KEYSET[\"kid\"]},\n )" ]
[ "0.71889275", "0.71519893", "0.7149377", "0.71148413", "0.699144", "0.6853686", "0.683355", "0.68118036", "0.6764185", "0.6760017", "0.6730485", "0.672144", "0.66806155", "0.6677432", "0.66706085", "0.6628703", "0.6622796", "0.66124743", "0.65830743", "0.6532523", "0.6522085", "0.6513241", "0.6488795", "0.64706904", "0.6450388", "0.6407059", "0.6394041", "0.63921285", "0.6376198", "0.6352702", "0.63524175", "0.63474566", "0.6301868", "0.6298957", "0.6292815", "0.6258164", "0.6257126", "0.62398475", "0.62143797", "0.6199298", "0.6193429", "0.6190623", "0.6181456", "0.6171819", "0.6171411", "0.61662513", "0.61620915", "0.6152964", "0.615076", "0.61492956", "0.6142038", "0.61382025", "0.61361396", "0.6120371", "0.6108008", "0.6106966", "0.60943687", "0.60857195", "0.6029634", "0.60296196", "0.6011109", "0.60083896", "0.59853077", "0.59830725", "0.59817344", "0.5979269", "0.5979269", "0.5975488", "0.59623027", "0.5959973", "0.5959071", "0.59370244", "0.59318984", "0.5928332", "0.59117204", "0.5911377", "0.5909517", "0.59044135", "0.5897065", "0.5892379", "0.5888581", "0.58821905", "0.58778197", "0.58638704", "0.5858643", "0.5856758", "0.5856682", "0.58535486", "0.5846103", "0.5837155", "0.58359504", "0.5826567", "0.5813529", "0.58114445", "0.5809519", "0.5789265", "0.5778473", "0.5774109", "0.57697284", "0.5766717" ]
0.59093267
77
The JWT spec doesn't allow padding characters. base64url_encode removes them; base64url_decode adds them back in before trying to base64-decode the message
def base64url_decode(msg):
    bmsg = to_bytes(msg)
    pad = len(bmsg) % 4
    if pad > 0:
        bmsg += b'=' * (4 - pad)
    return base64.urlsafe_b64decode(bmsg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base64url_encode(msg):\n encoded_input = base64.urlsafe_b64encode(to_bytes(msg))\n stripped_input = to_bytes(to_string(encoded_input).replace('=', ''))\n return stripped_input", "def _url_base64_encode(msg):\r\n msg_base64 = base64.b64encode(msg)\r\n msg_base64 = msg_base64.replace('+', '-')\r\n msg_base64 = msg_base64.replace('=', '_')\r\n msg_base64 = msg_base64.replace('/', '~')\r\n return msg_base64", "def base64_string(self) -> global___Expression:", "def base64(s):\n return b64encode(s,'[]').replace('=','_')", "def decode_payload(encoded_payload):\n jwt_secret = app.config['SECRET_KEY']\n payload = jwt.decode(encoded_payload, jwt_secret, algorithms='HS256')\n\n return payload", "def b64_string(input_string):\n return b64encode(input_string.encode(\"utf-8\")).decode(\"utf-8\")", "def de_base64(msg):\n try:\n msg_ascii = msg.encode('ascii')\n msg_bytes = base64.b64decode(msg_ascii)\n msg_decoded = msg_bytes.decode('ascii')\n return msg_decoded\n except:\n print('Invalid base64-encoded string')", "def encode_payload(payload):\n jwt_secret = app.config['SECRET_KEY']\n # expiry = 60 * 60 * 24 * 100 # 100 days\n # payload['exp'] = datetime.datetime.utcnow() + datetime.timedelta(seconds=expiry)\n encoded_jwt = jwt.encode(payload, jwt_secret, algorithm='HS256')\n\n return (encoded_jwt).decode()", "def decode_base64(in_str):\n return base64.decodestring(in_str)", "def decode_base64(in_str):\n import base64\n return base64.decodestring(in_str)", "def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")", "def decode_base64(data):\n missing_padding = 4 - len(data) % 4\n if missing_padding:\n data += b'='* missing_padding\n return base64.decodestring(data)", "def decode(b64_msg: str) -> str:\n\n b64_bytes = b64_msg.encode(\"ascii\")\n b64_bytes = base64.b64decode(b64_bytes)\n return b64_bytes.decode(\"ascii\")", "def craft(b64_msg, nkey=key):\n byte_msg = base64.b64decode(b64_msg)\n pad = 8-(len(byte_msg)%8)\n byte_msg += b\"\\x00\"*pad\n enc_msg = encrypt_faces(byte_msg)\n hm = craft_hmac(enc_msg)\n payload = enc_msg+hm\n return base64.b64encode(payload).decode()", "def _b64(b):\n return base64.urlsafe_b64encode(b).decode('utf8').replace(\"=\", \"\")", "def decode_base64(data):\n missing_padding = len(data) % 4\n if missing_padding != 0:\n data += b'='* (4 - missing_padding)\n return base64.decodebytes(data)", "def encode(msg: str) -> str:\n\n msg_bytes = msg.encode(\"ascii\")\n b64_bytes = base64.b64encode(msg_bytes)\n return b64_bytes.decode(\"ascii\")", "def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")", "def test_decode_IQ_token(self):\n\n token = 
\"\"\"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1laWQiOiJhZGZzfHNodzAwMXNhaW50ZWxzZXdoZXJlfGpwX2FkbWluQHVybjphdXRoMDpzYWludGVsc2V3aGVyZSIsImVtYWlsIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2QGdtYWlsLmNvbSIsInVuaXF1ZV9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvYWR1c2VyZ3VpZCI6IjMveFFhZ0VrSWttcllBU0VQZHVZRmc9PSIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvZmlyc3RuYW1lIjoiQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2xhc3RuYW1lIjoiVGVzdCIsImh0dHBzOi8vdGVsZXRyYWNraW5nLmNsb3VkYXBwLm5ldC9pZGVudGl0eS9jbGFpbXMvb3VuYW1lIjoiU2FpbnRFbHNld2hlcmUiLCJyb2xlIjpbIkRvbWFpbiBVc2VycyIsIkFkbWluaXN0cmF0b3IiLCJJUUdlbkhvc3BTZWMiLCJTYWludEVsc2V3aGVyZSJdLCJ1cG4iOiJKYWltaW4uUGF0ZWw4Mys1MTY0NTZAZ21haWwuY29tIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2lkZW50aXRpZXMvZGVmYXVsdC9wcm92aWRlciI6ImFkZnMiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vaWRlbnRpdGllcy9kZWZhdWx0L2Nvbm5lY3Rpb24iOiJzaHcwMDFzYWludGVsc2V3aGVyZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9pZGVudGl0aWVzL2RlZmF1bHQvaXNTb2NpYWwiOiJmYWxzZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9naXZlbl9uYW1lIjoiSVFHRU5IT1NQXFxiXy1kcHl4eDBFeVVjR0pIaG1aOCIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9waWN0dXJlIjoiaHR0cHM6Ly9zLmdyYXZhdGFyLmNvbS9hdmF0YXIvMzUxYTRiMjU4NWViM2UyYjA1NWI4ZTAyOGY4NzdmNDc_cz00ODBcdTAwMjZyPXBnXHUwMDI2ZD1odHRwcyUzQSUyRiUyRmNkbi5hdXRoMC5jb20lMkZhdmF0YXJzJTJGaXEucG5nIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL25pY2tuYW1lIjoiSmFpbWluLlBhdGVsODMrNTE2NDU2IiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL2VtYWlsX3ZlcmlmaWVkIjoidHJ1ZSIsImh0dHA6Ly9zY2hlbWFzLmF1dGgwLmNvbS9jbGllbnRJRCI6Imtrakgxd3AzdE53RmpEN0M1djI3a0oyWHFWUHE1akhtIiwiaHR0cDovL3NjaGVtYXMuYXV0aDAuY29tL3VwZGF0ZWRfYXQiOiJNb24gSmFuIDE0IDIwMTkgMTU6NTY6MTIgR01UKzAwMDAgKFVUQykiLCJodHRwOi8vc2NoZW1hcy5hdXRoMC5jb20vY3JlYXRlZF9hdCI6IkZyaSBKYW4gMTEgMjAxOSAyMDoxNToyMiBHTVQrMDAwMCAoVVRDKSIsImF1dGhtZXRob2QiOiJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL3dzLzIwMDgvMDYvaWRlbnRpdHkvYXV0aGVudGljYXRpb25tZXRob2QvcGFzc3dvcmQiLCJhdXRoX3RpbWUiOiIyMDE5LTAxLTE0VDIzOjU2OjEyLjg1M1oiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3RlbmFudGlkIjoiMjExNmU5NDMtNTA5NC00MWY3LTgzMTgtODNhYWMyYWMxMTQ3IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy9jb250ZXh0cGVyc29uaWQiOiIwYTAxMjBhMS04NTU3LTQ4MzEtYTQyNi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1lZm9ybWFsIjoiVGVzdCwgQWRtaW4iLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL3VzZXJuYW1laW5mb3JtYWwiOiJBZG1pbiBUZXN0IiwiaHR0cHM6Ly90ZWxldHJhY2tpbmcuY2xvdWRhcHAubmV0L2lkZW50aXR5L2NsYWltcy91c2VySWQiOiI0ZmU5OTdmZC00ZGNkLTQxNWItYjJjYi1hOGJkMDBmNjFkYzkiLCJodHRwczovL3RlbGV0cmFja2luZy5jbG91ZGFwcC5uZXQvaWRlbnRpdHkvY2xhaW1zL2ZlYXR1cmV0eXBlaWQiOlsiNCIsIjIiLCIxIiwiMyIsIjUiLCI2Il0sImlzcyI6InRlbGV0cmFja2luZy5jb20iLCJhdWQiOiJodHRwOi8vd3d3LnNlcnZpY2UudGVsZXRyYWNraW5nLmNvbS8iLCJleHAiOjE1NTAwNzM0MzksIm5iZiI6MTU0NzQ4MTQzOX0.UCL-Wc3OSVDI58U5ShOYqLa-DwNc_WQ3BlY5P3CfnVI\"\"\"\n audience = 'http://www.service.teletracking.com/'\n\n secret = 'drMemxWrLen6fCXQA5jO6gXkK/UoZVzPGRDiff7ByPU='\n decoded_token = AuthenticationHandler.validate_and_decode_token(\n token=token, key=secret,\n audience=audience\n )\n self.assertTrue(decoded_token['role'][0] == 'Domain Users', \"Group 1 not match\")\n self.assertTrue(decoded_token['role'][1] == 'Administrator', \"Group 2 not match\")", "def jwt_encode_handler(payload):\n\n return jwt.encode(\n payload,\n api_settings.JWT_SECRET_KEY,\n api_settings.JWT_ALGORITHM\n ).decode('utf-8')", "def 
test_decode_base64_string(base64_input, expected_output):\n base64_encoded_string = decode_base64_string(base64_input)\n assert base64_encoded_string == expected_output", "def decodeBase64(data):\n missing_padding = 4 - len(data)%4\n if missing_padding:\n data += b'='* missing_padding\n\n return base64.decodestring(data)", "def parse_base64(payload, sanity=True, decode=False):\n decoded = b64decode(payload + '====')\n value = str(b64encode(decoded), 'utf8')\n\n if not payload[-2:].count('=') == value[-2:].count('='):\n value = value.rstrip('=') + '=' * payload[-2:].count('=')\n\n if sanity:\n assert value == payload\n\n if decode:\n return decoded\n\n return value", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "def decode(encoded_token):\n return jwt.decode(encoded_token, key=settings.JWT_AUTH['JWT_SECRET_KEY'])", "def b64_decode(value: bytes) -> bytes:\n pad = b\"=\" * (-len(value) % 4)\n return base64.urlsafe_b64decode(value + pad)", "def base64decode(self, value):\n\n return value.decode(\"base64\")", "def jws_b64encode(source):\n return urlsafe_b64encode(source).decode('ascii').rstrip('=')", "def base64_decode(s):\n log = logging.getLogger()\n s = str(s).strip()\n try:\n return base64.b64decode(s)\n except TypeError:\n padding = len(s) % 4\n if padding == 1:\n log.error(\"Invalid base64 string: {}\".format(s))\n return ''\n elif padding == 2:\n s += b'=='\n elif padding == 3:\n s += b'='\n return base64.b64decode(s)", "def bytes_to_base64url(val: bytes) -> str:\n return urlsafe_b64encode(val).decode(\"utf-8\").rstrip(\"=\")", "def _encode_base64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9\\+]\"\n regex = re.compile(pattern)\n while True:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad chars\n data = data + \" \"\n\n return estring", "def hash_b64decode(self,s):\n\n payload = s.encode('ascii') # get base64 bytes\n payload = base64.b64decode(payload) # get raw bytes\n payload = payload.decode('ascii') # get string from bytes\n\n if self.test_string(payload):\n return payload\n else: \n return ''", "def encode(data):\n return jwt.encode(data, app.config[\"JWT_SECRET\"], algorithm=\"HS256\")", "def fn_base64(self, value):\n if isinstance(value, str):\n value = value.encode()\n return base64.b64encode(value).decode()", "def _decode_encrypted_part(self, value):\n\n return encoding_utils.base64_to_bytes(value)", "def test_encode_decode_token(create_user):\n user = create_user\n user_data = {\n \"email\": user.email,\n \"username\": user.username\n }\n jwt = JWTAuthentication()\n # encode token\n encoded_token = jwt.generate_token(user_data)\n assert type(encoded_token) is str # test encoding is 'utf-8'\n # decode token\n user_details = jwt.decode_token(encoded_token)\n assert user_details['userdata'] == user_data # test token details", "def convert_to_base64(str):\n str_bytes = str.encode(\"utf-8\")\n str_bytes_base64 = base64.b64encode(str_bytes)\n str_base64 = str_bytes_base64.decode(\"utf-8\")\n return str_base64", "def b64_json_dec(encoded):\n json_str = base64.b64decode(encoded).decode()\n return json.loads(json_str)", "def decode_base64(self, s):\n return self.transcode(struct.unpack('!L', base64.b64decode(s + '==', self.extra_chars))[0])", "def _encode_base64(data: str) -> str:\n ebytes = 
base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring", "def build_jwt(payload: dict) -> str:\n if 'sub' not in payload.keys():\n raise ValueError('sub not in payload keys')\n jwt_fields = {\n 'iss': JWT_DOMAIN,\n 'sub': None,\n 'iat': datetime.datetime.utcnow(),\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=JWT_EXPIRATION_MINUTES),\n **payload\n }\n return jwt.encode(jwt_fields, key=SECRET_KEY, json_encoder=JSONDataEncoder).decode(encoding='UTF-8')", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def pad_base64_str(str):\n missing_padding = len(str) % 4\n if missing_padding != 0:\n str += '=' * (4 - missing_padding)\n return str", "def b64_json_enc(data):\n json_str = json.dumps(data)\n return base64.b64encode(json_str.encode()).decode()", "def base64_decode_string(string):\n # type: (str) -> str\n return base64.b64decode(string)", "def _encode_2xbase64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9]\"\n regex = re.compile(pattern)\n while True:\n # First run\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n # Second run\n ebytes = base64.b64encode(estring.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad/special chars\n data = data + \" \"\n\n return estring", "def adobe_base64_encode(cls, to_encode):\n if isinstance(to_encode, unicode):\n to_encode = to_encode.encode(\"utf8\")\n encoded = base64.encodestring(to_encode)\n return encoded.replace(b\"+\", b\":\").replace(b\"/\", b\";\").replace(b\"=\", b\"@\").strip()", "def base64_encode(data):\n return base64.encodestring(data);", "def _encode_partitial_parameter(data):\n return base64.b64encode(data.encode(\"utf-8\")).decode()", "def base64ify(bytes_or_str):\n if isinstance(bytes_or_str, str):\n input_bytes = bytes_or_str.encode(\"utf8\")\n else:\n input_bytes = bytes_or_str\n\n output_bytes = base64.urlsafe_b64encode(input_bytes)\n return output_bytes.decode(\"ascii\")", "def encode(self, payload):\n jwt_payload = payload.copy()\n if self.audience is not None:\n jwt_payload['aud'] = self.audience\n if self.issuer is not None:\n jwt_payload['iss'] = self.issuer\n\n token = jwt.encode(jwt_payload, self.signing_key, algorithm=self.algorithm)\n if isinstance(token, bytes):\n # For PyJWT <= 1.7.1\n return token.decode('utf-8')\n # For PyJWT >= 2.0.0a1\n return token", "def urlsafe_base64_decode(s):\n s = s.encode()\n try:\n return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b\"=\"))\n except (LookupError, BinasciiError) as e:\n raise ValueError(e)", "def urlsafe_base64_decode(s):\n s = s.encode()\n try:\n return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b\"=\"))\n except (LookupError, BinasciiError) as e:\n raise ValueError(e)", "def decode_message_part(message_part):\n return base64.urlsafe_b64decode(message_part['body']['data']).decode().strip()", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def urlsafe_base64_encode(s):\n return base64.urlsafe_b64encode(s).rstrip(b\"\\n=\").decode(\"ascii\")", "def b64_encode(value: bytes) -> bytes:\n return base64.urlsafe_b64encode(value).strip(b\"=\")", "def b64encode(s: str) -> str:\n return base64.b64encode(s.encode()).decode()", "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def 
b64decode(s: str) -> str:\n return base64.b64decode(s.encode()).decode()", "def base64_decode(n, encoding='ISO-8859-1'):\t\n decoded = base64.decodestring(n.encode('ascii'))\t\n return tonative(decoded, encoding)", "def fix_key(public_key: str) -> str:\n\n if public_key.startswith(\"http://\") or public_key.startswith(\"https://\"):\n resp = requests.get(public_key)\n if public_key.endswith(\".json\"):\n key = resp.json()\n if \"keys\" in key:\n key = key[\"keys\"][0]\n jwk = jwcrypto.jwk.JWK.from_json(json.dumps(key))\n public_key = jwk.export_to_pem().decode(\"utf-8\")\n else:\n public_key = resp.content.decode(\"utf-8\")\n elif public_key.startswith(\"/\") or public_key.endswith((\".pem\")):\n with open(public_key, \"r\") as f:\n public_key = f.read()\n # ENV variables sometimes don't pass newlines, spec says white space\n # doesn't matter, but pyjwt cares about it, so fix it\n public_key = public_key.replace(\" PUBLIC \", \"_PLACEHOLDER_\")\n public_key = public_key.replace(\" \", \"\\n\")\n public_key = public_key.replace(\"_PLACEHOLDER_\", \" PUBLIC \")\n return public_key", "def _hash_encoder(data: bytes) -> str:\n return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode('ascii')", "def decode_request(self, data):\n return decode_jwt(data[\"jwt\"], data[\"cert_name\"], self.node.node_name, self.node.id)", "def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, '=')", "def test_pad_b64(self):\n test1 = {\"value\": b\"any carnal pleasure.\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZS4=\"}\n test2 = {\"value\": b\"any carnal pleasure\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3VyZQ==\"}\n test3 = {\"value\": b\"any carnal pleasur\",\n \"unpadded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\",\n \"padded\": \"YW55IGNhcm5hbCBwbGVhc3Vy\"}\n\n for test in [test1, test2, test3]:\n padded = oidc._pad_b64(test[\"unpadded\"])\n self.assertEqual(test[\"padded\"], padded)\n value = base64.b64decode(padded)\n self.assertEqual(test[\"value\"], value)", "def generate_token(payload: Any, secret: str | List[str]) -> str:\n return url_encode_full_stops(URLSafeTimedSerializer(secret).dumps(payload, \"token\"))", "def decode_base64(data, altchars=b'+/'):\n data = re.sub(rb'[^a-zA-Z0-9%s]+' % altchars, b'', data) # normalize\n missing_padding = len(data) % 4\n if missing_padding:\n data += b'='* (4 - missing_padding)\n return base64.b64decode(data, altchars)", "async def encode(self, payload: dict) -> str:\n delta_seconds = self.duration\n jwt_data = {\n **payload,\n \"exp\": datetime.utcnow() + timedelta(seconds=delta_seconds),\n }\n\n jwt_token = jwt.encode(jwt_data, self.jwt_secret, self.jwt_algorithm)\n token = jwt_token.decode(\"utf-8\")\n\n return token", "def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def encode_auth_token(user_id: int, user_name:str, user_login:str, perfil_nome:str) -> bytes:\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=5),\n 'iat': datetime.datetime.utcnow(),\n 'uid': user_id,\n 'name': user_name,\n 'login': user_login,\n 'perfil': perfil_nome,\n }\n return jwt.encode(\n payload,\n key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + 
datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'uid': userdata['uid'],\n 'pwd':userdata['pwd'],\n 'role': userdata['role']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def b64dec(string: str) -> bytes:\n\n data = string.encode(\"utf-8\")\n try:\n return base64.b64decode(data, validate=True)\n except binascii.Error:\n # altchars for urlsafe encoded base64 - instead of + and _ instead of /\n return base64.b64decode(data, altchars=b\"-_\", validate=True)", "def decodeJWT(self, token):\n try:\n return jwt.decode(token, self.secret, algorithms=[self.algorithm])\n except jwt.exceptions.InvalidSignatureError:\n raise ValueError(f'The following JWT is invalid: {token}')", "def _encode_encrypted_part(self, value):\n\n return encoding_utils.bytes_to_base64(value)", "def decode_auth_token(auth_token):\n try:\n payload = jwt.decode(auth_token, Config.SECRET_KEY,algorithms='HS256')\n return payload\n except jwt.ExpiredSignatureError:\n return 'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please log in again.'", "def decode(self, encoded):", "def encode_auth_token(userdata):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),\n 'iat': datetime.datetime.utcnow(),\n 'username': userdata['username'],\n 'password':userdata['password']\n }\n return jwt.encode(\n payload,\n Config.SECRET_KEY,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def btob64(b):\n x = base64.urlsafe_b64encode(b)\n return re.sub(r'=*$','',x)", "def base64_to_pem(crypto_type, b64_text, width=76):\n lines = ''\n for pos in xrange(0, len(b64_text), width):\n lines += b64_text[pos:pos + width] + '\\n'\n\n return '-----BEGIN %s-----\\n%s-----END %s-----' % (crypto_type, lines, crypto_type)", "def _b64_decode(data):\n encoded = data.translate(B64_FROM_BCRYPT)\n padding = '=' * (4 - len(data) % 4) if len(data) % 4 else ''\n return base64.b64decode(encoded + padding)", "def _create_token(self, payload, key):\n return jwt.encode(payload, key, algorithm='RS256')", "def decode_token(token):\n payload = None\n try:\n payload = jwt.decode(token.encode('utf-8'), '1$Arh\"1bWa/7+OS', algorithm='HS256')['u_id']\n except jwt.InvalidTokenError:\n pass\n return payload", "def b64_of_bytes(data: bytes) -> str:\n return base64.b64encode(data).decode()", "def decode_jwt(encoded, key, algorithms = 'HS256'):\n try:\n payload = jwt.decode(\n encoded, \n key, \n algorithms = algorithms\n )\n\n return payload\n # if token has expired:\n except jwt.exceptions.ExpiredSignatureError:\n raise JWTError(\n {\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, \n 401\n )\n # other exceptions:\n except Exception:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, \n 400\n )", "def _decode_public_key_identifier(identifier):\n\n return JWTAuth._get_identifier_cypher().decrypt(base64.b64decode(identifier)).decode('utf-8')", "def encode_auth_token(self, id):\n payload = {\n \"exp\": datetime.utcnow()\n + timedelta(\n days=current_app.config.get(\"TOKEN_EXPIRATION_DAYS\"),\n seconds=current_app.config.get(\"TOKEN_EXPIRATION_SECONDS\"),\n ),\n \"iat\": datetime.utcnow(),\n \"sub\": id,\n }\n return jwt.encode(\n payload, current_app.config.get(\"SECRET_KEY\"), algorithm=\"HS256\"\n )", "def make_id_jwt(sub, tenant=None):\n payload = {\"sub\": sub}\n if tenant is not None:\n payload[\"mender.tenant\"] = 
tenant\n payload = json.dumps(payload)\n payloadb64 = b64encode(payload.encode(\"utf-8\"))\n return \"bogus_header.\" + payloadb64.decode() + \".bogus_sign\"", "def encode_auth_token(self, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=0),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n app.config.get('SECRET_KEY'),\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def decode(self, crypto):", "def encode(self, decoded):", "def _encode(dictionary):\n # Strip ugly base64 padding.\n byteStr = bytearray(json.dumps(dictionary).encode())\n encodedStr = base64.urlsafe_b64encode(byteStr)\n return encodedStr.rstrip('='.encode())", "def b64decode(data, altchars=b'+/'):\n data = re.sub(r'[^a-zA-Z0-9%s]+' % altchars, '', data) # normalize\n missing_padding = len(data) % 4\n if missing_padding:\n data += '='* (4 - missing_padding)\n return base64.b64decode(data, altchars)", "def base64Encode(input, addNewlines = False):\n base64Str = base64.b64encode(input)\n if not type(base64Str) is str:\n base64Str = \"\".join(map(chr, base64Str))\n \n if not addNewlines:\n return base64Str\n\n result = \"\"\n i = 0\n while i < len(base64Str):\n result += base64Str[i:i + 64] + \"\\n\"\n i += 64\n return result", "def encode_auth_token(secret_key, user_id):\n try:\n payload = {\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),\n 'iat': datetime.datetime.utcnow(),\n 'sub': user_id\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception as e:\n return e", "def _is_base64(value):\n #http://stackoverflow.com/questions/12315398/verify-is-a-string-is-encoded-in-base64-python\n try:\n enc = base64.b64encode(base64.b64decode(value)).strip()\n return enc == value\n except TypeError:\n return False", "def base64url_rsa_encrypt(self, value, public_key):\n rsakey = RSA.importKey(public_key)\n cipher = PKCS1_OAEP.new(rsakey)\n\n # encrypt, IMPORTANT: read about padding modes (RSA.pkcs1_padding)\n encrypted_msg = cipher.encrypt(value)\n\n if encrypted_msg:\n b64 = encrypted_msg.encode('base64')\n b64 = re.sub('(/)', '_', b64)\n b64 = re.sub('(\\+)', '-', b64)\n b64 = re.sub('(=)', '.', b64)\n return b64\n else:\n return AlignetError('RSA Ciphering could not be executed')", "def encode_auth_token(user_data, config):\n ttl_days = config.get('JWT_TTL_DAYS', 0)\n ttl_seconds = config.get('JWT_TTL_SECONDS', 0)\n secret_key = config['JWT_SECRET_KEY']\n\n now = dt.datetime.utcnow()\n try:\n payload = {\n 'exp': now + dt.timedelta(days=ttl_days, seconds=ttl_seconds),\n 'iat': now,\n 'sub': user_data\n }\n return jwt.encode(\n payload,\n secret_key,\n algorithm='HS256'\n )\n except Exception:\n raise" ]
[ "0.7315336", "0.69865996", "0.6815715", "0.6786328", "0.6752276", "0.6739659", "0.667964", "0.663198", "0.66216433", "0.6594801", "0.655502", "0.6553585", "0.6501903", "0.64332056", "0.64272547", "0.64245147", "0.6410119", "0.6359167", "0.63350207", "0.63240933", "0.6308579", "0.6300031", "0.62963617", "0.6294254", "0.6294254", "0.6280614", "0.62751776", "0.6265362", "0.62590444", "0.6251533", "0.62346095", "0.6192578", "0.6185225", "0.6155432", "0.61181056", "0.6111332", "0.6111018", "0.60957307", "0.60889685", "0.6076392", "0.60689116", "0.6062045", "0.6049405", "0.6044183", "0.60386175", "0.60292155", "0.60282516", "0.60078603", "0.5995975", "0.59940845", "0.59893775", "0.5985634", "0.59841716", "0.59841716", "0.5983736", "0.59647363", "0.59647363", "0.59514076", "0.59452164", "0.5925815", "0.59133273", "0.5907142", "0.58895415", "0.58868885", "0.58815295", "0.58760184", "0.5874835", "0.5874109", "0.5872231", "0.58600694", "0.58520293", "0.58477545", "0.5841635", "0.58074105", "0.5801473", "0.5790179", "0.5782907", "0.5773414", "0.57697517", "0.57626444", "0.5758936", "0.5757548", "0.5741512", "0.57403535", "0.5718269", "0.5717385", "0.57092106", "0.57091916", "0.56989795", "0.5686368", "0.565299", "0.5651845", "0.56467193", "0.564413", "0.5642604", "0.56392133", "0.56331813", "0.563159", "0.5623292", "0.56181854" ]
0.68548036
2
Create a nonce with timestamp included
def make_nonce():
    time_format = '%Y-%m-%dT%H:%M:%SZ'
    time_component = time.strftime(time_format, time.gmtime())
    valid_chars = ''

    # iterate over all the ascii characters for a list of all alpha-numeric characters
    for char_index in range(0, 128):
        if chr(char_index).isalpha() or chr(char_index).isalnum():
            valid_chars += chr(char_index)

    random_str = ''
    random_chr = random.SystemRandom()
    for i in range(0, 6):
        random_str += random_chr.choice(valid_chars)

    return '001{time_str}{random_str}'.format(time_str=time_component, random_str=random_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_nonce():\n default_seed = 'ifh2847fhsn\"lqOEYd@#Djh(&'\n hash = sha.new(default_seed)\n hash.update(str(datetime.utcnow()))\n return hash.hexdigest()", "def generate_nonce():\n return str(int(round(time.time() * 1000)))", "def _nonce():\n return str(round(100000 * time.time()) * 2)", "def generate_nonce():\n return uuid4().hex", "def generateNonce():\r\n hash = hashlib.sha1()\r\n hash.update(str(time.time()).encode('utf-8'))\r\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def _nonce(self):\n return str(int(round(time.time() * 10000)))", "def get_nonce() -> int:\n return int(time.time() * FACTOR)", "def make_nonce (self, request):\r\n ip = request.channel.server.ip\r\n now = str(long(time.time()))\r\n if now[-1:] == 'L':\r\n now = now[:-1]\r\n private_key = str (id (self))\r\n nonce = ':'.join([ip, now, private_key])\r\n return self.apply_hash (nonce)", "def nonce(length=40, prefix=\"access_token\"):\n rbytes = os.urandom(length)\n return \"{}_{}\".format(prefix, str(hashlib.sha1(rbytes).hexdigest()))", "def nonce():\n return random.randint(0, 4294967295)", "def nonce():\n return random.randint(0, 4294967295)", "def _generate_nonce(self):\n return str(random.randrange(100000, 999999))", "def _get_nonce():\n return uuid.uuid4().get_hex()", "def _build_new_nonce(self):\n seqno = self.new_sequence_number()\n\n partial_iv = seqno.to_bytes(5, 'big')\n\n return (self._construct_nonce(partial_iv, self.sender_id), partial_iv.lstrip(b'\\0') or b'\\0')", "def generate_nonce(length=8):\n return ''.join([str(random.randint(0, 9)) for i in range(length)])", "def dirty_nonce(rev, NONCE_LEN=5, **kwargs):\n import uuid\n return '%s-%s' % (rev, uuid.uuid4().hex[:NONCE_LEN])", "def gen_nonce(self, length=32):\n if(length < 32):\n res = {\"message\": 'Invalid nonce length'}, 400\n else:\n nonce = secrets.token_hex(floor(length))\n nonces_file = \"client-generate-nonces.txt\"\n res = self.check_nonce(nonce, nonces_file, length)\n return res", "def _nonce(self):\n # Note: if we use multithreading for a single exchange, this may\n # cause an issue.\n delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n return int(delta.total_seconds() * 1000)", "def GetCspNonce():\n NONCE_LENGTH = 16\n return base64.b64encode(os.urandom(NONCE_LENGTH))", "def default_nonce_duration():\n return now() + timedelta(hours=4)", "def _oauth_nonce_generate(self):\n\t\traw_data = random.getrandbits(32 * 8)\n\t\traw_str = ''\n\t\tfor i in range(32):\n\t\t\tnew_part = raw_data % 256\n\t\t\traw_data /= 256\n\t\t\traw_str += chr(new_part)\n\t\n\t\tencoded = base64.b64encode(raw_str) \n\t\treturn encoded.rstrip('=').replace('+', 'A').replace('/', 'B')", "def get_nonce(length=16):\n characters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n charlen = len(characters)\n return \"\".join([characters[SystemRandom().randint(0, charlen - 1)] for _ in range(0, length)])", "def storeNonce(self, nonce):\r\n entity = datastore.Entity('Nonce')\r\n entity['nonce'] = nonce\r\n entity['created'] = datetime.datetime.now()\r\n datastore.Put(entity)", "def _create_timestamp():\n return (datetime.utcnow() - datetime(1970,1,1)).total_seconds()", "def 
_GetCspNonce():\n nonce_length = constants.NONCE_LENGTH\n return base64.b64encode(os.urandom(nonce_length * 2))[:nonce_length]", "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "def get_initial_nonce(self):\n\n #First we will initiate the nonce with the prng.\n bit_nonce = int_to_bitstr(self.prng, 16)\n\n \"\"\" Then we generate the second part by taking only \n the last 16 bits until we have 32 bits in total. \"\"\"\n for i in range(16):\n bit_nonce += self.prng_feedback(bit_nonce[i:i+16])\n\n \"\"\" The new state of the prng will be the last 16 bits\n of the nonce, because we discarded 16 bits during the\n feedback loop. The initial nonce has 32 bits now. \"\"\"\n bit_prng = bit_nonce[16:]\n\n self.prng = bitstr_to_int(bit_prng)\n self.nonce = bitstr_to_int(bit_nonce)\n\n return self.nonce", "def create_new_nonce(self):\n\n self.nonce_action_auth = util.create_id_task() # create a new random auth string\n self.nonce_action_auth_valid_uses = 1\n return self.nonce_action_auth", "def create_id_nonce_signature(\n cls, *, signature_inputs: TSignatureInputs, private_key: bytes,\n ) -> bytes:\n ...", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def make_token(self, user):\n return super()._make_token_with_timestamp(user, int(time.time()))", "def gen_oauth_timestamp():\n\treturn int(time.time())", "def set_nonce(self, nonce=None):\n if nonce is None:\n nonce = os.urandom(32)\n self.nonce = nonce", "def _generate_timestamp():\n\t\treturn strftime(\"%Y%m%dT%H%M%S\")", "def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()", "def create_ticket(privkey, uid, validuntil, ip=None, tokens=(),\n udata='', graceperiod=None, extra_fields = ()):\n\n v = 'uid=%s;validuntil=%d' % (uid, validuntil)\n if ip:\n v += ';cip=%s' % ip\n if tokens:\n v += ';tokens=%s' % ','.join(tokens)\n if graceperiod:\n v += ';graceperiod=%d' % graceperiod\n if udata:\n v += ';udata=%s' % udata\n for k,fv in extra_fields:\n if isinstance(fv, bytes):\n fv = fv.decode('utf-8')\n v += ';%s=%s' % (k,fv)\n\n sig = calculate_digest(privkey, v.encode('utf-8'))\n v += ';sig=%s' % sig.decode('utf-8')\n\n print(v)\n\n return v", "def useNonce(self, server_url, timestamp, salt):\n\n if is_nonce_old(timestamp):\n return False\n\n try:\n mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,\n timestamp=timestamp)\n except me.DoesNotExist:\n mist_nonces = []\n\n if len(mist_nonces) == 0:\n print(\"Timestamp = %s\" % timestamp)\n MistNonce(\n server_url=server_url, salt=salt, timestamp=timestamp\n ).save()\n return True\n\n return False", "def timestamp():\n\tn = datetime.datetime.now()\n\treturn \"%04d-%02d-%02dT%02d:%02d:%02d\" % (\n\t\tn.year, n.month, n.day, n.hour, n.minute, n.second\n\t)", "def _create_time_stamp() -> str:\n\n return datetime.datetime.now().strftime(\"%Y%m%d\")", "def __timestamp():\n today = time.time()\n return struct.pack(b'=L', int(today))", "def generate_token():\n return uuid4()", "def generate_transaction_id():\r\n return str(int(time.time() * 1000))", "def _make_stamp():\n from random import choice\n from string import hexdigits\n\n length = 8\n return ''.join(choice(hexdigits) for m in range(length))", "def useNonce(self, nonce):\r\n query 
= datastore.Query('Nonce')\r\n query['nonce ='] = nonce\r\n query['created >='] = (datetime.datetime.now() -\r\n datetime.timedelta(hours=6))\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())\r\n return True\r\n else:\r\n return False", "def _get_timestamp():\n return '{}Z'.format(\n datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]\n )", "def create_hmac_sha_256_signature(api_key_secret, signing_data, timestamp, nonce):\n key_nonce = \\\n hmac.new(codecs.decode(api_key_secret, 'hex_codec'), codecs.decode(nonce, 'hex_codec'), sha256).digest()\n key_date = hmac.new(key_nonce, str(timestamp).encode(), sha256).digest()\n signature_key = hmac.new(key_date, u'vcode_request_version_1'.encode(), sha256).digest()\n return hmac.new(signature_key, signing_data.encode(), sha256).hexdigest()", "def prehash_message(self, timestamp, account, method, params, nonce):\n first = hashlib.sha256(py23_bytes(timestamp + account + method + params, self.ENCODING))\n return self.K + first.digest() + nonce", "def create_timestamp(dt=None):\n if dt is None:\n dt = datetime.datetime.utcnow()\n return dt.isoformat()", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def generate_message_control_id():\n d = datetime.datetime.utcnow()\n # Strip off the decade, ID only has to be unique for 3 years.\n # So now we have a 16 char timestamp.\n timestamp = d.strftime(\"%y%j%H%M%S%f\")[1:]\n # Add 4 chars of uniqueness\n unique = \"\".join(random.sample(alphanumerics, 4))\n return timestamp + unique", "def poly1305_key_gen(key: bytes, nonce: bytes) -> bytes:\n\n poly = ChaCha(key, nonce)\n return poly.encrypt(bytes(32))", "def generate_token(secret, message=None):\n timestamp = str(int(time.time()))\n return '{}:{}'.format(\n timestamp,\n get_hmac(secret, str(message) + timestamp),\n )", "def validateNonce(lastNonce, lastHash, nonce):\n sha = hashlib.sha256(f'{lastNonce}{lastHash}{nonce}'.encode())\n return sha.hexdigest()[:4] == '0000'", "def nonceRFC6979(privKey, inHash):\n # Truncate private key if too long.\n if len(privKey) > 32:\n privKey = privKey[:32]\n\n q = Curve.N\n x = privKey\n\n qlen = q.bit_length()\n holen = SHA256_SIZE\n rolen = (qlen + 7) >> 3\n bx = int2octets(x, rolen) + bits2octets(inHash, rolen)\n\n # Step B\n v = ByteArray(bytearray([1] * holen))\n\n # Step C (Go zeroes the all allocated memory)\n k = ByteArray(0, length=holen)\n\n # Step D\n k = mac(k, v + ByteArray(0x00, length=1) + bx)\n\n # Step E\n v = mac(k, v)\n\n # Step F\n k = mac(k, v + 0x01 + bx)\n\n # Step G\n v = mac(k, v)\n\n # Step H\n while True:\n # Step H1\n t = ByteArray(b\"\")\n\n # Step H2\n while len(t) * 8 < qlen:\n v = mac(k, v)\n t += v\n\n # Step H3\n secret = hashToInt(t)\n if secret >= 1 and secret < q:\n return secret\n\n k = mac(k, v + 0x00)\n v = mac(k, v)", "def get_nonce(self, address):\n nonce = self.web3_object.eth.getTransactionCount(address)\n return nonce", "def gen_oauth_nonce():\n\trandom = os.urandom(32)\n\tencoded = base64.b64encode(random)\n\twords = re.sub('[^\\w]', '', str(encoded))\n\treturn words", "def _make_timestamp(self):\r\n\t\tlogger.debug(\"Get a timestamp\")\r\n\t\treturn time.mktime(datetime.today().timetuple())", "def timestamp():\n return datetime.datetime.now().strftime(\"%Y-%m-%d-T%H-%M-%S\")", "def verify_and_burn_nonce(nonce):\n ret = re.match(r'^001[2-9][0-9]{3}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])'\n 
r'T([01][0-9]|2[0-3])(:[0-5][0-9]){2}Z[A-Za-z0-9]{6}$', nonce)\n if ret:\n date = parser.parse(nonce[3:-6])\n now = datetime.utcnow().replace(tzinfo=tz.tzutc())\n ret = date < (now + timedelta(minutes=2)) and date > (now + timedelta(hours=-1))\n\n return ret # TODO: keep a record (at least for the last hour) of burned nonces", "def generate_timer_id():\n\treturn base64.b64encode(os.urandom(30), b\"Qx\").decode(\"ascii\")", "def _create_security_token(user):\n timestamp = int(time.time())\n plaintext = \"%x %s\" % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n # Pad plaintest with whitespace to make the length a multiple of 16,\n # as this is a requirement of AES encryption.\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = \"sig\"\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)", "def get_unique_id(cls, wallet: str, nonce: Optional[str]) -> str:\n if nonce:\n return f\"{wallet}_{nonce}{cls.TRANSACTION_KEY_POSTFIX}\"\n else:\n return f\"{wallet}{cls.TRANSACTION_KEY_POSTFIX}\"", "def timestamp():\n return datetime.now().strftime(\"%Y%m%dT%H%M%S\")", "def generate_security_hash(self, content_type, object_pk, timestamp):\n info = (content_type, object_pk, timestamp)\n key_salt = \"django.contrib.forms.CommentSecurityForm\"\n value = \"-\".join(info)\n return salted_hmac(key_salt, value).hexdigest()", "def get_timestamp():\n now, s=get_date()\n return (now, \"%s%s%s%s\" % (s, str(now.hour).zfill(2), str(now.minute).zfill(2), str(now.second).zfill(2)))", "def generate_token(login, password):\n time = datetime.datetime.now().timestamp()\n raw_string = str(login) + str(password) + str(time)\n return hashlib.sha256(str(raw_string).encode('utf-8')).hexdigest()", "def createTimeStamp_Time():\r\n\r\n return str(datetime.now().strftime(\"%H%M%S\"))", "def timestamp():\n return round(datetime.datetime.utcnow().timestamp())", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def test_nonce(mocker):\n transaction = Transaction(\n chain=0,\n nonce=14_294_967_296,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.nonce = 1_260_300\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.nonce = -1\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)", "def _generate_id(self, context):\n tmp = datetime.datetime.now()\n tmp = tmp.strftime('%Y%m%d%H%M%S%f')\n tmp += context.peer()\n m = hashlib.md5()\n m.update(tmp.encode('utf-8'))\n return str(m.hexdigest())", "def get_client_token(**_):\n return str(uuid.uuid4())", "def createTimeStamp_Date():\r\n\r\n return str(datetime.now().strftime(\"%Y%m%d\"))", "def time_stamper() :\n\treturn datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n 
self.app.cfg['sessions/secret'])).hexdigest()", "def new_token_expiry_date():\n\treturn timezone.now() + datetime.timedelta(days=TOKEN_VALID_DATE)", "def get_timestamp():\n timestamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())\n return timestamp", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'username': self.username,\n 'exp': int(dt.strftime('%s')),\n }, settings.SECRET_KEY, algorithm='HS256')\n # print(token)\n return token", "def generate_cltrid(self, length=32):\n return hashlib.sha512(uuid.uuid4().hex).hexdigest()[0:length]", "def makeTimeStamp():\n return datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")", "def random_ticket():\n ts = time.time()\n return \"%s_%s\" % (ts, random_str(6, string.digits))", "def timestamp():\n return int(time.time())", "def _generate_jwt_token(self):\n dt = datetime.now() + timedelta(days=60)\n\n token = jwt.encode({\n 'id': self.pk,\n 'exp': int(dt.strftime('%s'))\n }, settings.SECRET_KEY, algorithm='HS256')\n\n return token.decode('utf-8')", "def Timestamp():\n now = NowDateTime()\n return '%04d%02d%02d-%02d%02d%02d-%06d-%s' % (\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second,\n now.microsecond,\n time.tzname[0],\n )", "def timestamp():\n return datetime.utcnow().strftime(\"%F %T\")", "def creation_timestamp(self):\n\n return self.get_signingTime()", "def create_acct_packet(self, **args):\n return host.Host.create_acct_packet(self, secret=self.secret, **args)", "def __get_new_token(self):\n keystone = {\n 'username': self.username,\n 'password': self.password,\n 'project_name': self.project,\n 'auth_url': self.auth_uri\n }\n\n ks_client = ksclient.KSClient(**keystone)\n convert_time = ciso8601.parse_datetime(str(ks_client._keystone.auth_ref.expires))\n token_exp = time.mktime(convert_time.timetuple())\n #tmp_str = str(convert_time).split('.')\n #token_exp = time.mktime(time.strptime(tmp_str[0], '%Y-%m-%d %H:%M:%S'))\n factor = self.__correct_token_time()\n\n print (\"Get new Token: {}\".format(ks_client.token))\n print (\"Expiration time in UTC: {}\".format(ks_client._keystone.auth_ref.expires))\n print (\"Expiration time in seconds since beginning of time: {}\".format(token_exp))\n print (\"The FACTOR: {}\".format(factor))\n return ks_client.token, (token_exp + factor)", "def _timestamp():\n moment = time.time()\n moment_us = repr(moment).split(\".\")[1]\n return time.strftime(\"%Y-%m-%d-%H-%M-%S-{}\".format(moment_us), time.gmtime(moment))", "def timestamp():\n return datetime.now().strftime('%Y-%m-%d_%H-%M-%S')", "def getUTCTimeStamp(timestamp):\n d = ShakeDateTime(1970, 1, 1) + timedelta(microseconds=(timestamp*1000))\n #this is necessary because adding a timedelta returns a datetime object, not a ShakeDateTime object.\n d = ShakeDateTime(d.year,d.month,d.day,d.hour,d.minute,d.second,d.microsecond)\n return d", "def timestamp():\n return time.time()", "def timestamp():\n tmptz = time.timezone\n sign_str = '+'\n if tmptz > 0:\n sign_str = '-'\n tmptz_hours = int(tmptz / 3600)\n\n return str(\"%s%s%02d:%02d\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime()), sign_str, abs(tmptz_hours),\n int(tmptz / 60 - tmptz_hours * 60)))", "def generate_client_token_by_uuid():\n return str(uuid.uuid4())", "def generate_client_token_by_uuid():\n return str(uuid.uuid4())", "def create_from_transaction(tx, prev_hash):\n\n tx_hash = 
HashAssist.hash_value(tx.to_string_for_hashing())\n\n print(\"Mining nonce....\")\n nonce = proof.mint(prev_hash + tx_hash, WORK_FACTOR)\n header_hash = HashAssist.hash_value(prev_hash + tx_hash + nonce)\n\n return Block(header_hash, prev_hash, nonce, tx_hash, tx)", "async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)", "def hex_timestamp(timestamp=None):\n if not timestamp:\n timestamp = unix_timestamp()\n return hex_value(timestamp, width=8)" ]
[ "0.81308687", "0.808096", "0.78753763", "0.7763684", "0.7712205", "0.7655275", "0.7655275", "0.7587932", "0.7550162", "0.7538623", "0.7479432", "0.74498117", "0.74498117", "0.7414203", "0.7234778", "0.713928", "0.69921154", "0.6871529", "0.6782506", "0.6731106", "0.66884166", "0.6599518", "0.6590977", "0.6531914", "0.6496039", "0.64277786", "0.6424741", "0.6383983", "0.63668597", "0.63627505", "0.62909955", "0.62878877", "0.617719", "0.61431414", "0.6105602", "0.60350066", "0.60144114", "0.5956325", "0.59457314", "0.5944688", "0.5943323", "0.5906539", "0.5884202", "0.58488995", "0.5845423", "0.58361477", "0.57906514", "0.5783238", "0.57401687", "0.57327443", "0.5719858", "0.57140636", "0.57084614", "0.5693377", "0.566471", "0.5658775", "0.5634225", "0.5624294", "0.5612586", "0.5604595", "0.5603116", "0.5553978", "0.55492723", "0.5544183", "0.55232906", "0.5520593", "0.5511464", "0.5503941", "0.5493066", "0.54800695", "0.54797554", "0.54715985", "0.5449857", "0.5442269", "0.5437353", "0.5434504", "0.5424949", "0.5411099", "0.5404086", "0.5402652", "0.54020566", "0.5389742", "0.53825235", "0.5378869", "0.53768057", "0.5371843", "0.5368122", "0.5366729", "0.5362092", "0.5357194", "0.5347235", "0.5347089", "0.534627", "0.5341591", "0.5333104", "0.53254384", "0.53254384", "0.53238523", "0.53128946", "0.5306296" ]
0.8013038
2
Ensure that the nonce is correct, less than one hour old, and not more than two minutes in the future. Callers should also store used nonces and reject messages with previously used ones.
def verify_and_burn_nonce(nonce):
    ret = re.match(r'^001[2-9][0-9]{3}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])'
                   r'T([01][0-9]|2[0-3])(:[0-5][0-9]){2}Z[A-Za-z0-9]{6}$', nonce)
    if ret:
        date = parser.parse(nonce[3:-6])
        now = datetime.utcnow().replace(tzinfo=tz.tzutc())
        ret = date < (now + timedelta(minutes=2)) and date > (now + timedelta(hours=-1))

    return ret  # TODO: keep a record (at least for the last hour) of burned nonces
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateNonce(lastNonce, lastHash, nonce):\n sha = hashlib.sha256(f'{lastNonce}{lastHash}{nonce}'.encode())\n return sha.hexdigest()[:4] == '0000'", "def nonce():\n return random.randint(0, 4294967295)", "def nonce():\n return random.randint(0, 4294967295)", "def _nonce():\n return str(round(100000 * time.time()) * 2)", "def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()", "def get_nonce() -> int:\n return int(time.time() * FACTOR)", "def useNonce(self, nonce):\r\n query = datastore.Query('Nonce')\r\n query['nonce ='] = nonce\r\n query['created >='] = (datetime.datetime.now() -\r\n datetime.timedelta(hours=6))\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())\r\n return True\r\n else:\r\n return False", "def nonceVerification(nonce, decryptedNonce):\r\n if (nonce == decryptedNonce):\r\n status = \"150 OK\"\r\n else:\r\n status = \"400 Error\"\r\n return status", "def _nonce(self):\n return str(int(round(time.time() * 10000)))", "def make_nonce (self, request):\r\n ip = request.channel.server.ip\r\n now = str(long(time.time()))\r\n if now[-1:] == 'L':\r\n now = now[:-1]\r\n private_key = str (id (self))\r\n nonce = ':'.join([ip, now, private_key])\r\n return self.apply_hash (nonce)", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def useNonce(self, server_url, timestamp, salt):\n\n if is_nonce_old(timestamp):\n return False\n\n try:\n mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,\n timestamp=timestamp)\n except me.DoesNotExist:\n mist_nonces = []\n\n if len(mist_nonces) == 0:\n print(\"Timestamp = %s\" % timestamp)\n MistNonce(\n server_url=server_url, salt=salt, timestamp=timestamp\n ).save()\n return True\n\n return False", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def check_token(self, user, token):\n\n # Parse the token\n try:\n ts_b36, hash = token.split(\"-\")\n except ValueError:\n return False\n\n try:\n ts = base36_to_int(ts_b36)\n except ValueError:\n return False\n\n # Check that the timestamp/uid has not been tampered with\n recomputed_token = self._make_token_with_timestamp(user, ts)\n\n log.debug(\"Ricalcolo re_token=%s token=%s\" % (recomputed_token, token))\n if not constant_time_compare(recomputed_token, token):\n return False\n\n # Check the timestamp is within limit\n if (self._num_days(self._today()) - ts) > settings.REFERRAL_TOKEN_RESET_TIMEOUT_DAYS:\n return False\n\n return True", "def default_nonce_duration():\n return now() + timedelta(hours=4)", "def test_blind_sig_expiration(self):\n signer_obj = ECCBlind(year=2020, month=1)\n point_r = signer_obj.signer_init()\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n msg = os.urandom(64)\n msg_blinded = requester_obj.create_signing_request(point_r, msg)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n verifier_obj = ECCBlind(pubkey=signer_obj.pubkey())\n self.assertFalse(verifier_obj.verify(msg, signature))", "def _nonce(self):\n # Note: if we use multithreading for a single exchange, this may\n # cause an issue.\n delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n 
return int(delta.total_seconds() * 1000)", "def gen_nonce(self, length=32):\n if(length < 32):\n res = {\"message\": 'Invalid nonce length'}, 400\n else:\n nonce = secrets.token_hex(floor(length))\n nonces_file = \"client-generate-nonces.txt\"\n res = self.check_nonce(nonce, nonces_file, length)\n return res", "def _get_nonce():\n return uuid.uuid4().get_hex()", "def test_nonce(mocker):\n transaction = Transaction(\n chain=0,\n nonce=14_294_967_296,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.nonce = 1_260_300\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.nonce = -1\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)", "def nonceVerification(nonce, decryptedNonce):\n #Enter code to compare the nonce and the decryptedNonce. This method\n # should return a string of \"200 OK\" if the parameters match otherwise\n # it should return \"400 Error Detected\"\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"", "def check_one_time_nonce(self, user_supplied_nonce):\n\n if self.nonce_action_auth_valid_uses > 0:\n self.nonce_action_auth_valid_uses -= 1\n ret = util.safe_string_compare(user_supplied_nonce, self.nonce_action_auth)\n if ret is True: # explicitly checking for boolean True\n return True\n return False\n return False", "def make_nonce():\n time_format = '%Y-%m-%dT%H:%M:%SZ'\n time_component = time.strftime(time_format, time.gmtime())\n valid_chars = ''\n\n # iterate over all the aschii characters for a list of all alpha-numeric characters\n for char_index in range(0, 128):\n if chr(char_index).isalpha() or chr(char_index).isalnum():\n valid_chars += chr(char_index)\n\n random_str = ''\n random_chr = random.SystemRandom()\n for i in range(0, 6):\n random_str += random_chr.choice(valid_chars)\n\n return '001{time_str}{random_str}'.format(time_str=time_component,\n random_str=random_str)", "def generate_nonce():\n return str(int(round(time.time() * 1000)))", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def nonceVerification(nonce, decryptedNonce):\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"", "def verify_token(self, token):\n _now = timezone.now()\n\n if (\n (self.token is not None)\n and (token == self.token)\n and (_now < self.valid_until)\n ):\n self.token = None\n self.valid_until = _now\n self.save()\n\n return True\n else:\n return False", "def _generate_nonce(self):\n return str(random.randrange(100000, 999999))", "async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, 
FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)", "def get_initial_nonce(self):\n\n #First we will initiate the nonce with the prng.\n bit_nonce = int_to_bitstr(self.prng, 16)\n\n \"\"\" Then we generate the second part by taking only \n the last 16 bits until we have 32 bits in total. \"\"\"\n for i in range(16):\n bit_nonce += self.prng_feedback(bit_nonce[i:i+16])\n\n \"\"\" The new state of the prng will be the last 16 bits\n of the nonce, because we discarded 16 bits during the\n feedback loop. The initial nonce has 32 bits now. \"\"\"\n bit_prng = bit_nonce[16:]\n\n self.prng = bitstr_to_int(bit_prng)\n self.nonce = bitstr_to_int(bit_nonce)\n\n return self.nonce", "def generate_nonce():\n return uuid4().hex", "def validate_exp(self, now, leeway):\n if 'exp' in self:\n exp = self['exp']\n if not _validate_numeric_time(exp):\n raise InvalidClaimError('exp')\n if exp < (now - leeway):\n raise ExpiredTokenError()", "def validate(self, encrypted_token: str) -> bool:\n payload, timestamp_ms, crc = self.unsleeve(encrypted_token)\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n computed_crc = zlib.crc32(payload + ts_bytes)\n\n if crc == computed_crc:\n return in_range(timestamp_ms, deadline=self.token_life_ms)\n\n return False", "def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):\n guess = (str(transactions)+str(last_hash)+str(nonce)).encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:difficulty] == '0'*difficulty", "def verify_auth_token(shared_key, eppn, token, nonce, timestamp, generator=sha256):\n # check timestamp to make sure it is within 300 seconds from now\n logger.debug(\"Trying to authenticate user {!r} with auth token {!r}\".format(eppn, token))\n # check timestamp to make sure it is within -300..900 seconds from now\n now = int(time.time())\n ts = int(timestamp, 16)\n if (ts < now - 300) or (ts > now + 900):\n logger.debug(\"Auth token timestamp {!r} out of bounds ({!s} seconds from {!s})\".format(\n timestamp, ts - now, now))\n raise HTTPForbidden(_('Login token expired, please await confirmation e-mail to log in.'))\n # verify there is a long enough nonce\n if len(nonce) < 16:\n logger.debug(\"Auth token nonce {!r} too short\".format(nonce))\n raise HTTPForbidden(_('Login token invalid'))\n\n expected = generator(\"{0}|{1}|{2}|{3}\".format(\n shared_key, eppn, nonce, timestamp)).hexdigest()\n # constant time comparision of the hash, courtesy of\n # http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/\n if len(expected) != len(token):\n logger.debug(\"Auth token bad length\")\n raise HTTPForbidden(_('Login token invalid'))\n result = 0\n for x, y in zip(expected, token):\n result |= ord(x) ^ ord(y)\n logger.debug(\"Auth token match result: {!r}\".format(result == 0))\n return result == 0", "def test_live_thread_token_is_valid(self):\n assert self.token.is_valid()", "def proof_of_work(self, block, previous_hash):\r\n # Start WIth Nonce = 1\r\n nonce = 1\r\n\r\n # Loop Till You Find A Valid Nonce\r\n check_proof = False\r\n while check_proof is False:\r\n block['nonce'] = nonce\r\n hash_operation = self.hash(block)\r\n if hash_operation[:4] == '0000': # Check if the current_hash fulfills the required condition\r\n check_proof = True # If it does 
then exit the loop\r\n else:\r\n nonce += 1 # Else try with the next nonce\r\n\r\n return nonce, hash_operation # Return the nonce and the hash that meet the required condition\r", "def test_timeout(self):\n # Uses a mocked version of EmailActivationTokenGenerator\n # so we can change the value of 'today'\n class Mocked(EmailActivationTokenGenerator):\n def __init__(self, today):\n self._today_val = today\n\n def _today(self):\n return self._today_val\n\n user = self.create_user()\n token_generator = EmailActivationTokenGenerator()\n token = token_generator.make_token(user)\n\n p1 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS))\n self.assertTrue(p1.check_token(user, token))\n\n p2 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS + 1))\n self.assertFalse(p2.check_token(user, token))", "def check_token(self, token):\n if not token or not self.verification_token:\n return False\n if not constant_time_compare(token, self.verification_token):\n return False\n if self.is_verified:\n return False\n age = timezone.now() - self.added_date\n if age >= timedelta(days=AssociatedEmail.VERIFICATION_TIMEOUT_DAYS):\n return False\n return True", "def checkTokenTime(func):\n def wrapper(*args, **kwargs):\n config = s.query(Config).first()\n time_left = config.LastAuthDateUTC + (config.ExpiredToken * 1000) - int(datetime.datetime.now().timestamp() * 1000)\n if time_left < 10: # give 10 seconds grace\n Issuer.updateToken(Issuer)\n return func(*args, **kwargs)\n return wrapper", "def _verify_timeout(self, doc):\n expires = doc['expires']\n if expires == 0:\n return False\n if expires >= self._time():\n return False\n return True", "def test_validate_chain_with_tempered_block_nonce(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n last_block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(last_block)\n\n chain = blockchain.full_chain\n\n # Hack a block\n chain.append(Block(1, [], 1, last_block.hash))\n\n self.assertFalse(blockchain.validate_chain(blockchain.full_chain))", "def test_rejects_expired_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n utils.set_utcnow_for_test(XsrfToolTests.TEST_NOW +\n datetime.timedelta(hours=4, minutes=1))\n self.assertFalse(tool.verify_token(token, 12345, 'test_action'))", "def _assert_valid(self, token_id, token_ref):\n current_time = timeutils.normalize_time(timeutils.utcnow())\n expires = token_ref.get('expires')\n if not expires or current_time > timeutils.normalize_time(expires):\n raise exception.TokenNotFound(token_id=token_id)", "def nonce(length=40, prefix=\"access_token\"):\n rbytes = os.urandom(length)\n return \"{}_{}\".format(prefix, str(hashlib.sha1(rbytes).hexdigest()))", "def nonceRFC6979(privKey, inHash):\n # Truncate private key if too long.\n if len(privKey) > 32:\n privKey = privKey[:32]\n\n q = Curve.N\n x = privKey\n\n qlen = q.bit_length()\n holen = SHA256_SIZE\n rolen = (qlen + 7) >> 3\n bx = int2octets(x, rolen) + bits2octets(inHash, rolen)\n\n # Step B\n v = ByteArray(bytearray([1] * holen))\n\n # Step C (Go zeroes the all allocated memory)\n k = ByteArray(0, length=holen)\n\n # Step D\n k = mac(k, v + ByteArray(0x00, length=1) + bx)\n\n # Step E\n v = mac(k, v)\n\n # Step F\n k = mac(k, v + 0x01 + bx)\n\n # Step G\n v = mac(k, v)\n\n # Step H\n while True:\n # Step H1\n t = ByteArray(b\"\")\n\n # Step H2\n while len(t) * 8 < 
qlen:\n v = mac(k, v)\n t += v\n\n # Step H3\n secret = hashToInt(t)\n if secret >= 1 and secret < q:\n return secret\n\n k = mac(k, v + 0x00)\n v = mac(k, v)", "def _check_goauth_expiration(self, expiry):\n now = int(time.time())\n time_left = int(expiry) - now\n # 10 days\n min_time_left = 60*60*24*10\n if time_left < min_time_left:\n return False\n else:\n return True", "def isValid( self ):\n\n assert self.issueDate\n now = int(time.time())\n\n if (now - self.issueDate) > const.SESSION_TICKET_LIFETIME:\n log.debug(\"Ticket is not valid anymore.\")\n return False\n\n return True", "def test_invalid_time_too_late(event_member):\n _, member, event_id = event_member\n current = date.today() + timedelta(days=1)\n start = (datetime.combine(current, time(16, 30)) +\n timedelta(days=(MAX_DAYS - 2)))\n end = start + timedelta(days=5)\n expect_error(edit, InputError, member.username, event_id,\n True, start, end)", "def test_is_token_json_temporally_valid(self):\n payload_list = []\n\n # Test that we reject a payload without 'iat' or 'exp'\n # as the tokens should have a lifetime\n payload_list.append({\n 'sub': CLIENT_ID,\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '714892f5-014f-43ad-bea0-fa47579db222'\n })\n\n # Test that we reject a payload without 'exp'\n # as such a token would never expire\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID\n })\n\n # Test that we reject a payload without 'iat'\n # as all tokens should indicate when they were issued\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 200000\n })\n\n # Test that we reject a payload with an 'iat' and 'exp'\n # in the past (e.g. 
they have expired)\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) - 200000\n })\n\n # Test that we reject a payload with an 'iat' and 'exp'\n # in the future (as we should as they are not yet valid)\n payload_list.append({\n 'iss': 'https://iam-test.indigo-datacloud.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) + 200000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) + 2000000\n })\n\n for payload in payload_list:\n # Assert the underlying helper method reponsible for\n # checking temporal validity returns False when passed\n # temporally invalid payloads\n self.assertFalse(\n self._token_checker._is_token_json_temporally_valid(payload),\n \"Payload %s should not be accepted!\" % payload\n )\n\n # Assert the wrapper method valid_token_to_id returns\n # None when passed temporally invalid tokens\n token = self._create_token(payload, PRIVATE_KEY)\n self.assertEqual(\n self._token_checker.valid_token_to_id(token), None,\n \"Token with payload %s should not be accepted!\" % payload\n )", "def validate_lifetime(self, for_policy, policy_info):\n units = policy_info['lifetime']['units']\n if units != 'seconds':\n raise CsrValidationFailure(resource=for_policy,\n key='lifetime:units',\n value=units)\n value = policy_info['lifetime']['value']\n if (value < LIFETIME_LIMITS[for_policy]['min'] or\n value > LIFETIME_LIMITS[for_policy]['max']):\n raise CsrValidationFailure(resource=for_policy,\n key='lifetime:value',\n value=value)", "def is_valid(self):\n return self.access_token is not None \\\n and time.time() < self._expiration_timestamp", "def test_id_token_expired(self):\n id_token_fmt = 'YW55IGNhcm5hbCBwbGVhc3VyZS4.{}.YW55IGNhcm5hbCBwbGVhc3VyZS4'\n\n payload_expired = {'exp': 0}\n payload_expired_b64 = self._payload_to_b64(payload_expired)\n id_token_expired = id_token_fmt.format(payload_expired_b64)\n self.assertTrue(oidc._id_token_expired(id_token_expired))\n\n payload_valid = {'exp': 99999999999}\n payload_valid_b64 = self._payload_to_b64(payload_valid)\n id_token_valid = id_token_fmt.format(payload_valid_b64)\n self.assertFalse(oidc._id_token_expired(id_token_valid))", "def test_can_not_cancel_past_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(hours=1)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'cancel': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False", "def pass_good_until(hours_good=config.HOURS_TO_GRANT_ACCESS):\n return datetime.now() + timedelta(hours=hours_good)", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def ssl_expires_in(entity, serial_number, remaining, buffer_days=14):\n # if the cert expires in less than two weeks, we should reissue it\n if remaining < 
datetime.timedelta(days=0):\n # cert has already expired - uhoh!\n print(\"Cert %s issued to '%s' expired %s days ago!\"\n % (serial_number, entity, remaining.days))\n elif remaining < datetime.timedelta(days=buffer_days):\n # expires sooner than the buffer\n print(\"Cert %s issued to '%s' is nearly expired - %s more days\"\n % (serial_number, entity, remaining.days))\n else:\n # everything is fine\n print(\"Cert %s issued to '%s' is valid for %s more days\"\n % (serial_number, entity, remaining.days))", "def _check_timeouts(self):\n\n expired_tokens = []\n for token in self._capability_timeouts:\n interval = datetime.utcnow() - self._capability_timeouts[token]\n if interval.total_seconds() >= 10:\n expired_tokens.append(token)\n\n for token in expired_tokens:\n cap_withdraw = mplane.model.Withdrawal(capability=self._capabilities[token])\n self.handle_message(cap_withdraw, self.identity_for(token))", "def try_valid(ctx, fields):\n if fields.get(\"valid\") is None:\n return\n # parse at least the YYYY-mm-ddTHH:MM\n ts = datetime.datetime.strptime(fields[\"valid\"][:16], \"%Y-%m-%dT%H:%M\")\n ctx[\"valid\"] = utc(ts.year, ts.month, ts.day, ts.hour, ts.minute)", "def validate_nbf(self, now, leeway):\n if 'nbf' in self:\n nbf = self['nbf']\n if not _validate_numeric_time(nbf):\n raise InvalidClaimError('nbf')\n if nbf > (now + leeway):\n raise InvalidTokenError()", "def dirty_nonce(rev, NONCE_LEN=5, **kwargs):\n import uuid\n return '%s-%s' % (rev, uuid.uuid4().hex[:NONCE_LEN])", "def generateNonce():\r\n hash = hashlib.sha1()\r\n hash.update(str(time.time()).encode('utf-8'))\r\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def run_ticket_validation(user_id, access_token, nonce):\n token_check_url = 'https://graph.oculus.com/user_nonce_validate?access_token={access_token}&nonce={nonce}&user_id={user_id}'\n url = token_check_url.format(user_id=user_id, access_token=access_token, nonce=nonce)\n\n try:\n ret = requests.post(url, headers={'Accept': 'application/json'})\n except requests.exceptions.RequestException as e:\n log.warning(\"Oculus authentication request failed: %s\", e)\n abort_unauthorized(\"Oculus ticket validation failed. Can't reach Oculus platform.\")\n\n if ret.status_code != 200 or not ret.json().get('is_valid', False):\n log.warning(\"Failed Oculus authentication. 
Response code %s: %s\", ret.status_code, ret.json())\n abort_unauthorized(\"User {} not authenticated on Oculus platform.\".format(user_id))\n\n return user_id", "def test_http_issuer_ban(self):\n self.assertEqual(\n self._token_checker._check_token_not_revoked(None,\n 'http://idc.org'),\n None\n )\n\n self.assertFalse(\n self._token_checker._verify_token(None,\n 'http://idc.org')\n )", "def _check_validity(self):\n pass", "async def validate_token(self, token):", "def validate(self):\n try:\n num = map(int, self.card_number)\n except ValueError:\n raise AuthorizeInvalidError('Credit card number is not valid.')\n if sum(num[::-2] + map(lambda d: sum(divmod(d * 2, 10)), num[-2::-2])) % 10:\n raise AuthorizeInvalidError('Credit card number is not valid.')\n if datetime.now() > self.expiration:\n raise AuthorizeInvalidError('Credit card is expired.')\n if not re.match(r'^[\\d+]{3,4}$', self.cvv):\n raise AuthorizeInvalidError('Credit card CVV is invalid format.')\n if not self.card_type:\n raise AuthorizeInvalidError('Credit card number is not valid.')", "def _build_new_nonce(self):\n seqno = self.new_sequence_number()\n\n partial_iv = seqno.to_bytes(5, 'big')\n\n return (self._construct_nonce(partial_iv, self.sender_id), partial_iv.lstrip(b'\\0') or b'\\0')", "def valid(self):\n return (self.expiry is None or self.expiry > timezone.now()) and (\n self.use_limit is None or self.times_used < self.use_limit\n )", "def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)", "def create_nonce():\n default_seed = 'ifh2847fhsn\"lqOEYd@#Djh(&'\n hash = sha.new(default_seed)\n hash.update(str(datetime.utcnow()))\n return hash.hexdigest()", "def test_reset_tenant_token_later(self):\n new_token, orig_token = self._check_reset_token(invalidate=False)\n self.assertEqual(new_token.previous, orig_token.valid)", "def test_max_used_thread_token_is_valid(self):\n self.token.use_count = const.MAX_TOKEN_USE_COUNT\n assert not self.token.is_valid()", "def set_nonce(self, nonce=None):\n if nonce is None:\n nonce = os.urandom(32)\n self.nonce = nonce", "def storeNonce(self, nonce):\r\n entity = datastore.Entity('Nonce')\r\n entity['nonce'] = nonce\r\n entity['created'] = datetime.datetime.now()\r\n datastore.Put(entity)", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def check_expiration(self, cur_time):\n\n\t\ttime_limit = 1000\n\t\ttime_elapsed = cur_time - self.time_created\n\n\t\t# Erase cache after an arbitrary amount of time\n\t\tif time_elapsed > time_limit:\n\t\t\tself.cache_expiration()", "def test_encrypt_nonce(self):\n key = b'0' * 32\n message = 'message'\n\n assert encrypt(message, key=key) != encrypt(message, key=key)", "def test_token_cache_mis_match(self, mock_check_token_not_revoked,\n mock_get_issuer_public_key):\n # Mock the external call to retrieve the IAM public key\n # used in the _verify_token and valid_token_to_id call\n mock_get_issuer_public_key.return_value = PUBLIC_KEY\n # Mock the external call to check the token has not been rejected\n # used in the valid_token_to_id call\n 
mock_check_token_not_revoked.return_value = CLIENT_ID\n\n # This payload will be valid as we will sign it with PRIVATE_KEY\n payload1 = self._standard_token()\n\n # This payload has a subject that will be in the cache, but this\n # new token is not. We need to ensure this invalid token does not\n # get granted rights based only on it's subject being in the cache\n payload2 = {\n 'iss': 'https://iam-test.idc.eu/',\n 'jti': '098cb343-c45e-490d-8aa0-ce1873cdc5f8',\n 'iat': int(time.time()) - 2000000,\n 'sub': CLIENT_ID,\n 'exp': int(time.time()) - 200\n }\n\n token1 = self._create_token(payload1, PRIVATE_KEY)\n token2 = self._create_token(payload2, PRIVATE_KEY)\n\n with self.settings(IAM_HOSTNAME_LIST=['iam-test.idc.eu']):\n self.assertEqual(\n self._token_checker.valid_token_to_id(token1), CLIENT_ID,\n \"Token with payload %s should not be accepted!\" % payload1\n )\n\n self.assertEqual(\n self._token_checker.valid_token_to_id(token2), None,\n \"Token with payload %s should not be accepted!\" % payload2\n )", "def test_with_unpermitted_token(self):\n email_text = self.email_template % self.token.uuid\n assert not save_from_email_reply(email_text)", "def test_retry_badNonce(self,\n new_request_action: ACMERequestActions,\n new_rsa_privkey: rsa.RSAPrivateKey):\n req = new_request_action\n jws = JWSRS256(\n url=req.acme_dir['newAccount'],\n nonce='badNonce',\n jwk = JWKRSA(\n priv_key=new_rsa_privkey,\n n=new_rsa_privkey.public_key().public_numbers().n,\n e=new_rsa_privkey.public_key().public_numbers().e\n ),\n payload={\n 'termsOfServiceAgreed': True,\n 'contact': TEST_CONTACTS\n },\n )\n jws.sign()\n resp = req.new_account(jws)\n assert resp.status_code == 201", "def token_is_stale(self):\n return self.m_token_expiry < datetime.datetime.now(tz=pytz.utc)", "def verify_is_allowed(self):\n if (\n self.throttling_enabled\n and self.throttling_failure_count > 0\n and self.throttling_failure_timestamp is not None\n ):\n now = timezone.now()\n delay = (now - self.throttling_failure_timestamp).total_seconds()\n # Required delays should be 1, 2, 4, 8 ...\n delay_required = self.get_throttle_factor() * (\n 2 ** (self.throttling_failure_count - 1)\n )\n if delay < delay_required:\n return (\n False,\n {\n 'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,\n 'failure_count': self.throttling_failure_count,\n 'locked_until': self.throttling_failure_timestamp\n + timedelta(seconds=delay_required),\n },\n )\n\n return super().verify_is_allowed()", "def testExpiredClientApprovalIsNoLongerValid(self):\n self.InitDefaultRouter()\n\n client_id = self.SetupClient(0)\n gui_test_lib.CreateFileVersion(client_id, \"fs/os/foo\")\n\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n with test_lib.FakeTime(100.0, increment=1e-3):\n self.RequestAndGrantClientApproval(\n client_id, requestor=self.test_username)\n\n # This should work now.\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n token_expiry = config.CONFIG[\"ACL.token_expiry\"]\n\n # Make sure the caches are reset.\n self.ClearCache()\n\n # This is close to expiry but should still work.\n with test_lib.FakeTime(100.0 + token_expiry - 100.0):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()\n\n # Make sure the caches are reset.\n self.ClearCache()\n\n # Past expiry, should fail.\n with test_lib.FakeTime(100.0 + token_expiry + 100.0):\n with self.assertRaises(grr_api_errors.AccessForbiddenError):\n self.api.Client(client_id).File(\"fs/os/foo\").Get()", "def _get_nonce(self, url):\n 
action = LOG_JWS_GET_NONCE()\n if len(self._nonces) > 0:\n with action:\n nonce = self._nonces.pop()\n action.add_success_fields(nonce=nonce)\n return succeed(nonce)\n else:\n with action.context():\n return (\n DeferredContext(self.head(url))\n .addCallback(self._add_nonce)\n .addCallback(lambda _: self._nonces.pop())\n .addCallback(tap(\n lambda nonce: action.add_success_fields(nonce=nonce)))\n .addActionFinish())", "def clean_timestamp(self):\n ts = self.cleaned_data[\"timestamp\"]\n if time.time() - ts > DEFAULT_COMMENTS_TIMEOUT:\n raise forms.ValidationError(\"Timestamp check failed\")\n return ts", "def check_replay(self, timestamp, *context):\n\n new_timestamp = self.get_signingTime()\n if timestamp is not None and timestamp > new_timestamp:\n if context:\n context = \" (\" + \" \".join(context) + \")\"\n raise rpki.exceptions.CMSReplay(\n \"CMS replay: last message %s, this message %s%s\" % (\n timestamp, new_timestamp, context))\n return new_timestamp", "def is_valid(self):\n return self.startTime <= ApiKey.get_now() < self.endTime", "def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False", "def suc_nonce(self, nonce = None):\n\n # if we don't provide a nonce. We will use the internal one\n if nonce is None:\n nonce = self.nonce\n\n # We convert the nonce in bit in order to work on it\n bit_nonce = int_to_bitstr(nonce, 32)\n\n \"\"\" Generate the feedback bit based on the nonce's \n second half, because the last 16 bits of the nonce is\n identical to the 16 bits prng state. \"\"\"\n fbit = self.prng_feedback(bit_nonce[16,:])\n\n # The left bit is discarded and the feedback bit is added\n nonce = bit_nonce[1:] + fbit\n\n # We will update the internal nonce/prng to the suc(nonce/prng)\n if nonce is None:\n\n # The internal prng is updated with the second part of the nonce\n self.prng = bitstr_to_int(bit_nonce[16,:])\n self.nonce = bitstr_to_int(bit_nonce)\n\n # Return nonce, it will be sent to the reader\n return self.nonce\n else:\n return bitstr_to_int(nonce)", "def _validate_timestamp(timestamp):\n dts = datetime.datetime.utcnow()\n current_time = round(time.mktime(dts.timetuple()) + dts.microsecond/1e6)\n if (timestamp - current_time) > SYNC_TOLERANCE:\n raise InvalidTransaction(\n 'Timestamp must be less than local time.'\n ' Expected {0} in ({1}-{2}, {1}+{2})'.format(\n timestamp, current_time, SYNC_TOLERANCE))", "def getUIDValidity(self):\n return 42", "def test_expire_ban(self):\n pass", "def valid(self):\n return self.expiry > timezone.now()", "def _about_to_expire(self, secret: Secret) -> bool:\n return secret.is_expired(datetime.now(UTC) + self.expiry_margin)", "def _add_nonce(self, response):\n nonce = response.headers.getRawHeaders(\n REPLAY_NONCE_HEADER, [None])[0]\n with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action:\n if nonce is None:\n raise errors.MissingNonce(response)\n else:\n try:\n decoded_nonce = Header._fields['nonce'].decode(\n nonce.decode('ascii')\n )\n action.add_success_fields(nonce=decoded_nonce)\n except DeserializationError as error:\n raise errors.BadNonce(nonce, error)\n self._nonces.add(decoded_nonce)\n return response", "def clean_security_hash(self):\n security_hash_dict = {\n 'content_type': self.data.get(\"content_type\", \"\"),\n 'object_pk': self.data.get(\"object_pk\", \"\"),\n 'timestamp': self.data.get(\"timestamp\", \"\"),\n }\n expected_hash = self.generate_security_hash(**security_hash_dict)\n actual_hash = 
self.cleaned_data[\"security_hash\"]\n if not constant_time_compare(expected_hash, actual_hash):\n raise forms.ValidationError(\"Security hash check failed.\")\n return actual_hash", "def __expired_timestamp(self, timestamp):\n return int(time.time()) > timestamp + self.__ttl" ]
[ "0.65487677", "0.6318106", "0.6318106", "0.63045347", "0.623894", "0.618787", "0.61794573", "0.6055988", "0.59781826", "0.59658813", "0.59374857", "0.59365934", "0.59196234", "0.5910003", "0.5893382", "0.5891976", "0.58326024", "0.5790357", "0.5772316", "0.5751489", "0.57323647", "0.5717647", "0.57078433", "0.56723195", "0.56435466", "0.5622025", "0.55843925", "0.55049086", "0.5487869", "0.5466227", "0.5432646", "0.54289323", "0.5417357", "0.5416008", "0.5412118", "0.53951234", "0.5393745", "0.538952", "0.53754574", "0.53700525", "0.53401405", "0.53399974", "0.5338019", "0.5336982", "0.533343", "0.53244174", "0.5277802", "0.5253266", "0.5240912", "0.52380127", "0.52320987", "0.522847", "0.52278054", "0.5221542", "0.5214335", "0.52104026", "0.5209037", "0.52026606", "0.5198866", "0.5198609", "0.5188097", "0.51805854", "0.517854", "0.51773345", "0.51765317", "0.5173648", "0.5171428", "0.51687264", "0.5163966", "0.5161194", "0.5160372", "0.5148204", "0.5122102", "0.5116445", "0.51012206", "0.5099442", "0.5099344", "0.5099344", "0.5098412", "0.5096843", "0.50895137", "0.5089043", "0.50890064", "0.5083095", "0.5082625", "0.5080561", "0.50786185", "0.5074826", "0.50596416", "0.5046258", "0.5042819", "0.5040704", "0.50229627", "0.50199044", "0.5018729", "0.5016918", "0.5010341", "0.5008486", "0.50025827", "0.49908984" ]
0.694894
0
Map a Juniper SRX Policy object into an XML config element
def to_xml(self):
    policy_element = create_element('policy')
    create_element('name', text=self.name, parent=policy_element)
    match_element = create_element('match', parent=policy_element)
    for s in self.src_addresses:
        create_element('source-address', text=s.name, parent=match_element)
    for d in self.dst_addresses:
        create_element('destination-address', text=d.name, parent=match_element)
    then_element = create_element('then', parent=policy_element)
    create_element(JuniperSRXPolicy.ActionMap[self.action], parent=then_element)
    log_element = create_element('log', parent=then_element)
    for log_type in self.logging:
        create_element(JuniperSRXPolicy.LoggingMap[log_type], parent=log_element)
    return policy_element
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _wrap_policy(policy_doc):\n return {\"IAMPolicy\": policy_doc}", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def translate_policy(policy: dict):\n if 'PolicyName' in policy:\n # This is a normal policy that should not be expanded\n return policy\n template_name = next(iter(policy))\n template_parameters = policy[template_name]\n try:\n # 'convert' will return a list of policy statements\n policy_document = processor.convert(template_name, template_parameters)\n except InsufficientParameterValues as e:\n # Exception's message will give lot of specific details\n raise ValueError(str(e))\n except InvalidParameterValues:\n raise ValueError(\"Must specify valid parameter values for policy template '{}'\".format(template_name))\n return {\n \"PolicyName\": template_name + '-' + str(uuid.uuid4()),\n \"PolicyDocument\": policy_document\n }", "def policy_str(self): # -> str:\n ...", "def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config", "def create_export_policy():\n config = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return config", "def module_config_template():\n\n d = {\"AWSPricePerformancePublisher\": {\n \"module\": \"modules.AWS.publishers.AWS_price_performance\",\n \"name\": \"AWSPricePerformancePublisher\",\n }, }\n print(\"Entry in channel cofiguration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")\n print(\"\\t publish_to_graphite - publish to graphite if True\")\n print(\"\\t graphite_host - graphite host name\")", "def test_create_hyperflex_ucsm_config_policy(self):\n pass", "def test_create_hyperflex_sys_config_policy(self):\n pass", "def _config(self):\n tmpl = self._template_interface\n for p in tmpl._params:\n setattr(self, p._name, p.get_value())", "def __repr__(self) -> str:\n view = {\n \"server\": self.server,\n \"access-token\": 'yes' if self.token is not None else 'no',\n \"insecure\": self.insecure,\n \"output\": self.output,\n \"verbose\": self.verbose,\n }\n\n return \"<Configuration({})\".format(view)", "def generate_puppet_resource(self):\n\t\tself.puppet_resource = Template(\"\"\"\n# -- BEGIN \"$domain\" --\napache::vhost::enable { \"$user\": }\n# -- END \"$domain\" --\n\"\"\").safe_substitute({\n\t\t\t\t\"user\": self.argv.get('user'),\n\t\t})", "def policy_repr(self, policy):\n return policy.__repr__()", "def test_patch_hyperflex_ucsm_config_policy(self):\n pass", "def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})", "def update_Policy(self,inputpolicy):\n \n policyob = self.SD_Map.retrieve_ob(inputpolicy)\n policyob.values[-1] = self.PolicyDicts[inputpolicy][self.translate(self.policy_option_vars[inputpolicy].get(),\n input_language = self.language,\n output_language = 'english')]", "def test_create_hyperflex_node_config_policy(self):\n pass", "def test_update_hyperflex_ucsm_config_policy(self):\n pass", "def policy_alias(self):", "def get_policy_info(self):\n policy_info = []\n for pol in self:\n # delete from /info if deprecated\n if pol.is_deprecated:\n continue\n policy_entry = {}\n 
policy_entry['name'] = pol.name\n if pol.is_default:\n policy_entry['default'] = pol.is_default\n policy_info.append(policy_entry)\n return policy_info", "def from_xml_node(cls, xml_node):\n policies = []\n for policy_node in xml_node.iter(tag=xml_tags.Elements.POLICY):\n policies.append(Policy.from_xml_node(policy_node))\n return cls(policies)", "def test_patch_hyperflex_node_config_policy(self):\n pass", "def test_get_hyperflex_sys_config_policy_by_moid(self):\n pass", "def add_to_pr_export(self, exp_template):", "def apply_policy(self, policy):\n tenant_name = policy['tenant_name']\n fw_id = policy['fw_id']\n fw_name = policy['fw_name']\n LOG.debug(\"asa_apply_policy: tenant=%s fw_id=%s fw_name=%s\",\n tenant_name, fw_id, fw_name)\n cmds = [\"conf t\", \"changeto context \" + tenant_name]\n\n rule_dict = policy['rules']\n for rule_id in rule_dict:\n rule = rule_dict[rule_id]\n protocol = rule['protocol']\n name = rule['name']\n enabled = rule['enabled']\n dst_port = rule['destination_port']\n src_port = rule['source_port']\n\n if (rule['source_ip_address'] is not None):\n src_ip = IPNetwork(rule['source_ip_address'])\n else:\n src_ip = IPNetwork('0.0.0.0/0')\n\n if (rule['destination_ip_address'] is not None):\n dst_ip = IPNetwork(rule['destination_ip_address'])\n else:\n dst_ip = IPNetwork('0.0.0.0/0')\n\n if rule['action'] == 'allow':\n action = 'permit'\n else:\n action = 'deny'\n\n LOG.debug(\"rule[%s]: name=%s enabled=%s prot=%s dport=%s sport=%s \\\n dip=%s %s sip=%s %s action=%s\",\n rule_id, name, enabled, protocol, dst_port, src_port,\n dst_ip.network, dst_ip.netmask,\n src_ip.network, src_ip.netmask, action)\n\n acl = \"access-list \"\n acl = (acl + tenant_name + \" extended \" + action + \" \" +\n protocol + \" \")\n if (rule['source_ip_address'] is None):\n acl = acl + \"any \"\n else:\n acl = acl + str(src_ip.network) + \" \" + (\n str(src_ip.netmask) + \" \")\n if (src_port is not None):\n if (':' in src_port):\n range = src_port.replace(':', ' ')\n acl = acl + \"range \" + range + \" \"\n else:\n acl = acl + \"eq \" + src_port + \" \"\n if (rule['destination_ip_address'] is None):\n acl = acl + \"any \"\n else:\n acl = acl + str(dst_ip.network) + \" \" + \\\n str(dst_ip.netmask) + \" \"\n if (dst_port is not None):\n if (':' in dst_port):\n range = dst_port.replace(':', ' ')\n acl = acl + \"range \" + range + \" \"\n else:\n acl = acl + \"eq \" + dst_port + \" \"\n if (enabled is False):\n acl = acl + 'inactive'\n\n # remove the old ace for this rule\n if (rule_id in self.rule_tbl):\n cmds.append('no ' + self.rule_tbl[rule_id])\n\n self.rule_tbl[rule_id] = acl\n if tenant_name in self.tenant_rule:\n if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:\n self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)\n cmds.append(acl)\n cmds.append(\"access-group \" + tenant_name + \" global\")\n cmds.append(\"write memory\")\n\n LOG.debug(cmds)\n data = {\"commands\": cmds}\n return self.rest_send_cli(data)", "def apply_policy(cls, metadata, policy):\r\n for attr, value in policy.iteritems():\r\n attr = cls._translate(attr)\r\n if attr not in cls.fields:\r\n # Store unknown attributes coming from policy.json\r\n # in such a way that they will export to xml unchanged\r\n metadata['xml_attributes'][attr] = value\r\n else:\r\n metadata[attr] = value", "def getConfigPage():\r\n\r\n\r\n #print os.getcwd()\r\n #os.chdir('..')\r\n #os.chdir('D:\\Stone\\Python\\Python_Insurance')\r\n #print os.getcwd()\r\n\r\n #configPage = ConfigObj('pagesConf.conf')\r\n configPage = 
ConfigObj('thirdevalConf.conf')\r\n\r\n #configPagePolicyHolder_Usr = configPage['PolicyHolder_Usr']\r\n #dictFirst = configPagePolicyHolder_Usr.dict()\r\n #print dictFirst\r\n #j = 2\r\n #for j in range(j):\r\n # for member in dictFirst:\r\n # if 'titleText' in dictFirst[member]:\r\n # print member\r\n # dictSecond = dictFirst[member]\r\n # print dictSecond\r\n # break\r\n # dictFirst.pop(member)\r\n #i = i+1\r\n #print i\r\n #for item in configPagePolicyHolder_Usr[member]:\r\n # print configPagePolicyHolder_Usr[member][item]\r\n #for item in member:\r\n # print member[item]\r\n #print configPage\r\n #print configPagePolicyHolder_Usr\r\n #configPagePolicyHolder_Usr = configPage['PolicyHolder_Usr']\r\n #print configPagePolicyHolder_Usr\r\n\r\n #print configPagePolicyHolder_Usr.as_int( 'rowNumber')\r\n #print configPage.as_bool(\"hasPolicyHolder_Usr\")\r\n return configPage", "def _serialize(self, policy):\n return {\n \"rid\": policy.rid,\n \"actions\": list(policy.actions),\n \"consumer\": url_for(\n \"rest_endpoints.consumers\",\n consumer_key=policy.consumer_key)\n }", "def test_update_hyperflex_node_config_policy(self):\n pass", "def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }", "def test_update_hyperflex_sys_config_policy(self):\n pass", "def test_create_hyperflex_proxy_setting_policy(self):\n pass", "def _setup_policy_object(policy_type,\n policy_entitlement_type,\n service_target_type,\n policy_membership_type,\n container_object,\n name: str,\n priority: int,\n description: Optional[str] = None,\n keywords: Optional[str] = None,\n caption: Optional[str] = None,\n available_to_subunits: Optional[bool] = None,\n enabled: Optional[bool] = None,\n membership_type: Optional[str] = None,\n membership_role_dns: Optional[List[str]] = None,\n entitlements: List[Dict] = []):\n\n policy_object = policy_type()\n\n if description is not None:\n policy_object['description'] = description\n policy_object['name'] = name\n\n if keywords is not None:\n policy_object['keywords'] = keywords\n\n if caption is not None:\n policy_object['caption'] = caption\n\n entitlement_list = []\n\n # Iterate through the entitlements argument and add each one to the request\n for entitlement in entitlements:\n entitlement_object = policy_entitlement_type()\n service_target_object = service_target_type()\n\n # Set type 0 for a service type (specify the name of the service profile in the name. MAKE SURE IT IS EXACT-\n # IT IS CASE_SENSITIVE).\n # Set type 1 for a specific service (specify it's DN in the name).\n # Set type 2 for all services (specify * as the name).\n # Set type 3 for a service selection policy (specify the name of the service profile in the name. MAKE SURE IT\n # IS EXACT- IT IS CASE_SENSITIVE). 
The service selection policy will be automatically selected based on the\n # service profile selected.\n\n if entitlement['target_type'] is not None:\n if entitlement['target_type'] == 'all':\n service_target_object['name'] = '*'\n service_target_object['type'] = '2'\n elif entitlement['target_type'] == 'type':\n service_target_object['name'] = entitlement['service_type']\n service_target_object['type'] = '0'\n elif entitlement['target_type'] == 'policy':\n service_target_object['name'] = entitlement['service_type']\n service_target_object['type'] = '3'\n elif entitlement['target_type'] == 'specific':\n service_target_object['name'] = entitlement['service_dn']\n service_target_object['type'] = '1'\n else:\n raise ValueError(\"Invalid target_type value in entitlement. Valid values are 'all', 'type', 'policy', \"\n \"or 'specific'.\")\n\n entitlement_object['serviceTarget'] = service_target_object\n\n if entitlement['automatic'] is not None:\n # The type value should be set to 0 for manual provisioning, or 1 for automatic provisioning\n if entitlement['automatic']:\n entitlement_object['type'] = 1\n else:\n entitlement_object['type'] = 0\n\n if entitlement['workflow_dn'] is not None:\n entitlement_object['processDN'] = str(entitlement['workflow_dn'])\n\n if entitlement['ownership_type'] is not None:\n if entitlement['ownership_type'].lower() == 'all':\n entitlement_object['ownershipType'] = '*'\n elif entitlement['ownership_type'].lower() == 'device':\n entitlement_object['ownershipType'] = 'Device'\n elif entitlement['ownership_type'].lower() == 'individual':\n entitlement_object['ownershipType'] = 'Individual'\n elif entitlement['ownership_type'].lower() == 'system':\n entitlement_object['ownershipType'] = 'System'\n elif entitlement['ownership_type'].lower() == 'vendor':\n entitlement_object['ownershipType'] = 'Vendor'\n else:\n raise ValueError(\"Invalid value for entitlement ownership_type. Valid values are 'all', 'device', \"\n \"'individual', 'system', or 'vendor'.\")\n\n entitlement_list.append(entitlement_object)\n\n policy_object['entitlements'] = {'item': entitlement_list}\n\n # Add membership information to the request\n membership_list = []\n membership_object = policy_membership_type()\n\n if membership_type is not None:\n # Set type 2 for all users in the organization. Specify '*' as the name.\n # Set type 3 to specify a specific role. Specify the role DN as the name. Create more membership objects for\n # more roles.\n # Set type 4 for all other users who are not granted to the entitlement(s) defined by this provisioning policy\n # via other policies. Specify '*' as the name.\n if membership_type == 'all':\n membership_object['name'] = '*'\n membership_object['type'] = '2'\n membership_list.append(membership_object)\n elif membership_type == 'other':\n membership_object['name'] = '*'\n membership_object['type'] = '4'\n membership_list.append(membership_object)\n elif membership_type == 'roles':\n for role in membership_role_dns:\n membership_object = policy_membership_type()\n membership_object['name'] = str(role)\n membership_object['type'] = '3'\n membership_list.append(membership_object)\n else:\n raise ValueError(\"Invalid value for membership_type. Valid values are 'all', 'other', or 'roles'.\")\n\n policy_object['membership'] = {'item': membership_list}\n\n if priority is not None:\n if priority < 1:\n raise ValueError(\"Invalid priority value. 
Priority must be an integer greater than 0.\")\n policy_object['priority'] = priority\n\n if available_to_subunits is not None:\n # Scope should be set to 1 for 'this business unit only' and 2 for 'this business unit and its subunits'\n if available_to_subunits:\n policy_object['scope'] = 2\n else:\n policy_object['scope'] = 1\n\n if container_object is not None:\n policy_object['organizationalContainer'] = container_object\n\n if enabled is not None:\n policy_object['enabled'] = enabled\n\n return policy_object", "def test_patch_hyperflex_sys_config_policy(self):\n pass", "def create_xml_snippet(self, cutomized_config):\n conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (cutomized_config)\n return conf_xml_snippet", "def GatherExtensionPolicySettings(self, settings, policies):\n for field in settings.DESCRIPTOR.fields:\n # |field| is the entry for a specific policy in the top-level\n # ExternalPolicyData proto.\n field_value = policies.get(field.name)\n if field_value is None:\n continue\n\n field_descriptor = settings.DESCRIPTOR.fields_by_name[field.name]\n self.SetProtobufMessageField(settings, field_descriptor,\n field_value)", "def policies(self, value):\n policies = {}\n for domain, obj in six.iteritems(value):\n if isinstance(obj, Policy):\n policies[domain] = obj\n else:\n policies[domain] = Policy(obj, self.policy_aliases)\n self._set_attr('policies', policies)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n policy_type: Optional[pulumi.Input[str]] = None,\n tag_policy: Optional[pulumi.Input[pulumi.InputType['MonitorConfigPolicyTagPolicyArgs']]] = None,\n __props__=None):\n ...", "def policy_template(cls):\n return relationship.many_to_one(cls, 'policy_template')", "def _get_policies(self, cr, uid, context=None):\n return [('optional', _('Optional')),\n ('always', _('Always')),\n ('never', _('Never'))]", "def create_license_policy_config_file(\n directory,\n name,\n description,\n allow_unknown_licenses,\n package_query_string,\n spdx_identifiers,\n on_violation_quarantine,\n):\n\n data = {\n \"name\": name,\n \"description\": description,\n \"spdx_identifiers\": list(spdx_identifiers),\n \"allow_unknown_licenses\": allow_unknown_licenses,\n \"package_query_string\": package_query_string,\n \"on_violation_quarantine\": on_violation_quarantine,\n }\n\n file_path = directory / \"LICENSE-POLICY-CONFIG.json\"\n file_path.write_text(str(json.dumps(data)))\n return file_path", "def make_website_config(xml_fragment):\n return '<?xml version=\"1.0\" encoding=\"UTF-8\"?><WebsiteConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">' + xml_fragment + '</WebsiteConfiguration>'", "def adapter_policy_create(handle, name, descr=\"\", parent_dn=\"org-root\"):\n\n from ucsmsdk.mometa.adaptor.AdaptorHostEthIfProfile import \\\n AdaptorHostEthIfProfile\n\n obj = handle.query_dn(parent_dn)\n if not obj:\n raise ValueError(\"org '%s' does not exist\" % parent_dn)\n\n mo = AdaptorHostEthIfProfile(parent_mo_or_dn=obj, name=name, descr=descr)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo", "def get_policies_default(config, n_policies, obs_space, act_space, policy_template=\"player_%d\"):\r\n policies = {policy_template % i: get_agent_config(agent_id=i, which=config['_policies'][i],\r\n config=config,\r\n obs_space=obs_space, act_space=act_space)\r\n for i in range(1, 1 + n_policies)}\r\n return policies", "def CreatePolicyForExternalPolicyData(self, policy_key):\n settings = ep.ExternalPolicyData()\n data = 
self.server.ReadPolicyDataFromDataDir(policy_key)\n if data:\n settings.download_url = urlparse.urljoin(\n self.server.GetBaseURL(), 'externalpolicydata?key=%s' % policy_key)\n settings.secure_hash = hashlib.sha256(data).digest()\n return settings.SerializeToString()\n else:\n return None", "def to_xml(self, resource):\n stream = StringIO.StringIO()\n stream.write(\"<?xml version='1.0' encoding='UTF-8'?>\")\n stream.write('<%s>' % \"GpsRO\")\n\n for item in resource.items():\n key, value = item\n if isinstance(value, str) or isinstance(value, unicode):\n stream.write('\\n<%s>%s</%s>' % (key, value, key))\n else:\n stream.write('\\n<%s>%d</%s>' % (key, value, key))\n\n stream.write('\\n</%s>' % \"GpsRO\")\n stream.seek(0)\n return stream.read()", "def collect_physpool_element(cfg):\n physpool_el = cfg.find_children(\"physical_pool\")\n cfg['IMAGEPOOLINSERT'] = '{}'\n\n if not physpool_el:\n cfg['DOPOOLRELOC'] = '0'\n return \n \n policydict = dict([(el.name, el.value) for el in cfg.find_child('machine').find_children('cache_policy')])\n policydict[None] = '-1'\n\n # Make a new list with DEFAULT_PHYSPOOL at the front\n # and with everything else behind\n physpool_el = [el for el in physpool_el if el.name == 'DEFAULT_PHYSPOOL'] + \\\n [el for el in physpool_el if el.name != 'DEFAULT_PHYSPOOL']\n\n handle_image_pools(cfg, physpool_el)\n handle_island_pools(cfg, physpool_el)\n\n #pool_configs\n doreloc = '0'\n for el in physpool_el:\n verify_name(cfg, el.name)\n #\n # for each region:\n # [0] = base address\n # [1] = size in bytes\n # [2] = number of bytes to be added to the pool descriptor\n # in the memory image and then subtracted during bootup;\n # this can either be from the 'padding' attribute or it\n # can represent memory \"stolen\" from the end of the\n # pool for other purposes, such as the image build utility.\n # [3] = name of the region, or None if no name exists\n # [4] = cache policy for the region (-1 if none specified)\n #\n r = [[x.base, x.size, getattr(x,'padding',0), getattr(x,'name',None),\n int(policydict[getattr(x,'cache_policy',None)],0)] for x in el.find_children('region')]\n add_physpool(cfg, el.name, r)\n\n add_physpool(cfg, '')\n cfg['DOPOOLRELOC'] = doreloc\n cfg['PHYSPOOLS'] = ' \\\\\\n'.join(cfg.physpools)", "def _TranslatePolicy(self, pol, exp_info):\n # pylint: disable=unused-argument\n raise Error('%s does not implement _TranslatePolicies()' % self._PLATFORM)", "def test_get_hyperflex_node_config_policy_list(self):\n pass", "def test_get_hyperflex_ucsm_config_policy_list(self):\n pass", "def post(self, nodepool_policy):\n context = pecan.request.context\n nodepool_policy_dict = nodepool_policy.as_dict()\n\n print 'aaaaaa'\n print context.project_id\n print context.user_id\n print 'aaaaaaa'\n nodepool_policy_dict['project_id'] = context.project_id\n nodepool_policy_dict['user_id'] = context.user_id\n\n nodepool_policy = objects.NodePoolPolicy(context, **nodepool_policy_dict)\n nodepool_policy.create()\n\n # Set the HTTP Location Header\n # pecan.response.location = link.build_url('nodepool_policies', nodepool_policy.id)\n return NodePoolPolicy.convert_with_links(nodepool_policy)\n\n # res_nodepool_policy = pecan.request.rpcapi.nodepool_policy_create(nodepool_policy,\n # nodepool_policy.nodepool_policy_create_timeout)\n\n # # Set the HTTP Location Header\n # pecan.response.location = link.build_url('nodepool_policies', res_nodepool_policy.uuid)\n # return NodePoolPolicy.convert_with_links(res_nodepool_policy)", "def extend_tag_policy_with_hdfs(policy):\n if 
get_resource_type(policy) != \"tag\":\n raise AttributeError(\"Policy does not have resource type tag. Policy: {}\".format(policy[\"name\"]))\n policy_template_tag = copy.deepcopy(policy)\n if \"policyItems\" in policy:\n policy_template_tag[\"policyItems\"] = []\n for policy_item in policy[\"policyItems\"]:\n policy_item_copy = copy.deepcopy(policy_item)\n policy_item_copy[\"accesses\"] = _expand_hive_tag_accesses_to_file_accesses(policy_item_copy[\"accesses\"])\n policy_template_tag[\"policyItems\"].append(policy_item_copy)\n if \"denyPolicyItems\" in policy:\n policy_template_tag[\"denyPolicyItems\"] = []\n for policy_item in policy[\"denyPolicyItems\"]:\n policy_item_copy = copy.deepcopy(policy_item)\n policy_item_copy[\"accesses\"] = _expand_hive_tag_accesses_to_file_accesses(policy_item_copy[\"accesses\"])\n policy_template_tag[\"denyPolicyItems\"].append(policy_item_copy)\n return policy_template_tag", "def xml_create_hybrid_element(self, doc, element):\n\n # Create the hybrid element and add it to the higher level element.\n hybrid_element = doc.createElement('hybrid')\n element.appendChild(hybrid_element)\n\n # Set the hybridisation attributes.\n hybrid_element.setAttribute('desc', 'Data pipe hybridisation information')\n\n # Create an element to store the pipes list.\n list_element = doc.createElement('pipes')\n hybrid_element.appendChild(list_element)\n\n # Add the pipes list.\n text_val = doc.createTextNode(str(self.hybrid_pipes))\n list_element.appendChild(text_val)", "def initialize_policies(self, policy_collection, options):", "def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):\r\n condition = {}\r\n if expires:\r\n condition[\"DateLessThan\"] = {\"AWS:EpochTime\": expires}\r\n if valid_after:\r\n condition[\"DateGreaterThan\"] = {\"AWS:EpochTime\": valid_after}\r\n if ip_address:\r\n if '/' not in ip_address:\r\n ip_address += \"/32\"\r\n condition[\"IpAddress\"] = {\"AWS:SourceIp\": ip_address}\r\n policy = {\"Statement\": [{\r\n \"Resource\": resource,\r\n \"Condition\": condition}]}\r\n return json.dumps(policy, separators=(\",\", \":\"))", "def convert_payload_xml(payload):\n root = \"\"\"<SystemConfiguration><Component FQDD=\"iDRAC.Embedded.1\">{0}</Component></SystemConfiguration>\"\"\"\n attr = \"\"\n json_payload = {}\n for k, v in payload.items():\n key = re.sub(r\"(?<=\\d)\\.\", \"#\", k)\n attr += '<Attribute Name=\"{0}\">{1}</Attribute>'.format(key, v)\n json_payload[key] = v\n root = root.format(attr)\n return root, json_payload", "def xml(self, indent):\n if self.__commentOut:\n prefix = \"<!--\"\n suffix = \" -->\"\n else:\n prefix = \"\"\n suffix = \"\"\n hubs = self.__stringMap.keys()\n if self.OMIT_HUB_NUMBER or len(hubs) != 1:\n nStr = \"\"\n else:\n nStr = \" hub=\\\"%d\\\"\" % hubs[0]\n return \"%s%s<domConfigList%s>%s</domConfigList>%s\" % \\\n (prefix, indent, nStr, self.__fileName, suffix)", "def __init__(__self__,\n resource_name: str,\n args: MonitorConfigPolicyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n policy_type: Optional[pulumi.Input[str]] = None,\n tag_policy: Optional[pulumi.Input['MonitorConfigPolicyTagPolicyArgs']] = None):\n if policy_type is not None:\n pulumi.set(__self__, \"policy_type\", policy_type)\n if tag_policy is not None:\n pulumi.set(__self__, \"tag_policy\", tag_policy)", "def test_get_hyperflex_ucsm_config_policy_by_moid(self):\n pass", "def xml_out(db):\n stats = basic_stats(db)\n print('<?xml version=\"1.0\"?>')\n print('<idp-audit rps=\"%d\" 
logins=\"%d\" users=\"%d\">'\n % (stats['rps'], stats['logins'], stats['users']))\n for rp, i in list(db['rp'].items()):\n print(' <rp count=\"%d\">%s</rp>' % (i, rp))\n print(\"</idp-audit>\")", "def __init__(self, policy_id=None, policy_name=None, is_policy_enabled=None, policy_target_version=None, policy_deployment_method=None, software_title=None, software_title_configuration_id=None, pending=None, completed=None, deferred=None, failed=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._policy_id = None\n self._policy_name = None\n self._is_policy_enabled = None\n self._policy_target_version = None\n self._policy_deployment_method = None\n self._software_title = None\n self._software_title_configuration_id = None\n self._pending = None\n self._completed = None\n self._deferred = None\n self._failed = None\n self.discriminator = None\n\n if policy_id is not None:\n self.policy_id = policy_id\n if policy_name is not None:\n self.policy_name = policy_name\n if is_policy_enabled is not None:\n self.is_policy_enabled = is_policy_enabled\n if policy_target_version is not None:\n self.policy_target_version = policy_target_version\n if policy_deployment_method is not None:\n self.policy_deployment_method = policy_deployment_method\n if software_title is not None:\n self.software_title = software_title\n if software_title_configuration_id is not None:\n self.software_title_configuration_id = software_title_configuration_id\n if pending is not None:\n self.pending = pending\n if completed is not None:\n self.completed = completed\n if deferred is not None:\n self.deferred = deferred\n if failed is not None:\n self.failed = failed", "def update_by_config(self, policy_enabling_map):\n self.enabled_policies = []\n\n for policy_name, policy_config in policy_enabling_map.items():\n if not self._is_policy_exists(policy_name):\n self._warn_unexistent_policy(policy_name)\n continue\n\n if policy_config['enabled']:\n enabled_policy = self._get_policy(policy_name)\n self.enabled_policies.append(enabled_policy)", "def get_ad_extensions_mapping(self):\n return {\"stitching-element\":\n \"http://example.com/stitching-element\"} # /ad.xsd", "def GatherUserPolicySettings(self, settings, policies):\n for field in settings.DESCRIPTOR.fields:\n # |field| is the entry for a specific policy in the top-level\n # CloudPolicySettings proto.\n\n # Look for this policy's value in the mandatory or recommended dicts.\n if field.name in policies.get('mandatory', {}):\n mode = cp.PolicyOptions.MANDATORY\n value = policies['mandatory'][field.name]\n elif field.name in policies.get('recommended', {}):\n mode = cp.PolicyOptions.RECOMMENDED\n value = policies['recommended'][field.name]\n else:\n continue\n\n # Create protobuf message for this policy.\n policy_message = eval('cp.' 
+ field.message_type.name + '()')\n policy_message.policy_options.mode = mode\n field_descriptor = policy_message.DESCRIPTOR.fields_by_name['value']\n self.SetProtobufMessageField(policy_message, field_descriptor, value)\n settings.__getattribute__(field.name).CopyFrom(policy_message)", "def get_config_template(self) -> cconfig.Config:", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def __init__(__self__, *,\n policy_type: pulumi.Input[str],\n tag_policy: Optional[pulumi.Input['MonitorConfigPolicyTagPolicyArgs']] = None):\n pulumi.set(__self__, \"policy_type\", policy_type)\n if tag_policy is not None:\n pulumi.set(__self__, \"tag_policy\", tag_policy)", "def PolicyStatement(self) -> PolicyStatement:", "def test_get_hyperflex_sys_config_policy_list(self):\n pass", "def policyid(self, policyid):\n self._policyid = policyid", "def get_config_template() -> dict:\n return {\n VENE_PAYMENTS_BAMBORA_API_URL: (str, \"https://payform.bambora.com/pbwapi\"),\n VENE_PAYMENTS_BAMBORA_API_KEY: str,\n VENE_PAYMENTS_BAMBORA_API_SECRET: str,\n VENE_PAYMENTS_BAMBORA_PAYMENT_METHODS: list,\n }", "def policies(self, policies):\n\n self._policies = policies", "def logic_to_config(self):\n raise NotImplementedError", "def get_policy_config(\n platform,\n filters=None,\n prepend=True,\n pillar_key=\"acl\",\n pillarenv=None,\n saltenv=None,\n merge_pillar=True,\n only_lower_merge=False,\n revision_id=None,\n revision_no=None,\n revision_date=True,\n revision_date_format=\"%Y/%m/%d\",\n):\n if not filters:\n filters = []\n if merge_pillar and not only_lower_merge:\n # the pillar key for the policy config is the `pillar_key` itself\n policy_pillar_cfg = _get_pillar_cfg(\n pillar_key, saltenv=saltenv, pillarenv=pillarenv\n )\n # now, let's merge everything witht the pillar data\n # again, this will not remove any extra filters/terms\n # but it will merge with the pillar data\n # if this behaviour is not wanted, the user can set `merge_pillar` as `False`\n filters = _merge_list_of_dict(filters, policy_pillar_cfg, prepend=prepend)\n policy_object = _get_policy_object(\n platform,\n filters=filters,\n pillar_key=pillar_key,\n pillarenv=pillarenv,\n saltenv=saltenv,\n merge_pillar=merge_pillar,\n )\n policy_text = str(policy_object)\n return _revision_tag(\n policy_text,\n revision_id=revision_id,\n revision_no=revision_no,\n revision_date=revision_date,\n revision_date_format=revision_date_format,\n )", "def record_config(setup_state):\n bp.config = {k: v for k, v in setup_state.app.config.get_namespace('POKER_').items()}", "def configuration():", "def tag_policy(self) -> pulumi.Output[Optional['outputs.MonitorConfigPolicyTagPolicy']]:\n return pulumi.get(self, \"tag_policy\")", "def config(self) -> NamedTuple:", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def magic_config(self,parameter_s=''):\n \n page('Current configuration structure:\\n'+\n pformat(self.rc.dict()))", "def update_policy(self):\n pass", "def __init__(self):\n self.account = None\n self.typeInfo['account'] = 'string'\n \"\"\"the domain of the HealthCheck policy\"\"\"\n self.domain = None\n self.typeInfo['domain'] = 'string'\n \"\"\"the domain ID of the HealthCheck policy\"\"\"\n self.domainid = None\n self.typeInfo['domainid'] = 'string'\n \"\"\"the LB rule ID\"\"\"\n self.lbruleid = None\n self.typeInfo['lbruleid'] = 'string'\n \"\"\"the id 
of the zone the HealthCheck policy belongs to\"\"\"\n self.zoneid = None\n self.typeInfo['zoneid'] = 'string'\n \"\"\"the list of healthcheckpolicies\"\"\"\n self.healthcheckpolicy = []", "def create_conf_xml(self):\n path = os.path.join(\n self.buildout['buildout']['parts-directory'],\n self.name)\n if not os.path.isdir(path):\n os.makedirs(path)\n\n xml_path = os.path.join(path, 'uwsgi.xml')\n\n conf = \"\"\n for key, value in self.conf.items():\n if value.lower() in ('true', 'on', 'yes'):\n conf += \"<%s/>\\n\" % key\n elif value and value.lower() not in ('false', 'off', 'yes'):\n conf += \"<%s>%s</%s>\\n\" % (key, value, key)\n\n\n requirements, ws = self.egg.working_set()\n eggs_paths = [dist.location for dist in ws]\n eggs_paths.extend(self.get_extra_paths())\n # order preserving unique\n unique_egg_paths = []\n for p in eggs_paths:\n if p not in unique_egg_paths:\n unique_egg_paths.append(p)\n\n for path in map(realpath, unique_egg_paths):\n conf += \"<pythonpath>%s</pythonpath>\\n\" % path\n\n f = open(xml_path, 'w')\n f.write(\"<uwsgi>\\n%s</uwsgi>\" % conf)\n f.close()\n return xml_path", "async def _parse_policy(self, raw_policy):\n if raw_policy.get('AttachmentCount') > 0:\n policy_dict = {}\n policy_dict['id'] = policy_dict['name'] = raw_policy.get('PolicyName')\n policy_dict['description'] = raw_policy.get('Description')\n policy_dict['create_date'] = raw_policy.get('CreateDate')\n policy_dict['update_date'] = raw_policy.get('UpdateDate')\n policy_dict['attachment_count'] = raw_policy.get('AttachmentCount')\n policy_dict['type'] = raw_policy.get('PolicyType')\n policy_dict['default_version'] = raw_policy.get('DefaultVersion')\n\n policy_version = await self.facade.ram.get_policy_version(policy_dict['name'],\n policy_dict['type'],\n policy_dict['default_version'])\n policy_version['PolicyDocument'] = json.loads(policy_version['PolicyDocument'])\n # policy_dict['policy_document'] = policy_version['PolicyDocument']\n policy_dict['policy_document'] = policy_version\n\n policy_entities = await self.facade.ram.get_policy_entities(policy_dict['name'],\n policy_dict['type'])\n policy_dict['entities'] = {}\n if policy_entities['Users']['User']:\n policy_dict['entities']['users'] = []\n for user in policy_entities['Users']['User']:\n policy_dict['entities']['users'].append(user['UserName'])\n if policy_entities['Groups']['Group']:\n policy_dict['entities']['groups'] = []\n for group in policy_entities['Groups']['Group']:\n policy_dict['entities']['groups'].append(group['GroupName'])\n if policy_entities['Roles']['Role']:\n policy_dict['entities']['roles'] = []\n for role in policy_entities['Roles']['Role']:\n policy_dict['entities']['roles'].append(role['RoleName'])\n\n return policy_dict['id'], policy_dict\n else:\n return None, None", "def GenerateConfig(context):\n\n resources = [\n {\n 'name': 'auth_cloud_sql_client_to_cloud_sql_proxy_sa',\n 'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding',\n 'properties': {\n 'resource': context.env['project'],\n 'role': 'roles/cloudsql.client',\n 'member': 'serviceAccount:$(ref.cloud-sql-proxy-service-acc.email)'\n },\n }\n ]\n return {'resources': resources}", "def test_get_hyperflex_vcenter_config_policy_by_moid(self):\n pass", "def build(self):\n if ((self.allowMethods is None or len(self.allowMethods) == 0) and\n (self.denyMethods is None or len(self.denyMethods) == 0)):\n raise NameError(\"No statements defined for the policy\")\n\n policy = {\n 'principalId': self.principalId,\n 'policyDocument': {\n 'Version': 
self.version,\n 'Statement': []\n }\n }\n\n policy['policyDocument']['Statement'].extend(\n self._getStatementForEffect(\"Allow\", self.allowMethods))\n policy['policyDocument']['Statement'].extend(\n self._getStatementForEffect(\"Deny\", self.denyMethods))\n\n return policy", "def Attributes(self) -> PolicyStatementAttribute:", "def test_create_hyperflex_vcenter_config_policy(self):\n pass", "def test_get_hyperflex_proxy_setting_policy_list(self):\n pass", "def get_manifest_extensions_mapping(self):\n return {\"stitching-element\":\n \"http://example.com/stitching-element\"} # /manifest.xsd", "def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()", "def test_patch_hyperflex_vcenter_config_policy(self):\n pass", "def get_policy(self, *args, **kwargs):\r\n pass", "def generate_ipsec_conf(strongswan_obj, **kwargs):\r\n return strongswan_obj.create_ipsec_conf(**kwargs)", "def create_export_policy(l3out_name):\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": l3out_name,\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return export_policy", "def __init__(self, meta, policy):\n assert isinstance(policy, (PublicResolution.Implementation, LinearResolution.Implementation))\n assert policy.meta in meta._policies\n super(DynamicResolution.Implementation, self).__init__(meta)\n self._policy = policy" ]
[ "0.58459353", "0.57357043", "0.52592963", "0.52169687", "0.5177298", "0.5132841", "0.5117141", "0.5053394", "0.50252014", "0.50223446", "0.5019244", "0.50055027", "0.49536827", "0.4945237", "0.49375263", "0.49065456", "0.49007678", "0.48749763", "0.48602158", "0.48564184", "0.4848674", "0.48378274", "0.48179054", "0.48055226", "0.48010263", "0.47990635", "0.4791481", "0.47764415", "0.4771747", "0.4757154", "0.47477064", "0.47474083", "0.47291836", "0.4728305", "0.4708772", "0.4691746", "0.46863648", "0.46811506", "0.46798456", "0.4679306", "0.46740013", "0.46575406", "0.46556267", "0.4655385", "0.46536985", "0.4623571", "0.46177012", "0.46170703", "0.46151924", "0.4598377", "0.45965794", "0.45962933", "0.45951387", "0.4592117", "0.4580315", "0.45761642", "0.4575893", "0.4563503", "0.45630774", "0.45618775", "0.45606357", "0.4559682", "0.45579937", "0.45546195", "0.45483527", "0.45480368", "0.45385075", "0.45292693", "0.45291036", "0.45281583", "0.4527138", "0.4513961", "0.4509599", "0.4500015", "0.44961756", "0.44804677", "0.44801986", "0.44721016", "0.4468488", "0.44677362", "0.44677362", "0.44677362", "0.4467502", "0.44638565", "0.4459815", "0.44594952", "0.44503927", "0.44440344", "0.44383729", "0.4438136", "0.44380835", "0.44329974", "0.44309536", "0.4427465", "0.4425852", "0.4413416", "0.4403492", "0.44018006", "0.43990567", "0.43927163" ]
0.6687791
0
Creates a new ColumnInfo and updates the size
def update(self, size) -> 'ColumnInfo':
    return ColumnInfo(
        size,
        self.directive,
        self.period
    )
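A minimal usage sketch of the copy-on-update pattern above. The ColumnInfo fields (size, directive, period) mirror the snippet, but the class definition and the calling code here are assumptions added only for illustration:

class ColumnInfo:
    # Hypothetical three-field container; the real class likely carries more behaviour.
    def __init__(self, size, directive, period):
        self.size = size
        self.directive = directive
        self.period = period

    def update(self, size) -> 'ColumnInfo':
        # Return a new ColumnInfo instead of mutating in place,
        # so existing references keep seeing the old size.
        return ColumnInfo(size, self.directive, self.period)

info = ColumnInfo(10, 'ma', 5)
resized = info.update(20)
assert info.size == 10 and resized.size == 20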
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddColumnInfo(self, colInfo):\r\n \r\n self._columns.append(colInfo)\r\n self._total_col_width += colInfo.GetWidth()\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True", "def AddColumnInfo(self, colInfo):\r\n\r\n self._header_win.AddColumnInfo(colInfo)\r\n self.DoHeaderLayout()", "def InsertColumnInfo(self, before, colInfo):\r\n\r\n if before < 0 or before >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n self._columns.insert(before, colInfo)\r\n self._total_col_width += colInfo.GetWidth()\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True", "def InsertColumnInfo(self, before, colInfo):\r\n\r\n self._header_win.InsertColumnInfo(before, colInfo)\r\n self._header_win.Refresh()", "def SetColumn(self, column, info):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n w = self._columns[column].GetWidth()\r\n self._columns[column] = info\r\n \r\n if w != info.GetWidth():\r\n self._total_col_width += info.GetWidth() - w\r\n self._owner.AdjustMyScrollbars()\r\n \r\n self._owner._dirty = True", "def _update_width(self, is_commit_in_existing_columns):\n max_cols = self.num_columns + self.num_parents\n\n # Even if the current commit has no parents to be printed, it still\n # takes up a column for itself.\n if self.num_parents < 1:\n max_cols += 1\n\n # We added a column for the current commit as part of self.num_parents.\n # If the current commit was already in self.columns, then we have double\n # counted it.\n if is_commit_in_existing_columns:\n max_cols -= 1\n\n # Each column takes up 2 spaces\n self.width = max_cols * 2", "def UpdateColumns(self):\r\n data = self.data\r\n columns = data.getParam('columns',data.tankColumns[:])\r\n col_name = data.getParam('colNames',{})\r\n col_width = data.getParam('colWidths',{})\r\n col_align = data.getParam('colAligns',{})\r\n for index,column in enumerate(columns):\r\n name = col_name.get(column,_(column))\r\n width = col_width.get(column,30)\r\n align = wxListAligns[col_align.get(column,'LEFT')]\r\n self.gList.InsertColumn(index,name,align)\r\n self.gList.SetColumnWidth(index, width)", "def addcolumn(self, colname, coldata):\n if len(coldata) != len(self):\n raise ValueError,\"Column length must match catalog length\"\n\n #Most of the bookkeeping is the same as for an empty column\n self.addemptycolumn(colname,coldata.dtype)\n\n #and then we reset the column to contain the actual data\n setattr(self,colname,coldata)", "def AddColumns(sqlite_file, table_name):\r\n columns = ['cf_direct_parent','cf_kingdom','cf_superclass',\\\r\n 'cf_class','cf_subclass','cf_intermediate_0','cf_intermediate_1',\\\r\n 'cf_intermediate_2','cf_intermediate_3','cf_intermediate_4',\\\r\n 'cf_intermediate_5','cf_molecular_framework','cf_alternative_parents',\\\r\n 'cf_substituents', 'cf_description']\r\n column_type = 'TEXT'\r\n # Connecting to the database file\r\n conn = sqlite3.connect(sqlite_file) # Connecting to the database\r\n c = conn.cursor() # Adding a cursor to interact with the database\r\n # Adding new column, if it does not exist yet, without a row value\r\n for new_column_name in columns:\r\n try:\r\n c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\r\n .format(tn=table_name, cn=new_column_name, ct=column_type))\r\n print(\"Column created: {cn}\".format(cn=new_column_name))\r\n except sqlite3.OperationalError:\r\n print(\"Column already exists: {cn}\".format(cn=new_column_name))\r\n conn.commit()\r\n conn.close()\r\n return None", "def 
_addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata", "def __store_column_width(self):\n self.header_width = []\n for i in range(0, self.view.header().count()):\n self.header_width.append(self.view.columnWidth(i))", "def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" + getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def SetColumn(self, column, colInfo):\r\n\r\n self._header_win.SetColumn(column, colInfo)\r\n self._header_win.Refresh()", "def _write_new_column_data(self, column: str, new_kind: str, data: ndarray,\n srm: list, order: int) -> None:\n if new_kind not in self._data:\n loc = 0\n else:\n loc = self._data[new_kind].shape[1]\n if new_kind == 'S':\n self._str_reverse_map[loc] = srm\n self._column_info[column] = utils.Column(new_kind, loc, order)\n if new_kind in self._data:\n self._data[new_kind] = np.asfortranarray(np.column_stack((self._data[new_kind], data)))\n else:\n if data.ndim == 1:\n data = data[:, np.newaxis]\n\n self._data[new_kind] = np.asfortranarray(data)", "def newColumn (layer,FieldName,DataType):\n # Check if field already exists\n if layer.fields().indexFromName(FieldName)==-1:\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes([QgsField(FieldName,DataType)])\n print(\"New field \\\"{}\\\" added\".format(FieldName))\n # Update to propagate the changes\n layer.updateFields()\n else:\n print(\"Field \\\"{}\\\" already exists.\".format(FieldName))", "def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta", "def add_col(self, colname, n_batch=5000, debug=False):\n\n if debug: print(\"Create new column {col}\".format(col=colname))\n # Alter table add column\n #\n alter_query = '''\n ALTER TABLE \"{tablename}\"\n ADD COLUMN \"{colname}\" {datatype};\n '''.format(tablename=self.get_carto_tablename(),\n colname=colname,\n datatype=datatype_map(str(self.dtypes[colname])))\n if debug: print(alter_query)\n\n # add column\n resp = self.carto_sql_client.send(alter_query)\n if debug: print(resp)\n\n # update all the values in that column\n #\n # NOTE: fails if colval is 'inf' or some other exceptional Python\n # or NumPy type\n n_items = len(self[colname])\n update_query = '''\n UPDATE \"{tablename}\"\n SET \"{colname}\" = {colval}\n WHERE \"cartodb_id\" = {cartodb_id};\n '''\n queries = []\n\n for row_num, item in enumerate(self[colname].iteritems()):\n # if debug: print(item)\n pgtype = dtype_to_pgtype(self[colname].dtype, colname)\n temp_query = update_query.format(\n tablename=self.get_carto_tablename(),\n colname=colname,\n colval=numpy_val_to_pg_val(item[1], pgtype),\n cartodb_id=item[0]).strip()\n queries.append(temp_query)\n if (len(queries) == n_batch) or (row_num == n_items - 1):\n output_query = '\\n'.join(queries)\n if debug: print(output_query)\n if debug: print(\"Num chars in query: {}\".format(len(output_query)))\n resp = self.carto_sql_client.send(output_query)\n queries = []\n\n return None", "def _update_desc(self):\n if not self.connection:\n self.close()\n cname = CREATE_BUFFER_U(1024)\n ctype_code = C_SHORT()\n csize = ctypes.c_size_t()\n cdisp_size = C_SSIZE_T(0)\n c_decimal_digits = C_SHORT()\n cnull_ok = C_SHORT()\n col_descr = []\n self._col_type_code_list = []\n for col in range(1, self._num_of_cols() + 1):\n ret = 
ODBC_API.SQLColAttribute(self.stmt_h, col, 6, ADDR(CREATE_BUFFER(10)), 10, ADDR(C_SHORT()), ADDR(cdisp_size))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n ret = ODBC_API.SQLDescribeColW(self.stmt_h, col, cname, len(cname), ADDR(C_SHORT()), ADDR(ctype_code), ADDR(csize), ADDR(c_decimal_digits), ADDR(cnull_ok))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n # (name, type_code, display_size,\n col_descr.append((from_buffer_u(cname), SQL_DATA_TYPE_DICT.get(ctype_code.value, (ctype_code.value,))[0], cdisp_size.value, csize.value, csize.value, c_decimal_digits.value, cnull_ok.value == 1 and True or False))\n self._col_type_code_list.append(ctype_code.value)\n if len(col_descr) > 0:\n self.description = col_descr\n # Create the row type before fetching.\n self._row_type = self.row_type_callable(self)\n else:\n self.description = None\n self._create_col_buf()", "def create_column(self, new_column, dtype):\n self.logger.debug(\"[%u] Ready to add column %s\" %\n (os.getpid(), new_column))\n ddl = \"\"\"\n ALTER TABLE {schema}.{table}\n ADD COLUMN IF NOT EXISTS {col} {type}\n \"\"\"\n # TODO Replace by execute_ddl func and test it\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl.format(schema=self.config['schema'],\n table=self.config['table'],\n col=new_column,\n type=dtype))\n self.logger.debug(\"[%u] Column %s has been added\" %\n (os.getpid(), new_column))", "def OnColumnResize(self,event):\r\n iColumn = event.GetColumn()\r\n column = self.data.getParam('columns')[iColumn]\r\n self.data.updateParam('colWidths')[column] = self.gList.GetColumnWidth(iColumn)", "def columns(self, new_columns: ColumnT) -> None:\n new_columns2: ndarray = init.check_column_validity(new_columns)\n len_new: int = len(new_columns2)\n len_old: int = len(self._columns)\n if len_new != len_old:\n raise ValueError(f'There are {len_old} columns in the DataFrame. 
'\n f'You provided {len_new}.')\n\n new_column_info: ColInfoT = {}\n for old_col, new_col in zip(self._columns, new_columns2):\n new_column_info[new_col] = utils.Column(*self._column_info[old_col].values)\n\n self._column_info = new_column_info\n self._columns = new_columns2", "def update_column(self, xmldata):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n headers = managers.request_manager.get_request().session().value(\"headers\")\n if not columns:\n return False\n if xmldata:\n # Parsing of column declaration\n dom = parseString(xmldata.encode(\"UTF-8\"))\n column = dom.getElementsByTagName(\"column\")[0]\n name = un_quote(column.getAttribute(\"name\"))\n if not name:\n return False\n declaration = name\n constraints = {}\n cid = column.getAttribute(\"id\")\n type = column.getAttribute(\"type\")\n if not type or type == \"INTEGER\" or type == \"REAL\" or type == \"TEXT\" or type == \"BLOB\":\n constraints[\"type\"] = type\n if column.getAttribute(\"notnull\") == \"true\":\n constraints[\"not null\"] = True\n if column.getAttribute(\"primary\") == \"true\":\n if column.getAttribute(\"autoincrement\") == \"true\":\n constraints[\"primary key\"] = \"autoincrement\"\n else:\n constraints[\"primary key\"] = True\n if column.getAttribute(\"unique\") == \"true\":\n constraints[\"unique\"] = True\n\n if column.getAttribute(\"default\") and column.getAttribute(\"default\") != \"\" and column.getAttribute(\"default\") != \"NULL\":\n constraints[\"default\"] = column.getAttribute(\"default\")\n\n column_obj = VDOM_db_column(name, constraints)\n column_obj.id = cid\n\n # praparing SQL code\n old_column = None\n for col in columns:\n if columns[col].id == cid:\n old_column = columns[col]\n break\n if not old_column:\n return False\n\n newtable = \"%s_new(\" % self.name\n oldtable = \"%s(\" % self.name\n for col in headers:\n if oldtable[-1] != \"(\":\n oldtable += \", \"\n oldtable += columns[col].to_declaration()\n\n if columns[col].id == cid:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += column_obj.to_declaration()\n\n else:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += columns[col].to_declaration()\n newtable += \")\"\n if newtable[-2] == \"(\":\n return False\n newcols = []\n newcols.extend(headers)\n newcols.remove(old_column.name)\n newcols_decl = \"\"\n for ctr in newcols:\n newcols_decl += \", `%s`\" % ctr\n\n sql = \"\"\"BEGIN TRANSACTION;\nCREATE TABLE %(newtable)s;\nINSERT INTO `%(newtablename)s` (%(newcols)s) SELECT %(newcols)s FROM `%(oldtablename)s`;\nDROP TABLE `%(oldtablename)s`;\nALTER TABLE `%(newtablename)s` RENAME TO `%(oldtablename)s`;\nEND TRANSACTION;\"\"\" % {\"newtable\": newtable, \"newtablename\": self.name + \"_new\", \"oldtablename\": self.name, \"newcols\": newcols_decl[2:]}\n query = VDOM_sql_query(self.owner_id, self.database_id, sql, None, True)\n query.commit()\n columns.pop(old_column.name)\n columns[column_obj.name] = column_obj\n managers.request_manager.get_request().session().value(\"columns\", columns)\n self.restore_structure()\n return True", "def add_columns(self, table, col_data, col_type):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n for data, typ in zip(col_data, col_type):\n c.execute(\"ALTER TABLE {tn} ADD COLUMN {cn} {ct}\".\n format(tn=table, cn=data, ct=typ))\n conn.commit() \n conn.close()", "def addemptycolumn(self, colname, coltype):\n setattr(self,colname,N.zeros((len(self),),coltype))\n self._modflag=True\n self._type[colname]=coltype\n\n 
#Looks strange here because we count columns from 1 but\n #Python counts them from 0\n self._ncolumns+=1\n self._d[colname]=self._ncolumns\n self._colnames.append(colname)\n self._header+='# %d %s\\n'%(self._ncolumns,colname)", "def add_column(self, fieldname, column, align=..., valign=...):\n ...", "def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)", "def append_columns(classdict, shape=()):\n heavy = common.heavy\n for (itype, type_) in enumerate(sorted(type_info)):\n if not heavy and type_ in heavy_types:\n continue # skip heavy type in non-heavy mode\n colpos = itype + 1\n colname = 'c_%s' % type_\n if type_ == 'enum':\n base = tb.Atom.from_sctype(sctype_from_type[type_])\n col = tb.EnumCol(enum, enum(0), base, shape=shape, pos=colpos)\n else:\n sctype = sctype_from_type[type_]\n dtype = np.dtype((sctype, shape))\n col = tb.Col.from_dtype(dtype, pos=colpos)\n classdict[colname] = col\n ncols = colpos\n return ncols", "def initalise_column_lengths(coldata):\n for key, _ in coldata.items():\n coldata[key]['collen'] = len(coldata[key]['head'])\n return coldata", "def setoutputsize(self, size, column=None):\n pass", "def charcolumns(con,table):\n dbcolumns(con,table,\n # basic classification\n image=\"blob\",\n cls=\"text\",\n cost=\"real\",\n # separate prediction\n # pocost is the cost for the transcribed cls\n pred=\"text\",\n pcost=\"real\",\n pocost=\"real\",\n # cluster information\n cluster=\"integer\",\n count=\"integer\",\n classes=\"text\",\n # geometry\n rel=\"text\",\n lgeo=\"text\",\n # line source\n file=\"text\",\n segid=\"integer\",\n bbox=\"text\",\n )\n con.execute(\"create index if not exists cls_index on %s (cls)\"%table)\n con.execute(\"create index if not exists cluster_index on %s (cluster)\"%table)\n con.execute(\"create index if not exists cost_index on %s (cost)\"%table)\n con.execute(\"create index if not exists countcost_index on %s (count,cost)\"%table)\n con.commit()", "def _update_columns(self):\n self.columns, self.new_columns = self.new_columns, self.columns\n self.num_columns = self.num_new_columns\n self.num_new_columns = 0\n\n # Now update new_columns and mapping with the information for the commit\n # after this one.\n #\n # First, make sure we have enough room. At most, there will be\n # self.num_columns + self.num_parents columns for the next commit.\n max_new_columns = self.num_columns + self.num_parents\n\n # Clear out self.mapping\n self.mapping_size = 2 * max_new_columns\n for i in range(self.mapping_size):\n self.mapping[i] = -1\n\n # Populate self.new_columns and self.mapping\n #\n # Some of the parents of this commit may already be in self.columns. If\n # so, self.new_columns should only contain a single entry for each such\n # commit. 
self.mapping should contain information about where each\n # current branch line is supposed to end up after the collapsing is\n # performed.\n seen_this = False\n mapping_idx = 0\n is_commit_in_columns = True\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n is_commit_in_columns = False\n col_commit = self.commit\n else:\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n old_mapping_idx = mapping_idx\n seen_this = True\n self.commit_index = i\n for parent in self._interesting_parents():\n # If this is a merge, or the start of a new childless\n # column, increment the current color.\n if self.num_parents > 1 or not is_commit_in_columns:\n self._increment_column_color()\n mapping_idx = self._insert_into_new_columns(\n parent,\n mapping_idx)\n # We always need to increment mapping_idx by at least 2, even if\n # it has no interesting parents. The current commit always takes\n # up at least 2 spaces.\n if mapping_idx == old_mapping_idx:\n mapping_idx += 2\n else:\n mapping_idx = self._insert_into_new_columns(col_commit,\n mapping_idx)\n\n # Shrink mapping_size to be the minimum necessary\n while (self.mapping_size > 1 and\n self.mapping[self.mapping_size - 1] < 0):\n self.mapping_size -= 1\n\n # Compute self.width for this commit\n self._update_width(is_commit_in_columns)", "def insert_column(self):\n try:\n lbl_name = Tk.Label(self, text='Enter a column name: ')\n lbl_name.grid(row=0, column=0, sticky=Tk.W+Tk.E)\n ent_name = Tk.Entry(self)\n ent_name.grid(row=0, column=1, sticky=Tk.W+Tk.E)\n lbl_type = Tk.Label(self, text='Enter a column type: ')\n lbl_type.grid(row=1, column=0, sticky=Tk.W+Tk.E)\n ent_type = Tk.Entry(self)\n ent_type.grid(row=1, column=1, sticky=Tk.W+Tk.E)\n\n def _insert_column():\n c_name = ent_name.get()\n c_type = ent_type.get()\n self.parent.insert_column(self.parent.table, c_name, c_type)\n self.destroy()\n self.parent.populate_display()\n b_ins = Tk.Button(self,\n text='Insert Column',\n command=_insert_column)\\\n .grid(row=2, column=1, sticky=Tk.W+Tk.E)\n except Exception, ex:\n logging.error(ex)\n traceback.print_exc()", "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)", "def addColumn(self, *column):\n self.insertColumn(self._width, *column)", "def addcolumn_from_xml(self, xmldata):\n num_added = 0\n if xmldata:\n dom = parseString(xmldata.encode(\"UTF-8\"))\n for column in dom.getElementsByTagName(\"column\"):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n if not columns:\n columns = self.parse_declaration()\n name = un_quote(column.getAttribute(\"name\"))\n if not name:\n continue\n if name in columns:\n continue # column already exists\n declaration = name\n constraints = {}\n cid = column.getAttribute(\"id\")\n type = column.getAttribute(\"type\")\n if not type or type == \"INTEGER\" or type == \"REAL\" or type == \"TEXT\" or type == \"BLOB\":\n constraints[\"type\"] = type\n if column.getAttribute(\"notnull\") == \"true\":\n constraints[\"not null\"] = True\n if column.getAttribute(\"primary\") == \"true\":\n if column.getAttribute(\"autoincrement\") == \"true\":\n constraints[\"primary key\"] = \"autoincrement\"\n else:\n 
constraints[\"primary key\"] = True\n if column.getAttribute(\"unique\") == \"true\":\n constraints[\"unique\"] = True\n\n if column.getAttribute(\"default\") and column.getAttribute(\"default\") != \"\" and column.getAttribute(\"default\") != \"NULL\":\n constraints[\"default\"] = column.getAttribute(\"default\")\n\n column_obj = VDOM_db_column(name, constraints)\n column_obj.id = cid\n columns[name] = column_obj\n self.addcolumn(column_obj)\n managers.request_manager.get_request().session().value(\"columns\", columns)\n num_added += 1\n return num_added", "def set_file_size(self, file_path, row):\n row[_column_name] = os.path.getsize(file_path)", "def _resize(self, c):\n old = list(self.items())\n self._table = [None] * c\n self._n = 0\n for (k, v) in old:\n self[k] = v", "def update_column_format(self):\n pass", "def _updateColAttrs(self, grid):\n col = 0\n\n for colname in self.table.columns:\n attr = wx.grid.GridCellAttr()\n renderer = MegaFontRenderer(self.table)\n attr.SetRenderer(renderer)\n grid.SetColAttr(col, attr)\n col += 1", "def open_column(self):\n self._open_layouting.append(\"Column\")\n self._client.add_layout(Column())", "def insert_column(self, tb_name, column_name, data_type):\n sentences = f\"\"\"\n ALTER TABLE {tb_name} ADD COLUMN {column_name} {data_type};\n \"\"\"\n print(sentences)\n self.commit(sentences)", "def resize(self, columns, rows):\n\t\tsuper().resize(columns, rows)\n\t\tself.fill(None)\n\t\tself.create()", "def adjust_columns(self):\r\n for col in range(3):\r\n self.resizeColumnToContents(col)", "def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])", "def size_cols(self):\n # initial is the header requirement\n for c, hdr in enumerate(self.headers):\n self.col_size[c+self.col_start] = len(str(hdr))\n \n # go through the rows and check the column sizing to find the max\n row_lbls, col_lbls = self.get_idxvals()\n for rlbl in row_lbls:\n for c, clbl in enumerate(col_lbls):\n self.col_size[c+self.col_start] = max(\n self.col_size[c+self.col_start],\n len(self.cell_base.format(self.data[rlbl,clbl]))\n )", "def set_column_width(self, index, width):\n self.colwid[index] = width", "def _get_column_size(self) -> int:\n return self.BARCODE_SIZE[0] // self.BARCODE_COLUMN_NUMBER", "def add_column(self, name, type):\n raise NotImplementedError(\n \"Please implement the 'add_column' method in a derived class.\")", "def make_column(options, name, column):\n # (ElasticsearchFDWOptions, str, multicorn.ColumnDefinition) -> Column\n assert name not in {\n options.rowid_column,\n options.score_column,\n options.query_column,\n }, \"Programmer error: bad name passed to make_column {name}\".format(name=name)\n\n if column.base_type_name.upper() in {\"JSON\", \"JSONB\"}:\n return JsonColumn(name=name)\n return BasicColumn(name=name)", "def _modify_columns(self, cols, X, y=None):", "def AddColumn(self, column):\n self.columns.append(column)\n self.column_dict[column.column_id] = column", "def set_size(self, size):\n self.dtSize = size", "def add_column(self, colspec):\n if colspec.name == DEFAULT_COLUMN_NAME or colspec.name in self.columns.keys():\n raise Exception(\"Column {} already exists.\".format(colspec.name))\n\n self.info.add_column(colspec.name, colspec.video, colspec.dtype)", "def setoutputsize(self, size, column=None):\r\n if self._closed:\r\n raise Error('The cursor has been closed.')\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n 
else:\r\n pass", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns", "def assign_columns(f, df):\n df = assign_image(f, df)\n df = assign_cropbox(df)\n df = assign_uid(df)\n return df", "def updateSize(self, *args):\n return None", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def add_column(\n self,\n header: \"RenderableType\" = \"\",\n footer: \"RenderableType\" = \"\",\n *,\n header_style: Optional[StyleType] = None,\n footer_style: Optional[StyleType] = None,\n style: Optional[StyleType] = None,\n justify: \"JustifyMethod\" = \"left\",\n vertical: \"VerticalAlignMethod\" = \"top\",\n overflow: \"OverflowMethod\" = \"ellipsis\",\n width: Optional[int] = None,\n min_width: Optional[int] = None,\n max_width: Optional[int] = None,\n ratio: Optional[int] = None,\n no_wrap: bool = False,\n ) -> None:\n\n column = Column(\n _index=len(self.columns),\n header=header,\n footer=footer,\n header_style=header_style or \"\",\n footer_style=footer_style or \"\",\n style=style or \"\",\n justify=justify,\n vertical=vertical,\n overflow=overflow,\n width=width,\n min_width=min_width,\n max_width=max_width,\n ratio=ratio,\n no_wrap=no_wrap,\n )\n self.columns.append(column)", "def _set_column(self, column_name, column):\n self._dirty = True\n\n if column.ndim != 1:\n raise ValueError(\"Can only add one-dimensional columns.\")\n if column.dtype.hasobject:\n # Attempting to create a specific non-object based numpy type.\n try:\n first = column[0]\n except IndexError:\n column = np.array([])\n else:\n try:\n # Determining type from the first element.\n if isinstance(first, datetime.datetime):\n # Datetime.\n column = np.array(column, dtype='datetime64[us]')\n elif isinstance(first, datetime.timedelta):\n # Timedelta.\n try:\n column = np.array(column, dtype='timedelta64[us]')\n except TypeError:\n # This exception can occur in numpy 1.9.1 on 32-bit\n # Windows if there is a mix of zero-value and\n # non-zero-value timedeltas. 
Work around by not\n # converting the zero-value timedeltas to numpy,\n # but using it as the default value instead.\n temp_column = np.zeros_like(\n column, dtype='timedelta64[us]')\n for i, v in enumerate(column):\n if v != datetime.timedelta(0):\n temp_column[i] = v\n column = temp_column\n else:\n # No conversion possible.\n raise ValueError()\n except (ValueError, TypeError):\n raise Exception(\n u'Unsupported object type in column {}'.format(\n column_name))\n\n column = Column(np.array(column))\n self._set_column_column(column_name, column)\n self._number_of_columns = len(column)", "def add_metadata(self, column_name, data_type=None, version=None, description=None, dbname=None, delimiter='\\t'):\n data_line = '##COLUMNNAME='+'\"'+ column_name +'\"'\n if column_name not in self.metadata:\n if data_type:\n if data_type not in ['Float', 'String', 'Integer']:\n raise SyntaxError(\"Type must be 'Float', 'String' or 'Integer'. You tried: %s\" % data_type)\n data_line += delimiter + 'TYPE=\"' + data_type + '\"'\n if version:\n data_line += delimiter + 'VERSION=\"' + version + '\"'\n if description:\n data_line += delimiter + 'DESCRIPTION=\"' + description + '\"'\n if dbname:\n data_line += delimiter + 'SCOUTHEADER=\"' + dbname + '\"'\n self.metadata.pop(column_name, 0)\n self.metadata[column_name] = data_line\n return", "def columnCount(self, parent): # pylint: disable=unused-argument\n return 5", "def insert_column(self, identifier, position, name, datastore):\n # Raise ValueError if given colum name is invalid.\n if name is not None and not is_valid_name(name):\n raise ValueError(\"invalid column name '{}'\".format(name))\n # Get dataset. Raise exception if dataset is unknown.\n dataset = datastore.get_dataset(identifier)\n if dataset is None:\n raise ValueError(\"unknown dataset '{}'\".format(identifier))\n # Insert new column into dataset.\n df = dataset.to_dataframe()\n df = vizual.insert_column(df=df, names=[name], pos=position)\n # Store updated dataset to get new identifier.\n ds = datastore.update_dataset(\n origin=dataset,\n df=df,\n annotations=dataset.annotations\n )\n return VizualApiResult(ds)", "def __update_feature_table_columns(self):\n self.__init_table()\n\n feature_dict_sorted_keys = feature_extractor_definition.keys()\n feature_dict_sorted_keys.sort()\n for key in feature_dict_sorted_keys:\n if not self.__has_feature_column(key):\n self.__add_feature_column(key, feature_extractor_definition[key])", "def set_size(self, w, h):\n\t\tpass", "def build_schema(self, dframe, overwrite=False, set_num_columns=True):\n new_schema = self.schema.rebuild(dframe, overwrite)\n self.set_schema(new_schema,\n set_num_columns=(set_num_columns or overwrite))", "def updateHeaderSize( self, nNewDataSize ):\n self.nDataSize = int( nNewDataSize )\n self.nNbrSample = int( self.nDataSize * 8 / self.nNbrChannel / self.nNbrBitsPerSample )\n self.rDuration = self.nDataSize / float( self.nAvgBytesPerSec )", "def add_blank_data_column(self):\n\n header_title, ok_pressed = QInputDialog.getText(self, \"Add Column\", \"Enter heading for the column:\",\n QLineEdit.Normal, \"\")\n if ok_pressed and header_title != '':\n # print(header_title)\n\n default_value, set_default_pressed = QInputDialog.getText(self, \"Set Default Value\",\n \"Enter default value to set for column if any:\",\n QLineEdit.Normal, \"\")\n\n row_count = self.csv_data_table.rowCount()\n last_column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertColumn(last_column_count)\n for empty_row in range(0, row_count):\n 
item = QTableWidgetItem(default_value)\n self.csv_data_table.setItem(empty_row, last_column_count, item)\n\n # TODO: fix untraced bug present in show/hide columns\n self.column_headers.append(header_title)\n self.column_headers_all.append(header_title)\n # print(self.column_headers)\n # print(self.column_headers_all)\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)", "def add_widget_multicolumn(self,name, widget_dict, sizes=None):\n if recoverer is not None:\n for widget_name in widget_dict:\n widget_dict[widget_name] = self.stato_iniziale(widget_name, widget_dict[widget_name])\n\n self.widget_dict[name] = partial(st_functional_columns, widget_dict, sizes)", "def with_column(self, label, values):\n \n \n \n # self.column_labels.append(label)\n # for i in range(len(self.rows)):\n # self.rows[i].append(values[i]) \n \n new_label = []\n new_rows = []\n for x in self.column_labels:\n new_label.append(x)\n new_label.append(label)\n \n for i in range(len(self.rows)):\n new_row = []\n new_row += self.rows[i]\n # for i in range(len(b)): \n new_row.append(values[i])\n new_rows.append(new_row)\n \n \n new_Table = T88ble(new_rows, new_label)\n\n return new_Table", "def setColumnWidth(self, column, newWidth = None):\n\t\t\t\tdef yieldWidths():\n\t\t\t\t\tfor i, row in enumerate(self.thing.iter_rows(), start = 1):\n\t\t\t\t\t\twidth = self.getCellWidth(i, column)\n\t\t\t\t\t\tif (width is not None):\n\t\t\t\t\t\t\tyield width\n\n\t\t\t\tif (newWidth is None):\n\t\t\t\t\t#Find the longest cell in the column\n\t\t\t\t\tpossibleWidths = tuple(yieldWidths())\n\t\t\t\t\tif (possibleWidths):\n\t\t\t\t\t\tnewWidth = max(possibleWidths)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewWidth = -1 #Compensate for blank columns\n\n\t\t\t\t#Apply the new width\n\t\t\t\tnewWidth += 2\n\t\t\t\tself.thing.column_dimensions[openpyxl.utils.get_column_letter(column)].width = newWidth", "def _SetWidth(self, column_index, content_length):\n # Updates the width at position column_index to be the max of the existing\n # value and the new content's length, or this instance's max_column_width if\n # the value would be greater than max_column_width.\n if column_index == len(self._widths):\n self._widths.append(0)\n\n new_width = max(self._widths[column_index], content_length)\n if self._max_column_width is not None:\n new_width = min(self._max_column_width, new_width)\n self._widths[column_index] = new_width", "def col_width(self,column_no): \n if(column_no == 0 and self.serialize):\n return self.col_width_dict['_serial_'] \n column = self.columns[column_no - (1 if self.serialize else 0)]\n return self.col_width_dict[column]", "def columnCount(self, parent):\n return 1", "def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None", "def add_column_into_target_sf(self, tap_type, table, new_column):\n self.run_query_target_snowflake(\n f'ALTER TABLE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table} ADD {new_column[\"name\"]} int'\n )\n self.run_query_target_snowflake(\n f'UPDATE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table}'\n f' SET {new_column[\"name\"]}={new_column[\"value\"]} WHERE 1=1'\n )", "def constant_allocation_size(dataframe: pd.DataFrame, fixed_allocation_size: float = 1.0) -> pd.DataFrame:\n dataframe[PandasEnum.ALLOCATION.value] = fixed_allocation_size\n return dataframe", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n 
self.map.append(columns)", "def add_attribute(self, col, attr_name):\n # not optimised: not expected to be a usual operation\n new_table = np.c_[self.np_table, col]\n new_attributes = self.attributes + [attr_name]\n self.__init__(new_table, self.objects, new_attributes)", "def __init__(self, cell_size, nrows, ncols, **kwds):\n #\n # Python 3 update\n #\n super().__init__(**kwds)\n self.cell_size = cell_size\n w, h = cell_size\n d = 2 * self.margin\n self.size = (w * ncols + d, h * nrows + d)\n self.cell_size = cell_size", "def setAllColumns(self, newAllColumns):\n \n pass", "def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )", "def column_create(request):\n try:\n dataset = DatasetSchema.objects.get(\n slug=request.matchdict['slug']\n ) \n except DatasetSchema.DoesNotExist:\n return {\n 'success': False, \n 'message': 'No dataset named: %s' % \n (request.matchdict['slug'])\n }\n # make sure required params are here\n required_params_list = ['name', 'data_type']\n for param in required_params_list:\n if not request.POST.get(param):\n return {\n 'success': False, \n 'message': 'Param: %s missing from request' % (param),\n }\n\n name = request.POST['name']\n data_type = request.POST['data_type']\n\n # make sure datatype is acceptable\n if data_type not in VALID_DATA_TYPES:\n return {\n 'success': False,\n 'message': 'Data Type: %s not a valid data type' % (data_type),\n }\n\n # start building new field\n new_field = Field(\n name = name,\n data_type = data_type,\n created_by_user_id = request.user.id,\n created_datetime = datetime.now(),\n )\n\n # if type is datetime make sure that a format is along with it\n\n if request.POST.get('data_type') == 'datetime':\n if not request.POST.get('datetime_format'):\n return {\n 'success': False,\n 'message': 'Missing a datetime format',\n }\n else:\n # add it\n new_field.datetime_format = request.POST['datetime_format']\n\n # save the new field\n dataset.fields.append(new_field)\n dataset.save()\n return HTTPMovedPermanently(location='/dataset/get/{}'.format(dataset.slug))", "def _assign_sizes(self):", "def populate_stat(self, table):\n myrow = table.row\n # HDF5 doesn't handle unicode strings, so we need to convert to \n # *byte* strings, which we can put in the HDF5 file \n addy = numpy.zeros(len(self.address), \n dtype=(numpy.str, glob.nchar_address))\n for i in range(len(addy)):\n addy[i] = (self.address[i]).encode('utf8')\n\n myrow[\"address\"] = addy\n myrow[\"bike_stands\"] = self.bike_stands\n myrow[\"number\"] = self.number\n myrow[\"position\"] = self.position\n myrow.append()\n table.flush()", "def _build_columns_struct(self):\n struct = []\n for column in self.columns:\n struct.append({\n \"id\": column.id,\n \"label\": column.label,\n \"pattern\": \"\",\n \"type\": column.type,\n \"p\": column.p}\n )\n return struct", "def add_column(self, pos, char='-', new_label=None):\n MutableAlignment.add_column(self, pos, char)\n if new_label == \"MAX\":\n self._col_labels.insert(pos, max(self._col_labels) + 1)\n elif new_label == \"INC_LAST\":\n self._col_labels.append(max(self._col_labels) + 1)\n elif new_label == \"RESET\":\n self._reset_col_names()\n else:\n self._col_labels.insert(pos, new_label)", "def num_columns(self):\n parse_to_small_int_columns = [\n 'doors',\n 'cylinders',\n 
'transmission_speeds',\n 'passengers'\n ]\n\n parse_to_big_int_columns = [\n 'd_id',\n 'v_id',\n 'odometer'\n ]\n\n parse_to_float_columns = [\n 'displacement',\n 'price',\n 'msrp'\n ]\n\n for i in range(len(parse_to_small_int_columns)):\n self.data[parse_to_small_int_columns[i]\n ] = self.data[parse_to_small_int_columns[i]].astype(np.int8)\n\n for i in range(len(parse_to_big_int_columns)):\n self.data[parse_to_big_int_columns[i]\n ] = self.data[parse_to_big_int_columns[i]].astype(np.int32)\n\n for i in range(len(parse_to_float_columns)):\n # SQLite float type is np.float32\n self.data[parse_to_float_columns[i]\n ] = self.data[parse_to_float_columns[i]].astype(np.float64)\n\n # print(self.data[parse_to_float_columns[2]])\n\n return None", "def add_new_column(dataframe, column_name):\r\n dataframe[column_name] = \"\"\r\n return dataframe", "def updateHeaderSizeFromDataLength( self ):\n self.updateHeaderSize( int( len( self.data ) * self.nNbrBitsPerSample / 8 ) )", "def customize_headers(self,executer, tree, cursor, table,custom_headers):\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = custom_headers\n\n\n set_width = int(self.column_length_configurator / len(headers))\n\n # Setting columns width and headers\n for column in custom_headers:\n tree.column(column, width=set_width, minwidth=self.min_width)\n tree.heading(column, text=column)", "def test_num_columns(self):\n pass", "def addColumn(self, name, column):\n self.columnNames.append(name)\n self.addColumnValues(column)", "def apply(self, name, size, type):\n self.properties['name'] = name\n self.properties['size'] = size\n self.properties['type'] = type", "def create_column(self, row_idx, idx, name=None):\n col_name = f'Col_{idx}' if not name else name\n row_name = self.rows[row_idx]\n col_path = os.path.join(os.path.join(self.root_path, row_name), col_name)\n\n # check to see if the user is trying to create a row that already exists\n if os.path.exists(col_path):\n raise FileExistsError(f'A column subgroup with the name {col_name} already exists')\n else:\n self.store[self.rows[row_idx]].create_group(col_name)\n self.columns[idx] = col_name", "def add_col2tab(con_db, cur_db, tab_name, col_name, col_type):\n\n # Iterate through all existing column names of the database table using\n # the PRAGMA table_info command\n for row in cur_db.execute(f'PRAGMA table_info({tab_name})'):\n\n # If the column exists: exit the function\n if row[1] == col_name:\n break\n\n # If the column is not existing yet, add the new column\n else:\n cur_db.execute(f'ALTER TABLE {tab_name} ' \\\n f'ADD COLUMN {col_name} {col_type}')\n con_db.commit()", "def _insert_into_new_columns(self, commit, mapping_index):\n for i in range(self.num_new_columns):\n if self.new_columns[i].commit == commit:\n self.mapping[mapping_index] = i\n return mapping_index + 2\n\n # This commit isn't already in new_columns. 
Add it.\n column = Column(commit, self._find_commit_color(commit))\n self.new_columns[self.num_new_columns] = column\n self.mapping[mapping_index] = self.num_new_columns\n self.num_new_columns += 1\n return mapping_index + 2", "def add_column(self, name, display=None):\n column = column_v2()\n column.data.name = name\n column.display.name = display\n self.columns.append(column)\n self.column_length=len(self.columns)\n self.update_ordinals()", "def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )" ]
[ "0.6953581", "0.6563744", "0.63662314", "0.62849265", "0.6020931", "0.5901503", "0.5833289", "0.58037275", "0.5675224", "0.5647959", "0.56247205", "0.5617577", "0.55949396", "0.5576177", "0.55671567", "0.5556555", "0.5535544", "0.55261594", "0.55159014", "0.5498711", "0.54230773", "0.54193276", "0.5403627", "0.5396774", "0.53628767", "0.53617436", "0.5341596", "0.53378534", "0.53260034", "0.5313883", "0.52981305", "0.529079", "0.526982", "0.5251435", "0.5240079", "0.5232787", "0.5226067", "0.52248985", "0.52106696", "0.52084583", "0.5207946", "0.51956713", "0.51913273", "0.51836133", "0.51640177", "0.5139996", "0.5133307", "0.5127827", "0.51253057", "0.51250726", "0.51120377", "0.5107731", "0.50976044", "0.5089449", "0.50613695", "0.50501865", "0.50456995", "0.5034617", "0.5031435", "0.50251967", "0.50204587", "0.5019932", "0.5015773", "0.50103575", "0.5006146", "0.500345", "0.50006825", "0.49901488", "0.4987641", "0.49861595", "0.49746868", "0.49689865", "0.49657682", "0.4965299", "0.4952051", "0.49475425", "0.49401656", "0.49378958", "0.4928958", "0.49195072", "0.49186406", "0.4917691", "0.49170873", "0.49138772", "0.49064168", "0.4895169", "0.4894007", "0.48935744", "0.48914206", "0.48890218", "0.4886528", "0.4884647", "0.48842308", "0.4877436", "0.4874265", "0.48735136", "0.48697966", "0.4866491", "0.48651165", "0.48648414" ]
0.76685566
0
Simply copy metadata from source to target
def copy_stock_metas(
    meta_source,
    target,
    copy_columns_info=True,
) -> None:
    set_attr(
        target,
        KEY_ALIAS_MAP,
        copy(getattr(meta_source, KEY_ALIAS_MAP))
    )

    if copy_columns_info:
        set_attr(
            target,
            KEY_COLUMNS_INFO_MAP,
            deepcopy(getattr(meta_source, KEY_COLUMNS_INFO_MAP))
        )
    else:
        set_attr(target, KEY_COLUMNS_INFO_MAP, {})
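A minimal sketch of how the function above could be exercised, assuming set_attr is a thin setattr wrapper and KEY_ALIAS_MAP / KEY_COLUMNS_INFO_MAP are plain attribute-name constants; the _Meta holder and the sample values are hypothetical. With these assumptions in scope, the definition above runs as-is:

from copy import copy, deepcopy

# Assumed stand-ins for the helpers the function relies on.
KEY_ALIAS_MAP = '_stock_aliases_map'
KEY_COLUMNS_INFO_MAP = '_stock_columns_info_map'

def set_attr(target, key, value):
    setattr(target, key, value)

class _Meta:
    # Hypothetical metadata holder used only for this demo.
    pass

source, target = _Meta(), _Meta()
setattr(source, KEY_ALIAS_MAP, {'close': 'c'})
setattr(source, KEY_COLUMNS_INFO_MAP, {'ma:5': object()})

copy_stock_metas(source, target)                             # deep-copies the columns info map
copy_stock_metas(source, target, copy_columns_info=False)    # resets it to an empty dict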
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_metadata(from_dir, to_dir):\n if not FLAGS.dry_run:\n tf.io.gfile.makedirs(to_dir)\n for fname in tfds.core.utils.list_info_files(from_dir):\n from_path = os.path.join(from_dir, fname)\n to_path = os.path.join(to_dir, fname)\n logging.info('cp %s %s', from_path, to_path)\n if not FLAGS.dry_run:\n tf.io.gfile.copy(from_path, to_path, overwrite=True)", "def cp_metadata(src_client, src_index, target_client, target_index):\n print \"Copy settings, aliases & mappings from source index %s to target index %s...\" % (src_index, target_index)\n try:\n res = src_client.indices.get_settings(src_index)\n settings = res[src_index]['settings'] if res and 'settings' in res[src_index] else {}\n res = src_client.indices.get_mapping(src_index)\n mappings = res[src_index]['mappings'] if res and 'mappings' in res[src_index] else {}\n res = src_client.indices.get_aliases(src_index)\n aliases = res[src_index]['aliases'] if res and 'aliases' in res[src_index] else {}\n res = target_client.indices.create(index=target_index, body={\"settings\": settings, \"mappings\": mappings, \"aliases\": aliases})\n print 'Metadata copied'\n return res['acknowledged']\n except Exception, e:\n print e\n return False", "def copy_metadata():\n\n common_root = os.path.join(os.path.dirname(__file__), \"wsdotroute\", \"esri\")\n src = os.path.join(common_root, \"toolboxes\")\n dest = os.path.join(common_root, \"help\", \"gp\", \"toolboxes\")\n\n if os.path.exists(dest):\n shutil.rmtree(dest)\n\n shutil.copytree(src, dest, ignore=shutil.ignore_patterns(\"*.pyt\"))\n\n print(\"Completed copying metadata XML files\")", "def copy(self, src_path: str, tgt_path: str) -> None:", "def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):\n source_structure = self._lookup_course(source_course_key).structure\n with self.bulk_operations(dest_course_key):\n original_structure = self._lookup_course(dest_course_key).structure\n index_entry = self._get_index_if_valid(dest_course_key)\n new_structure = self.version_structure(dest_course_key, original_structure, user_id)\n\n new_structure['assets'] = source_structure.get('assets', {})\n new_structure['thumbnails'] = source_structure.get('thumbnails', [])\n\n # update index if appropriate and structures\n self.update_structure(dest_course_key, new_structure)\n\n if index_entry is not None:\n # update the index entry if appropriate\n self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])", "def copy(self, source_host, dest_host, filename):", "def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })", "def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }", "def _copy_output(src: Graph, dst: Graph):\n for n_src, n_dst in zip(src.nodes, dst.nodes):\n if n_src.op == 'output':\n n_dst.meta = n_src.meta", "def copy(source, target):\n\tshutil.copy(source, target)", "def copy_object_metadata(self, bucket_name, src_object_name, 
dst_object_name):\n\n return h3lib.copy_object_metadata(self._handle, bucket_name, src_object_name, dst_object_name, self._user_id)", "def append_ipma_metadata(orig: dict, dest: dict):\n for key in [key for key in orig.keys() if key != 'data']:\n dest[key] = orig[key]", "def _copy_file ( self, source, dest ):\n return", "def copyMedia(source, target):\n if not os.path.exists(target):\n print(\"copying source,target:\", source, target)\n shutil.copy2(source, target)", "def copy(from_dir: tfds.typing.PathLike, to_dir: tfds.typing.PathLike) -> None:\n for full_name in tfds.core.load.list_full_names():\n from_full_name_dir = os.path.join(from_dir, full_name)\n to_full_name_dir = os.path.join(to_dir, full_name)\n\n # Skip if the dataset isn't generated or that metadata are already copied\n if not tf.io.gfile.exists(from_full_name_dir):\n logging.info('Skipping %s (not found)', from_full_name_dir)\n continue\n if tf.io.gfile.exists(to_full_name_dir) and not FLAGS.overwrite:\n logging.info('Skipping %s (already exists)', to_full_name_dir)\n continue\n\n _copy_metadata(from_dir=from_full_name_dir, to_dir=to_full_name_dir)", "def _copyFile(\n syn,\n entity,\n destinationId,\n version=None,\n updateExisting=False,\n setProvenance=\"traceback\",\n skipCopyAnnotations=False,\n):\n ent = syn.get(entity, downloadFile=False, version=version, followLink=False)\n # CHECK: If File is in the same parent directory (throw an error) (Can choose to update files)\n if not updateExisting:\n existingEntity = syn.findEntityId(ent.name, parent=destinationId)\n if existingEntity is not None:\n raise ValueError(\n 'An entity named \"%s\" already exists in this location. File could not be copied'\n % ent.name\n )\n profile = syn.getUserProfile()\n # get provenance earlier to prevent errors from being called in the end\n # If traceback, set activity to old entity\n if setProvenance == \"traceback\":\n act = Activity(\"Copied file\", used=ent)\n # if existing, check if provenance exists\n elif setProvenance == \"existing\":\n try:\n act = syn.getProvenance(ent.id)\n except SynapseHTTPError as e:\n if e.response.status_code == 404:\n act = None\n else:\n raise e\n elif setProvenance is None or setProvenance.lower() == \"none\":\n act = None\n else:\n raise ValueError(\"setProvenance must be one of None, existing, or traceback\")\n # Grab entity bundle\n bundle = syn._getEntityBundle(\n ent.id,\n version=ent.versionNumber,\n requestedObjects={\"includeEntity\": True, \"includeFileHandles\": True},\n )\n fileHandle = synapseclient.core.utils.find_data_file_handle(bundle)\n createdBy = fileHandle[\"createdBy\"]\n # CHECK: If the user created the file, copy the file by using fileHandleId else copy the fileHandle\n if profile.ownerId == createdBy:\n newdataFileHandleId = ent.dataFileHandleId\n else:\n copiedFileHandle = copyFileHandles(\n syn,\n [fileHandle],\n [\"FileEntity\"],\n [bundle[\"entity\"][\"id\"]],\n [fileHandle[\"contentType\"]],\n [fileHandle[\"fileName\"]],\n )\n # Check if failurecodes exist\n copyResult = copiedFileHandle[0]\n if copyResult.get(\"failureCode\") is not None:\n raise ValueError(\n \"%s dataFileHandleId: %s\"\n % (copyResult[\"failureCode\"], copyResult[\"originalFileHandleId\"])\n )\n newdataFileHandleId = copyResult[\"newFileHandle\"][\"id\"]\n\n new_ent = File(\n dataFileHandleId=newdataFileHandleId, name=ent.name, parentId=destinationId\n )\n # Set annotations here\n if not skipCopyAnnotations:\n new_ent.annotations = ent.annotations\n # Store provenance if act is not None\n if act is not 
None:\n new_ent = syn.store(new_ent, activity=act)\n else:\n new_ent = syn.store(new_ent)\n # Leave this return statement for test\n return new_ent[\"id\"]", "def copy_to(raw_data, obj):\n\n shutil.copyfileobj(raw_data, obj)", "def data_copy(config, start, end, new, destination_node, source_url, source, destination):\n try:\n asyncio.run(_run(config.node, start, end, new, destination_node,\n source, destination, source_url))\n except errors.ApiError as e:\n raise click.ClickException(str(e)) from e\n finally:\n asyncio.run(\n config.close_node())", "def _StageMetadata(json_metadata, storage_service, staged_file: str):\n # Write computed metadata to object storage.\n temp_run_dir = temp_dir.GetRunDirPath()\n local_file = os.path.join(temp_run_dir, os.path.basename(staged_file))\n with open(local_file, 'w') as f:\n json.dump(json_metadata, f)\n storage_service.Copy(local_file, staged_file)", "def clone_data(self,req):\n # source folder\n source=req.source or \"/media/howie/archive/data/music/\"\n # destination folder\n dest=req.dest or \"/home/howie/data/music/\"\n # clone the music files\n c=0\n for i in self.list(isin={'kind':(\"track\",\"image\",\"file\")},orderby=\"uid\"):\n c+=1\n# print c,\" uid:\",i.uid,\" kind:\",i.kind,\" loc:\",i.file_folder(),\" name:\",i.name\n subfolder=i.file_folder()\n destfolder=dest+subfolder\n if not os.path.exists(destfolder):\n os.makedirs(destfolder)\n shutil.copy2(source+subfolder+\"/\"+i.code,destfolder)\n print(\"added %s\" % (dest+subfolder+\"/\"+i.code,))\n return \"clone completed: %s files added\" % c", "def _copy_visitor(path, source, destination, labels):\n\n # Skip paths corresponding to excluded labels\n if path.split('/')[0] in labels:\n return\n\n # Copy everything else\n source_obj = source[path]\n if isinstance(source_obj, h5py.Group):\n dest_obj = destination.create_group(path)\n else:\n ds = source_obj\n dest_obj = destination.create_dataset(\n path,\n data=source_obj[()],\n chunks=ds.chunks,\n maxshape=ds.maxshape,\n compression=ds.compression,\n compression_opts=ds.compression_opts,\n scaleoffset=ds.scaleoffset,\n shuffle=ds.shuffle,\n fletcher32=ds.fletcher32,\n fillvalue=ds.fillvalue,\n )\n\n dest_obj.attrs.update(source_obj.attrs)", "def run_copy(self, src, dst):\n pass", "def copy(self, target):\r\n py.process.cmdexec(\"svn copy %s %s\" %(str(self), str(target)))", "def test_copy(self):\n\n tempdir = tempfile.mkdtemp()\n include_example = os.path.join(here, 'include-example.ini')\n manifest = ManifestParser(manifests=(include_example,))\n manifest.copy(tempdir)\n self.assertEqual(sorted(os.listdir(tempdir)),\n ['fleem', 'include', 'include-example.ini'])\n self.assertEqual(sorted(os.listdir(os.path.join(tempdir, 'include'))),\n ['bar.ini', 'crash-handling', 'flowers', 'foo.ini'])\n from_manifest = ManifestParser(manifests=(include_example,))\n to_manifest = os.path.join(tempdir, 'include-example.ini')\n to_manifest = ManifestParser(manifests=(to_manifest,))\n self.assertEqual(to_manifest.get('name'), from_manifest.get('name'))\n shutil.rmtree(tempdir)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def _copyRecursive(\n syn, entity, destinationId, mapping=None, skipCopyAnnotations=False, **kwargs\n):\n\n version = kwargs.get(\"version\", None)\n setProvenance = kwargs.get(\"setProvenance\", \"traceback\")\n excludeTypes = 
kwargs.get(\"excludeTypes\", [])\n updateExisting = kwargs.get(\"updateExisting\", False)\n if mapping is None:\n mapping = dict()\n # Check that passed in excludeTypes is file, table, and link\n if not isinstance(excludeTypes, list):\n raise ValueError(\"Excluded types must be a list\")\n elif not all([i in [\"file\", \"link\", \"table\"] for i in excludeTypes]):\n raise ValueError(\n \"Excluded types can only be a list of these values: file, table, and link\"\n )\n\n ent = syn.get(entity, downloadFile=False)\n if ent.id == destinationId:\n raise ValueError(\"destinationId cannot be the same as entity id\")\n\n if (isinstance(ent, Project) or isinstance(ent, Folder)) and version is not None:\n raise ValueError(\"Cannot specify version when copying a project of folder\")\n\n if not isinstance(ent, (Project, Folder, File, Link, Schema, Entity)):\n raise ValueError(\"Not able to copy this type of file\")\n\n permissions = syn.restGET(\"/entity/{}/permissions\".format(ent.id))\n # Don't copy entities without DOWNLOAD permissions\n if not permissions[\"canDownload\"]:\n syn.logger.warning(\n \"%s not copied - this file lacks download permission\" % ent.id\n )\n return mapping\n\n access_requirements = syn.restGET(\"/entity/{}/accessRequirement\".format(ent.id))\n # If there are any access requirements, don't copy files\n if access_requirements[\"results\"]:\n syn.logger.warning(\n \"{} not copied - this file has access restrictions\".format(ent.id)\n )\n return mapping\n copiedId = None\n\n if isinstance(ent, Project):\n if not isinstance(syn.get(destinationId), Project):\n raise ValueError(\n \"You must give a destinationId of a new project to copy projects\"\n )\n copiedId = destinationId\n # Projects include Docker repos, and Docker repos cannot be copied\n # with the Synapse rest API. 
Entity views currently also aren't\n # supported\n entities = syn.getChildren(\n entity, includeTypes=[\"folder\", \"file\", \"table\", \"link\"]\n )\n for i in entities:\n mapping = _copyRecursive(\n syn,\n i[\"id\"],\n destinationId,\n mapping=mapping,\n skipCopyAnnotations=skipCopyAnnotations,\n **kwargs,\n )\n elif isinstance(ent, Folder):\n copiedId = _copyFolder(\n syn,\n ent.id,\n destinationId,\n mapping=mapping,\n skipCopyAnnotations=skipCopyAnnotations,\n **kwargs,\n )\n elif isinstance(ent, File) and \"file\" not in excludeTypes:\n copiedId = _copyFile(\n syn,\n ent.id,\n destinationId,\n version=version,\n updateExisting=updateExisting,\n setProvenance=setProvenance,\n skipCopyAnnotations=skipCopyAnnotations,\n )\n elif isinstance(ent, Link) and \"link\" not in excludeTypes:\n copiedId = _copyLink(syn, ent.id, destinationId, updateExisting=updateExisting)\n elif isinstance(ent, Schema) and \"table\" not in excludeTypes:\n copiedId = _copyTable(syn, ent.id, destinationId, updateExisting=updateExisting)\n # This is currently done because copyLink returns None sometimes\n if copiedId is not None:\n mapping[ent.id] = copiedId\n syn.logger.info(\"Copied %s to %s\" % (ent.id, copiedId))\n else:\n syn.logger.info(\"%s not copied\" % ent.id)\n return mapping", "def copy(\n self,\n source,\n dest,\n name=None,\n shallow=False,\n expand_soft=False,\n expand_external=False,\n expand_refs=False,\n without_attrs=False,\n ):", "def update_flow_metadata(self, update_with):\n if update_with:\n source, clone = self._fetch_flowdetail(clone=True)\n clone.meta.update(update_with)\n self._with_connection(self._save_flow_detail, source, clone)", "def add_source_metadata(self, src_name: SourceName, metadata: SourceMeta) -> None:\n metadata_item = metadata.dict()\n metadata_item[\"src_name\"] = src_name.value\n try:\n self.metadata.put_item(Item=metadata_item)\n except ClientError as e:\n raise DatabaseWriteException(e)", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def hard_update(target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def copy_item(self, src_key, target_key, nodup=False):\n copied = False\n src_entry = self.get(src_key)\n if (src_entry):\n if ((target_key not in self._key_set) or (not nodup)): # if no target or dups allowed\n copy = src_entry._replace(keyword=target_key, value=src_entry.value)\n self._metadata.append(copy)\n self._update_key_set()\n copied = True\n return copied", "def copy_params(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def copy(self, source, target, recursive=True):\n if recursive:\n command = 'cp -R %s %s'\n else:\n command = 'cp %s %s'\n self.communicate(command % (source, target))", "def populateMeta(self, *args):\n meta = self._getAllMeta()\n if not meta:\n raise MetaReadError(\"Error Reading Image MetaData, has image finished copying?\")\n else:\n self.exifKeys = self._getAllMetaKeys(meta)\n for key in self.exifKeys:\n if key == self._getExifKey_TimeCode():\n tag = meta[self._getExifKey_TimeCode()]\n self.startTimecode = tag.raw_value\n self._splitTimecode()\n \n if args:\n for arg in args:\n try:\n lTag = meta[arg]\n self.__dict__[arg.split('.')[1] + '_' + arg.split('.')[2]] = lTag.raw_value\n except:\n print 'could not get meta for tag ', arg", "def copy():\n put(os.path.join('dist', get_egg_name()), remote_egg_dir)", "def move_object_metadata(self, bucket_name, 
src_object_name, dst_object_name):\n\n return h3lib.move_object_metadata(self._handle, bucket_name, src_object_name, dst_object_name, self._user_id)", "def cp_index(src_client=None, src_index=None, target_client=None, target_index=None, chunk_size=1000):\n check_not_empty(src_client)\n check_not_empty(src_index)\n target_client = target_client or src_client\n target_index = target_index or src_index\n ok = cp_metadata(src_client, src_index, target_client, target_index)\n if ok:\n print \"Copy documents...\"\n reindex(\n client=src_client,\n source_index=src_index,\n target_client=target_client,\n target_index=target_index,\n chunk_size=chunk_size,\n query={\"query\": {\"match_all\": {}}}\n )\n print \"Data copied!\"", "def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)", "def CopyTo(self, *args, **kwargs):\n pass", "def _copy_metadata_deep(value, old_value):\n if value is None or old_value is None or value is old_value: return\n\n if isinstance(value, dict):\n for k, v in value.iteritems():\n _copy_metadata_deep(v, old_value[k])\n elif isinstance(value, list):\n for v, old_v in zip(value, old_value):\n _copy_metadata_deep(v, old_v)\n else:\n try:\n value.__dict__.update(old_value.__dict__)\n except AttributeError:\n pass", "def push(target):\n if target is None:\n target = getcwd()\n\n target = path.abspath(target)\n\n dot_chunk = load_chunk(target)\n src = dot_chunk[\"src\"]\n source = load_source(src)\n\n copy(target, source)", "def ExtractInfoAndCopyMaster(self):\n self.ExtractandWriteInfo()\n self.CreateMasterCopy()\n return \"TurnOffMirror\"", "def copy_header (fits_dest, header_src):\n\n # open fits_dest for updating\n with fits.open(fits_dest, mode='update') as hdulist:\n header_dest = hdulist[-1].header\n\n # delete hdr_dest keys\n process_keys (header_dest)\n\n # copy keys\n process_keys (header_dest, header_src)\n\n\n return", "def update_metadata(metadata_src_lst, metadata_dest_lst):\n if metadata_src_lst and metadata_dest_lst:\n if not isinstance(metadata_dest_lst[0], list): # annotation from one rater only\n metadata_dest_lst[0]._update(metadata_src_lst[0], TRANSFORM_PARAMS)\n else: # annotations from several raters\n for idx, _ in enumerate(metadata_dest_lst[0]):\n metadata_dest_lst[0][idx]._update(metadata_src_lst[0], TRANSFORM_PARAMS)\n return metadata_dest_lst", "def test_copy_sources(self):\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Source\", first(metric_copy[\"sources\"].values())[\"name\"])", "def copy(self, name, source, dest):\n self._assert_absolute_path_or_placeholder(source)\n self._assert_absolute_path_or_placeholder(dest)\n self._run(name, ['copy', source, dest])\n self.m.path.mock_copy_paths(source, dest)", "def cp(self, copy_from, copy_to, **kwargs):\n return self.exec_command('cp %s %s' % (copy_from, copy_to), **kwargs)", "def test_copy_without_name(self):\n self.source[\"name\"] = \"\"\n source_copy = copy_source(self.source, self.DATA_MODEL)\n self.assertEqual(\"pip (copy)\", source_copy[\"name\"])", "def copy_object(self, original_object, new_key=None,\n new_location=None, new_player=None, new_home=None, \n new_permissions=None, new_locks=None, new_aliases=None, new_destination=None):\n\n # get all the object's stats\n typeclass_path = original_object.typeclass_path\n if not new_key: \n new_key = original_object.key\n if not new_location:\n new_location = original_object.location\n if not new_home:\n new_home = 
original_object.home\n if not new_player:\n new_player = original_object.player\n if not new_aliases:\n new_aliases = original_object.aliases \n if not new_locks:\n new_locks = original_object.db_lock_storage\n if not new_permissions:\n new_permissions = original_object.permissions \n if not new_destination:\n new_destination = original_object.destination\n \n # create new object \n from src.utils import create \n from src.scripts.models import ScriptDB\n new_object = create.create_object(typeclass_path, key=new_key, location=new_location,\n home=new_home, player=new_player, permissions=new_permissions, \n locks=new_locks, aliases=new_aliases, destination=new_destination)\n if not new_object:\n return None \n\n # copy over all attributes from old to new. \n for attr in original_object.get_all_attributes():\n new_object.set_attribute(attr.key, attr.value)\n\n # copy over all cmdsets, if any \n for icmdset, cmdset in enumerate(original_object.cmdset.all()):\n if icmdset == 0:\n new_object.cmdset.add_default(cmdset)\n else:\n new_object.cmdset.add(cmdset)\n\n # copy over all scripts, if any \n for script in original_object.scripts.all():\n ScriptDB.objects.copy_script(script, new_obj=new_object.dbobj)\n \n return new_object", "def copy_properties(self, from_metadata, clear_properties=False):\n if (clear_properties):\n self.clear_properties()\n\n for key, value in from_metadata.properties.items():\n self.properties[key] = copy.deepcopy(value)", "def copy(source: str, dest: str):\n source_auth = credentials.authenticate(source)\n dest_auth = credentials.authenticate(dest)\n copier = COPIER_REGISTRY.get_handler(source_auth.scheme + \"+\" + dest_auth.scheme)\n copier.copy(source_auth, dest_auth)", "def copy(self):\n source = os.path.abspath(self.path)\n destination = os.path.abspath(self.target)\n\n logger.info(\"Running Copy Method - SOURCE=\\\"{src}\\\" DESTINATION=\\\"{dst}\\\" IGNORE=\\\"{ignore}\\\"\".format(src=source, dst=destination, ignore=self.ignore))\n\n if not os.path.exists(source):\n logger.error(\"\\\"{source}\\\" PATH DOESN'T EXIST. PROGRAM TERMINATED. Please check log file.\".format(source=source))\n\n if self.rules is not None:\n files = self.rules\n else:\n self.create_packet_structure(source)\n files = self.files\n\n for (k,v) in files.items():\n src = os.path.join(source,k)\n dst = os.path.join(destination,v)\n dirpath = os.path.dirname(dst)\n if not os.path.isdir(dirpath):\n logger.info(\"Create directory - \\\"{dst}\\\"\".format(dst=dirpath))\n os.makedirs(dirpath)\n logger.info(\"copy from \\\"{f}\\\" to \\\"{t}\\\"\".format(f=src,t=dst))\n shutil.copyfile(src,dst)\n logger.info(\"OK\")", "def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 
'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)", "def copy_attrs(data_orig, data_new):\n\n if isinstance(data_orig, Dataset):\n\n # Variables\n for v in data_orig.data_vars:\n field = data_orig[v]\n for attr, val in field.attrs.items():\n data_new[v].attrs[attr] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, 
val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n # Metadata\n for attr, val in data_orig.attrs.items():\n data_new.attrs[attr] = val\n\n elif isinstance(data_orig, DataArray):\n\n # Variable Metadata\n for att, val in data_orig.attrs.items():\n data_new.attrs[att] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n else:\n raise ValueError(\"Couldn't handle type %r\" % type(data_orig))\n\n return data_new", "def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))", "def copy_map1_name(source, target):\n\n if not is_matching_type(source, target):\n return\n\n source_uv_name = cmds.getAttr(\"{}.uvSet[0].uvSetName\".format(source))\n\n try:\n cmds.setAttr(\"{}.uvSet[0].uvSetName\".format(target), source_uv_name,\n type=\"string\")\n except RuntimeError:\n logger.debug(\"{} doesn't not have uvs, skipping udpate map1 name\"\n .format(target))\n return", "def _backup_meta_data(meta_path: Path) -> None:\n meta_path = meta_path.resolve()\n backup_meta_path = meta_path.parent / (meta_path.name + \".bak\")\n i = 0\n while backup_meta_path.exists():\n backup_meta_path = backup_meta_path.with_suffix(\".bak{}\".format(i))\n i += 1\n shutil.copy(str(meta_path), str(backup_meta_path))", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest", "def test_update_metadata(self):\n pass", "def copy(ctx, source, dest, force=False):\n # print \"COPY:\", locals()\n # print \"COPY:\", ctx.force, ctx.verbose\n if source == dest:\n return dest\n\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = \"\"\n if sys.platform == 'win32':\n if force:\n flags += \" /Y\"\n # print 'copy {flags} {source} {dest}'.format(**locals())\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else: # pragma: nocover\n if force:\n flags += \" --force\"\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest", "def copy (from_key, to_key):\n source = registry (from_key, accept_value=False)\n target = registry (to_key, accept_value=False)\n if not target:\n target.create ()\n\n for root, subkeys, subvalues in walk (source, _want_types=True):\n target_root = registry (target.moniker + utils.relative_to (root.moniker, source.moniker), accept_value=False)\n for k in subkeys:\n target_key = registry (target.moniker + utils.relative_to (k.moniker, source.moniker), accept_value=False)\n target_key.create ()\n for name, value, type in subvalues:\n target_root.set_value (name, value, type)\n\n return target", "def _copy(self, src, dest):\n\t\ttry:\n\t\t\tself.bucket.copy_key(dest, self.bucket.name, src)\n\t\texcept boto.exception.S3CopyError as e:\n\t\t\tself.log.debug(\"bucket copy failed for on %s failed\", dest, exc_info=True)\n\t\t\traise e", "def _copyTemplate(sourceBase, destination):\n source = 
os.path.normpath(os.path.join(sourceBase, \"_New_Region\", \"Template\"))\n copy_tree(source, destination, preserve_mode=1, preserve_times=1, preserve_symlinks=1)", "def copystat(src, dest):\n import shutil\n\n shutil.copystat(str(src), str(dest))", "def generate_metadata(\n source_name,\n source_version\n):\n now = datetime.utcnow()\n write_recipt = str(uuid.uuid1())\n metadata_obj = {\n 'write_recipt': write_recipt,\n 'data_source': source_name,\n 'machine_source': platform.node(),\n 'version': source_version,\n 'package_version': _version.__version__,\n 'cron_datetime': now.isoformat()\n }\n\n return metadata_obj", "def cp(self, src, dest):\r\n return self._call(\"-cp\", src, dest, suppress_output=True)", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def cp(src, dest):\n _shutil.copy2(native(src), native(dest))", "def copy_annotations(source: str, target: str) -> str:\n if isinstance(source, AnnotatedStr):\n if not isinstance(target, AnnotatedStr):\n target = AnnotatedStr(target)\n target.optional = source.optional\n target.exists = source.exists\n target.phony = source.phony\n target.precious = source.precious\n return target", "def copy(CopySource=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, SourceClient=None, Config=None):\n pass", "def copyData(self, src_schema, src_table, src_columns, dest_schema, dest_table, dest_columns):\r\n sql = 'INSERT INTO {} ( {} ) SELECT {} FROM {}'.format(self.encodeTableName(dest_schema, dest_table), ','.join(dest_columns),\r\n ','.join(src_columns), self.encodeTableName(src_schema, src_table))\r\n return self.runSql(sql)", "def move_info(self,req):\n def copy_item(st,dt):\n \"copy info from st to dt\"\n # move plays\n# print \"moving plays for \",st.uid,\" to \",dt.uid\n execute(\"update %s.plays set page=%s where page=%s \" % (self.Config.database,dt.uid,st.uid))\n # move tags \n execute(\"update %s.tags set page=%s where page=%s \" % (self.Config.database,dt.uid,st.uid))\n # copy info\n dt.name=st.name\n dt.when=st.when\n dt.composer=st.composer\n dt.artist=st.artist\n dt.text=st.text\n dt.rating=st.rating\n dt.prefs=st.prefs\n #dt.score=st.score\n dt.flush()\n st.name=st.name+\" (old version)\"\n st.rating= -4 # set to X \n st.flush()\n # move images\n st.get_images() # create st.images\n for i in st.images:\n i.parent=dt.uid\n i.set_lineage(pob=dt)\n i.flush()\n try:\n dob=self.get(safeint(req.to))\n except:\n dob=None\n if (not dob):\n return \"specify destination as ?to=[UID]\"\n elif (self.kind!=dob.kind):\n return \"source is a %s but destination is a %s\" % (self.kind,dob.kind) \n if self.parent!=dob.parent:\n return \"source and destination parent mismatch\"\n if self.kind=='album':\n copy_item(self,dob)\n# for st in self.list(parent=self.uid,kind='track',where=\"rating>=0\",orderby=\"uid\"):\n for st in self.list(parent=self.uid,kind='track',orderby=\"uid\"):\n dt=dob.list(parent=dob.uid,kind=\"track\",seq=st.seq) 
# get corresponding track from dob\n if dt:\n copy_item(st,dt[0])\n elif self.kind=='track':\n copy_item(self,dob)\n else:\n return \"not an album or track...\"\n req.message=\"info copied/moved to %s\" % dob.uid\n return self.view(req)", "def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned", "def copy_structure(self, other_directory):\n pass", "def _update_extra_metadata(self, extra_metadata):\n self._add_filename_metadata(extra_metadata)\n self._derive_extra_metadata(extra_metadata)\n \n if type(self) == SAFESentinel3:\n self._extract_metadata_from_zipfile(extra_metadata)", "def update_copy(self, source, dest):\n relsource = os.path.relpath(source, os.path.realpath(self.dirname))\n for copy in self.runscript.copies:\n if copy[1] == dest:\n copy[0] = relsource\n break\n else:\n self.runscript.add_copy(relsource, dest)", "def _generate_copy_target(self, src: 'mesonlib.FileOrString', output: Path) -> None:\n if isinstance(src, File):\n instr = src.absolute_path(self.environment.source_dir, self.environment.build_dir)\n else:\n instr = src\n elem = NinjaBuildElement(self.all_outputs, [str(output)], 'COPY_FILE', [instr])\n elem.add_orderdep(instr)\n self.add_build(elem)", "def copy_skel(src, dest):\n md_common.copytree(src, dest)", "def clone(name1, name2):\n if not SchModule._ready:\n raise ValueError(\"not mounted\")\n\n schdir1 = SchModule.DIR.hpath(name1)\n schdir2 = SchModule.DIR.hpath(name2)\n\n if not path.exists(schdir1):\n raise Exception(\"Source scheme does not exist\")\n if path.exists(schdir2):\n raise Exception(\"Destination scheme already exists\")\n\n # copy unmodified files\n shutil.copytree(schdir1, schdir2)\n # modify descr-file\n descr2 = path.join(schdir2, SchModule.DESCR)\n dc = DotCfg()\n with codecs.open(descr2, \"r\", encoding=\"utf8\") as f:\n dc.parse(f)\n par = dc.get(u\"date\")\n if par:\n par.value = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n par = dc.get(u\"author\")\n if par:\n par.value = os.getenv(\"USER\", os.getenv(\"USERNAME\", \"Unknown\"))\n dc.flush()", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def copy(self, *args, **kwargs): # real signature unknown\n pass", "def add_metadata(self, metadata: dict) -> None:", "def bundle_metadata(self, metadata):\n\n metadata_file = None\n try:\n metadata_file = tempfile.NamedTemporaryFile(delete=False)\n except IOError:\n task_error('Cannot create metadata file in working directory')\n\n metadata_file.write(metadata)\n fname = metadata_file.name\n metadata_file.close()\n\n metadata_file = open(fname, mode='rb')\n\n # metadata_file.seek(0)\n\n if self.empty_tar:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='w')\n self.empty_tar = False\n else:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='a')\n\n try:\n tar_info = 
tarfile.TarInfo('metadata.txt')\n tar_info.size = len(metadata)\n tar_info.mtime = time.time()\n tarball.addfile(tar_info, metadata_file)\n metadata_file.close()\n tarball.close()\n os.remove(fname)\n except Exception, ex:\n print ex\n traceback.print_exc(file=sys.stdout)\n raise ex", "def test_promote_metadata():\n\n def original(dispatcher, intent):\n \"\"\"Original!\"\"\"\n return 1\n\n original.attr = 1\n wrapped = do(original)\n assert wrapped.__name__ == \"original\"\n assert wrapped.attr == 1\n assert wrapped.__doc__ == \"Original!\"", "def gen_copy_struct(self, dst, src, amount):\n self.emit(ir.CopyBlob(dst, src, amount))", "def copyto_at(self, n, src, m):\n for k in self.containers:\n self.__dict__[k][n] = src.__dict__[k][m]", "def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def test_update_metadata1(self):\n pass", "def copy(self, dest=None, *args, **kw):\r\n if dest is None:\r\n dest = mkdtemp()\r\n tmptar = file(mktemp(), \"wb\")\r\n self.tar(out=tmptar, *args, **kw)\r\n tmptar.close()\r\n tarfile.open(tmptar.name, \"r\").extractall(dest)\r\n return dest", "def copy_nc_attrs(src, dest):\n with xarray.open_dataset(src) as s:\n attrs = s.attrs\n # Write empty root dataset with attributes\n ds = xarray.Dataset(attrs=attrs)\n ds.to_netcdf(dest, mode=\"a\")", "def test_15_copyto(self):\n with mock.patch(BUILTINS + '.open', mock.mock_open()):\n status = udocker.FileUtil(\"source\").copyto(\"dest\")\n self.assertTrue(status)\n status = udocker.FileUtil(\"source\").copyto(\"dest\", \"w\")\n self.assertTrue(status)\n status = udocker.FileUtil(\"source\").copyto(\"dest\", \"a\")\n self.assertTrue(status)" ]
[ "0.7023326", "0.6726561", "0.65834624", "0.6537077", "0.64390403", "0.62709254", "0.62083226", "0.61535376", "0.60626066", "0.60160655", "0.60077727", "0.59770507", "0.59708804", "0.5922094", "0.5846591", "0.57966024", "0.57608265", "0.57467353", "0.56573725", "0.56544816", "0.5609208", "0.5597532", "0.55618346", "0.5541921", "0.5539623", "0.5539623", "0.5539623", "0.5532673", "0.5519487", "0.5509952", "0.5505541", "0.55013525", "0.55001223", "0.5498828", "0.5489252", "0.5487994", "0.54678005", "0.5457167", "0.54413337", "0.5418238", "0.5415945", "0.53997034", "0.53989285", "0.53962773", "0.53926986", "0.537159", "0.5353484", "0.5351375", "0.5351185", "0.5337269", "0.5333092", "0.5327098", "0.53264505", "0.5325624", "0.53159183", "0.53099173", "0.53015393", "0.5299801", "0.5294431", "0.529286", "0.52917695", "0.5274485", "0.52733505", "0.52669287", "0.52659595", "0.5259934", "0.5255737", "0.5255483", "0.52418876", "0.5239277", "0.52324235", "0.52309674", "0.5230867", "0.52171636", "0.5213786", "0.521376", "0.5206475", "0.52041864", "0.5191006", "0.51668775", "0.51656646", "0.51636386", "0.5150429", "0.5150429", "0.5150429", "0.5150429", "0.5150429", "0.5150429", "0.5150429", "0.5149087", "0.5144948", "0.5143846", "0.5142313", "0.51405674", "0.51401854", "0.51401854", "0.51397806", "0.5134934", "0.5129883", "0.5116834" ]
0.7013717
1
Saves ciphers or keys or any text to the given file path; more efficient than manual saving.
def save(string, file):
    save_file = open(file, 'w')
    save_file.write(string)
    save_file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def store_file(text: str, file_path: str) -> None:\n with open(file=file_path, mode='w', encoding='utf8') as f:\n f.write(text)", "def save_text_file(text, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, \"w\") as f:\n f.write(text)", "def save(self, path):\n f = open(path, 'w')\n f.write(self.content().encode('utf-8'))\n f.close()", "def save(self, file_path):\n with open(file_path, 'w') as file:\n file.write(self.text)\n file.close()", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def encrypt_and_store_file(path_to_original_file):\n\t\toriginal_file_name, _ = os.path.splitext(path_to_original_file)\n\t\toutput_string = EncryptDecrypt.ascii_string_to_hex(EncryptDecrypt.file_to_string(path_to_original_file))\n\t\twith open(original_file_name+\".enc\", \"w+\") as save_file:\n\t\t\tsave_file.write(output_string)\n\t\tos.remove(path_to_original_file)", "def save(self, path: str):\n pass", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def save(self, path: str):\n\n\t\tinfo_dict = {\n\t\t\t\"n_gram_size\": self.n_gram_size,\n\t\t\t\"caseless\": self.caseless,\n\t\t\t\"ignore_punctuation\": self.ignore_punctuation,\n\t\t\t\"add_pos_tags\": self.add_pos_tags,\n\t\t\t\"uses_lemma\": self.uses_lemma,\n\t\t\t\"uses_sentences\": self.uses_sentences\n\t\t}\n\n\t\twith open(path, \"wt\", encoding=\"utf8\") as f:\n\t\t\tjson.dump(info_dict, f)", "def save(self, path):\n # Force an update of the canvas\n self._canvas.Update()\n\n # Save to file\n self._canvas.SaveAs(path)", "def save_path(path_to_account):\r\n with open(\"config.txt\", 'w+') as write_in_file:\r\n write_in_file.write(path_to_account)", "def save_file(path, file_data):\n file_data.save(path)", "def write(path):\n return mac_slideshow.preferences.write(KEY, path)", "def writefile(path: Union[str, Path], txt: str) -> None:\n with open(path, 'w') as outfile:\n outfile.write(txt)", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n pass", "def save(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n if not name:\n raise ValueError, \"name is required\"\n\n path = os.path.join(folder, name + self.extension)\n f = open(path, \"wb\")\n f.write(self.contents)\n f.close()\n\n return path", "def savefile(text):\n file = tkinter.filedialog.asksaveasfile(mode='w', defaultextension='.txt')\n if not file:\n return\n file.write(text)\n file.close()", "def _save_file(self, file_path, data):\n self._ensure_directory(os.path.dirname(file_path))\n with open(file_path, \"wb\") as f:\n f.write(data)", "def to_disk(self, path: Union[str, Path], **kwargs: Any) -> None:\n path = ensure_path(path)\n cfg = {\n \"spaczz_overwrite\": self.overwrite,\n \"spaczz_defaults\": self.defaults,\n \"spaczz_ent_id_sep\": self.ent_id_sep,\n }\n serializers = {\n \"spaczz_patterns\": lambda p: srsly.write_jsonl(\n p.with_suffix(\".jsonl\"), self.patterns\n ),\n \"cfg\": lambda p: srsly.write_json(p, cfg),\n }\n if path.suffix == \".jsonl\": # user wants to save only JSONL\n srsly.write_jsonl(path, self.patterns)\n else:\n write_to_disk(path, serializers, {})", "def save(self, path):\n with open(path, 'wb') as f:\n pkl.dump(self, f)", "def save(btctxstore, path, cfg, password=None):\n # always validate before saving\n validate(btctxstore, cfg)\n\n 
# Create root path if it doesn't already exist.\n root_path = os.path.dirname(path)\n if not os.path.exists(root_path):\n os.makedirs(root_path)\n\n # WRite config to file.\n if password is None: # unencrypted\n with open(path, 'w') as config_file:\n config_file.write(json.dumps(cfg))\n return cfg\n else:\n raise NotImplementedError(\"encryption not implemented\")", "def SaveToFile(self):\n\n if len(self.paris) == 0:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return\n\n data = \"\"\n for x in self.paris.iterkeys():\n data += \"%\" + x + \":\" + self.paris[x] + \"%\"\n \n data = self.Encrypt(data)\n\n with open(self.fileLoc, \"w\") as file:\n file.write(data)", "def saveText(file, path):\n files = os.listdir(path)\n for fil in files:\n if filetype.guess(os.path.join(path,fil)) is None:\n os.remove(os.path.join(path,fil)) \n tx = open(os.path.join(path, str(file)), 'wb')\n file.open()\n tx.write(file.read())\n file.close()\n tx.close()", "def encrypt(path, key, default, output):\n if not key:\n key = getpass('Encryption key: ')\n\n path, file_type, file_mtime = get_file_type_and_mtime(path)\n config = get_config(path, file_type, default)\n data = encrypt_credentials(config, key)\n\n if output:\n if output[0] == '.':\n output = output[1:]\n file_type = '.' + output.lower()\n\n with open(path + file_type, 'w') as save_file:\n if file_type == '.json':\n json.dump(data, save_file, indent=2)\n\n elif file_type in {'.ini', '.conf'}:\n if default:\n default_section = 'DEFAULT'\n else:\n default_section = 'DEFAULT' + os.urandom(16).hex()\n\n for heading in data:\n save_file.write(\"[{}]\\n\".format(heading))\n for item in data[heading]:\n save_file.write(\"{} = {}\\n\".format(item, data[heading][item]))\n save_file.write(\"\\n\")\n\n else:\n write_yaml(save_file, data)", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n out_file.write(data)", "def save_file(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'wb') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save_file(content: Any, filename: str, path: str):\n\n logging.info('Saving file: %s ' % filename)\n path_to_file = join(path, filename)\n if isfile(path_to_file):\n ctrl = input('%s exists already in\\n %s.\\n'\n ' Are you sure you want to overwrite it [y/N]: '\n % (filename, path))\n if ctrl.lower() == 'y' or ctrl.lower() == 'yes':\n with open(path_to_file, \"wb\") as f:\n pickle.dump(content, f)\n else:\n logging.warning(\"%s NOT saved..\" % filename)\n return\n else:\n with open(path_to_file, \"wb\") as f:\n pickle.dump(content, f)\n\n logging.info(\"File '%s' saved.\" % filename)", "def save_to_file(self, file_path: str):\n with open(file_path, 'w') as engine_settings_file:\n json.dump(self._encode_json(), engine_settings_file, indent=4)", "def save_file_(msl_data_path, filename, content):\n with open(msl_data_path + filename, 'w') as (file_):\n file_.write(content)\n file_.flush()\n file_.close()", "def save_txt(filename, data, encoding):\n with open(filename, \"w\") as f:\n f.write(dump(data, encoding))", "def save(self, path):\n save(self.actor_net.state_dict(), path + '_actor.pkl')\n save(self.critic_net.state_dict(), path + '_critic.pkl')", "def save_pickle(file, path):\n with open(path, 'wb') as f:\n pickle.dump(file, f)\n file_name = re.findall(r\"/?[^/]+\", path)[-1].strip(\"/\")\n print(f\"Stored {file_name}.\")", "def save(self, path):\n print(\"Warning: Default save used\")\n with open(path, 'wb') as f:\n pickle.dump(self, f)", 
"def save(self, file):\n self._save(file.encode())", "def saveToFile(self, filePath):\n d = self.save()\n with open(filePath, 'wb') as f:\n f.write(d)", "def write_file(content, file_path, mode='w', encoding='utf-8'):\n with codecs.open(file_path, mode, encoding=encoding) as fid:\n fid.write(content)", "def write(s, path, encoding=\"utf-8\"):\n with open(path, \"wb\") as f:\n f.write(s.encode(encoding))", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save(self, path: str):\n with open(path, 'w', encoding='utf-8') as f:\n f.write(self.to_json())", "def save():\n\n subject = subject_var.get()\n category = cat_var.get()\n private = private_var.get()\n message = message_inp.get('1.0', tk.END)\n datestamp_type = datestamp_var.get()\n\n extension = 'txt' if not private else 'secret'\n filename = f'{category} - {subject}.{extension}'\n\n # Apply optional datestamp in message\n if datestamp_type == 'Date':\n datestamp = datetime.today().strftime('%Y-%m-%d')\n elif datestamp_type == 'Date+Time':\n datestamp = datetime.today().strftime('%Y-%m-%d_%H-%M-%S')\n else:\n datestamp = ''\n if datestamp:\n message = f'{message}\\n\\n{datestamp}'\n\n if private:\n password = tksd.askstring(\n 'Enter password',\n 'Enter a password to encrypt the message.'\n )\n message = weaksauce_encrypt(message, password)\n\n with open(filename, 'w') as fh:\n fh.write(message)\n\n status_var.set(f'Message was saved to {filename}')\n tkmb.showinfo('Saved', f'Message was saved to {filename}')", "def _write_cache(self, path, text):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n folder = os.path.split(cache_path)[0]\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n with io.open(cache_path, 'w', encoding='utf-8') as f:\n f.write(text)", "def save(self, path, project_name=\"project\"):\n save_path = os.path.join(path, self.save_path)\n save_path = re.sub(r\"/^{}/\".format(self.template.name), project_name, save_path)\n try:\n os.makedirs(os.path.dirname(save_path))\n except FileExistsError:\n pass\n file = open(save_path, \"w\")\n file.write(self.text)\n file.close()\n print(\"save file: \", save_path)", "def save(self, path):\n pickle.dump(self, open(path, 'wb'))", "def write_scram_toolfile(self, contents, filename):\n with open(self.spec.prefix.etc + '/scram.d/' + filename, 'w') as f:\n f.write(contents)\n f.close()", "def save(self, path):\n self._scala.save(self._tc._scala_sc, path)", "def write_to_path(self, path):\n assert not path.exists()\n fout = path.open(\"wb\")\n fout.write(self.to_string())\n assert not fout.close()\n path.setdata()", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def write(cls, path, text):\n with cls.open(path, 'wt') as fd:\n return fd.write(text)", "def save(self, export_path: str):", "def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))", "def save(file_path, nodes=[]):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not 
file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n return file_path\n print time.time() - t", "def Save(file=CONFIG):\n\tif file in files:\n\t\tfiles[file].SaveFile()", "def save_file():\n filepath = asksaveasfilename(\n defaultextension=\"txt\",\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")],\n )\n if not filepath:\n return\n with open(filepath, \"w\") as output_file:\n text = txt_edit.get(\"1.0\", tk.END)\n output_file.write(text)\n window.title(f\"Switch port Consolidation - {filepath}\")", "def savekey(comp):\n with open('clave.key', mode='w') as key:\n key.write(str(comp[0])+'\\n'+str(comp[2]))\n\n with open('clave.cr', mode='w') as pub:\n pub.write(str(comp[1])+'\\n'+str(comp[2]))", "def write_text_file(path: Path, data: str) -> None:\n path.write_text(data, encoding='utf-8')", "def save(self):\n os.rename(self.scores_filename, '%s-%s' % (self.scores_filename, str(time.time())))\n scores_file = codecs.open(self.scores_filename, 'w', encoding='utf-8')\n for each_chan in self.scores_dict:\n for each_nick in self.scores_dict[each_chan]:\n line = '{0},{1},{2},{3}\\n'.format(each_chan, each_nick, self.scores_dict[each_chan][each_nick][0], self.scores_dict[each_chan][each_nick][1])\n scores_file.write(uc.decode(line))\n scores_file.close()", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n session_key_file = f'{self.file_folder}/{self.user_id}_{filename}.bin'\n\n if not os.path.exists(session_key_file):\n with open(session_key_file, 'wb') as f:\n f.write(session_key)\n\n out_file.write(nonce)\n out_file.write(tag)\n out_file.write(cipher_text)", "def write_cipher_text(self, data: bytes, out_file: BinaryIO, filename: str):\n\n cipher_text, tag, nonce, session_key = self.encrypt(data)\n session_key_file = f'{self.file_folder}/{self.user_id}_{filename}.bin'\n\n if not os.path.exists(session_key_file):\n with open(session_key_file, 'wb') as f:\n f.write(session_key)\n\n out_file.write(nonce)\n out_file.write(tag)\n out_file.write(cipher_text)", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def save(file, corpus):\n with open(file, 'w') as f_out:\n f_out.write(corpus)", "def save(self, filename, format = \"text\"):\n #\n for time in self.mdvtc.keys():\n if format == \"csv\":\n save_filename = filename + str(int(time)) + \".csv\"\n elif format == \"text\":\n save_filename = filename + str(int(time)) + \".txt\"\n else:\n save_filename = filename + str(int(time)) + \".txt\"\n self.mdvtc[time].save(save_filename, format)", "def save(self, path):\n if path.endswith(\".gz\"):\n file = gzip.open(path, \"w\", 9)\n else:\n file = open(path, \"wb\")\n\n # update the settings in the data to the latest value\n data = json.loads(self.value)\n data[\"settings\"] = self.settings\n\n file.write(json.dumps(data).encode(\"utf8\"))\n file.close()", "def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))", "def saveText(texto, fileName, nameLib): \r\n arq = open(fileName + \"-\" + nameLib + \".txt\", \"w\")\r\n arq.write(texto) \r\n arq.close()", "def encrypt_file(path, key):\n # if file ends in encrypted file extension, skip\n if os.path.splitext(path)[1] == settings.ENCRYPTED_FILE_EXTENSION:\n return\n f = Fernet(key)\n # keep reading, encrypting and writting to file separate\n # incase 
encyrpting fail file doesn't get truncated\n # read\n try:\n with open(path, \"rb\") as file:\n file_content = file.read()\n # encrypt\n cypher = f.encrypt(file_content)\n # write to file\n with open(path, \"wb\") as file:\n file.write(cypher)\n except PermissionError:\n # not enough permission, skip\n return\n except FileNotFoundError:\n # file is an alias, skip\n return\n # rename the file with encrypted file extension\n os.rename(path, path + settings.ENCRYPTED_FILE_EXTENSION)", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def writeFile(fileName, text):\n with open(fileName, 'w', encoding='utf-8') as f:\n f.write(text)", "def save_to_file(path, question_dic, type, set_type, doc_size=None):\n print(\"def save_to_file(path, question_dic, type, set_type, doc_size=None) ...\")\n\n # Check whether question dic contains values\n assert len(question_dic)>0, \"question dic is empty\"\n\n # Create filename\n if type == \"quasar\":\n filename = \"_\".join([type, set_type, doc_size]) + \".pkl\"\n else:\n filename = \"_\".join([type, set_type]) + \".pkl\"\n full_path_to_file = Path(\"/\".join([str(path), str(filename)]))\n\n # Create the output directory if doesn't exist\n if not os.path.exists(path):\n os.makedirs(path)\n\n # Write to the file\n with open(full_path_to_file, \"wb\") as of:\n pickle.dump(question_dic, of)\n print(\"pickled file {} and saved it to {}\".format(filename, full_path_to_file))", "def save(self, filename):\n pass", "def save_to_file(self):\n # Create a new file name based off date and time\n file_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S_RTI_CFG.txt\")\n file_path = os.path.expanduser(\"~\\\\Desktop\\\\\"+file_name)\n\n file = open(file_path, 'w')\n file.write(self.commandFileTextBrowser.toPlainText())\n file.close()\n\n self.parent.statusBar().showMessage('File saved to ' + file_path)", "def save_text_file(i):\n\n fn = i['text_file']\n\n s = i['string']\n\n try:\n s = s.replace('\\r', '')\n except Exception as e:\n pass\n\n try:\n s = s.replace(b'\\r', b'')\n except Exception as e:\n pass\n\n m = 'w'\n if i.get('append', '') == 'yes':\n m = 'a'\n\n try:\n s = s.encode('utf8')\n except Exception as e:\n pass\n\n try:\n # if sys.version_info[0]>2:\n # f=open(fn, m+'b')\n # f.write(s)\n # else:\n f = open(fn, m+'b')\n f.write(s)\n except Exception as e:\n return {'return': 1, 'error': 'problem writing text file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n return {'return': 0}", "def savefile(filename, data):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n output = dumps(data, ensure_ascii=False, sort_keys=True, indent=2)\n file.write(output)", "def fwrite(filename, text):\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n with open(filename, 'w') as f:\n f.write(text)", "def save_content(content, dir_path, file_name):\r\n if not os.path.exists(dir_path):\r\n os.mkdir(dir_path)\r\n with open(f'{dir_path}\\\\{file_name}', 'w') as output_file:\r\n output_file.write(content)", "def save(self, characters, filepath):\n\n\t\twith open(filepath, 'w') as out:\n\t\t\tjson.dump(characters, out, sort_keys=True, indent=4)", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def save_txt_file():\n global output_on_display\n if data_base == '':\n mistake_load_table()\n else:\n save_name = asksaveasfilename(title=\"Select file\", 
filetypes=((\"TXT\", \"*.txt\"), (\"all files\", \"*.*\")),\n defaultextension='.txt')\n if Path(save_name).suffix == '.txt':\n data_txt = output_on_display.get('1.0', 'end')\n f = open(save_name, 'w')\n f.write(data_txt)\n f.close()", "def txt_file_writer(path):\n return open(path, 'w', encoding=cfg.ENCODING)", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)", "def save(file_path, nodes):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n print time.time() - t", "def save(self, cert_path: Union[Path, str], key_path: Union[Path, str]):\n cert_path, key_path = Path(cert_path), Path(key_path)\n\n cert_path.parent.mkdir(parents=True, exist_ok=True)\n with cert_path.open(\"wb\") as file:\n file.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, self.cert))\n\n key_path.parent.mkdir(parents=True, exist_ok=True)\n with key_path.open(\"wb\") as file:\n file.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, self.key))", "def save_file(file_name, suffix, content):\n full_path = os.path.abspath(file_name)\n filename, file_extension = os.path.splitext(full_path)\n save_path = '_'.join([filename, suffix]) + file_extension\n with open(save_path, 'w') as f:\n f.write(content)\n return save_path", "def save_cookies(self):\n\n with open(self.location_of_cookies, 'wb') as f:\n pickle.dump(self.get_cookies(), f)\n f.close()", "def writefile(filename, content):\n with open(Path(os.path.expanduser(filename)), 'w') as outfile:\n outfile.write(content)", "def save(self, fname):\n pass", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def write_file(path, contents, mode=\"w\"):\n with open(path, mode) as f:\n f.write(contents)", "def save(self,cookie_jar):\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n with open(self.file_path, \"wb\") as cookie_file:\n cookie_file.write(bytearray(pickle.dumps(cookie_jar)))", "def to_disk(\n self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()\n ) -> None:\n serialize = {\n \"vocab\": lambda p: self.vocab.to_disk(p, exclude=exclude),\n \"patterns\": lambda p: srsly.write_msgpack(p, self.patterns),\n }\n util.to_disk(path, serialize, exclude)", "def write_keys(path, keys):\n p_keys = pickle.dumps(keys)\n b_keys = base64.b64encode(p_keys)\n with open(path, \"wb+\") as walletfile:\n walletfile.write(b_keys)", "def _save_keys(self) -> None:\n algorithm = self.algorithm_combobox.currentText()\n filename = AesKeyGenerator(algorithm).save_session_key()\n msg_success(f\"Created keys as {filename}\")", "def store_to_disk(text_corpus, path_preprocessed_files, append_mode=True):\n\n if append_mode:\n text_corpus.to_csv(path_preprocessed_files, sep='|',\n index=False, mode='a', header=False)\n else:\n text_corpus.to_csv(path_preprocessed_files, sep='|',\n index=False, header=True)", "def write_cert(path, filename, data, mode=0o600):\n with os.fdopen(os.open(os.path.join(path, filename),\n os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:\n f.write(data)", "def write_cert(path, filename, data, mode=0o600):\n with os.fdopen(os.open(os.path.join(path, filename),\n os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:\n f.write(data)", "def save_pkl_data(path, data):\n with open(path, 'wb') as 
fo:\n pickle.dump(data, fo)", "def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path" ]
[ "0.68838364", "0.6661782", "0.65330315", "0.6515433", "0.6431692", "0.64074653", "0.6158728", "0.61471856", "0.6121656", "0.6058847", "0.6049562", "0.60482824", "0.60414743", "0.6010596", "0.598768", "0.59727114", "0.59727114", "0.59727114", "0.59499484", "0.5907642", "0.5892403", "0.5888667", "0.58779985", "0.58776546", "0.5875111", "0.5869437", "0.5813983", "0.5813705", "0.5747593", "0.5747412", "0.5741654", "0.5734627", "0.5712189", "0.5712101", "0.5704289", "0.5689042", "0.56865686", "0.56801635", "0.5678045", "0.5676494", "0.56638503", "0.5661987", "0.5655915", "0.5644095", "0.5642544", "0.5638281", "0.5636288", "0.5636094", "0.56334066", "0.5627573", "0.56239915", "0.56120414", "0.5609023", "0.55848217", "0.558206", "0.55802166", "0.55770934", "0.55633867", "0.5560519", "0.5554076", "0.5554076", "0.5550386", "0.5548146", "0.55454224", "0.5539631", "0.5530926", "0.5525628", "0.5518514", "0.5514363", "0.55137354", "0.5511237", "0.55077237", "0.5502988", "0.5497649", "0.5488436", "0.54836357", "0.5480459", "0.54801303", "0.54572123", "0.5455227", "0.54543704", "0.54464406", "0.54413354", "0.54412425", "0.54330003", "0.5428404", "0.54281056", "0.542715", "0.54253167", "0.5414329", "0.5411709", "0.54108226", "0.54082614", "0.54042107", "0.54031456", "0.5388833", "0.53847533", "0.53847533", "0.5383665", "0.5381527" ]
0.55175346
68
Returns the number of permutations of size r from a population of size n; accurate for arbitrarily large integers, unlike the standard formula n! / (n-r)!
def permute(n, r):
    product = 1
    for i in range(n - r + 1, n + 1):
        product *= i
    return product
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def r_permutations(n, r):\n return math.factorial(n) / math.factorial(n - r)", "def permutations_(n, r):\n return factorial(n) / factorial(n-r)", "def permutations(n, r):\n result = 1\n for i in range(n, n-r, -1):\n result *= i\n return result", "def r_combinations(n,r):\n return r_permutations(n,r) / math.factorial(r)", "def combinations_count(n, r):\n # TODO: How should I do when n - r is negative?\n if n < 0 or r < 0:\n raise Exception('combinations_count(n, r) not defined when n or r is negative')\n if n - r < r: r = n - r\n if r < 0: return 0\n if r == 0: return 1\n if r == 1: return n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n return result", "def bruteForcePopulation(N):\n return list(itertools.permutations(range(N), N))", "def main():\n\n import sys\n sys.setrecursionlimit(10**7)\n from itertools import accumulate, combinations, permutations, product # https://docs.python.org/ja/3/library/itertools.html\n # accumulate() returns iterator! to get list: list(accumulate())\n from math import factorial, ceil, floor\n def factorize(n):\n \"\"\"return the factors of the Arg and count of each factor\n \n Args:\n n (long): number to be resolved into factors\n \n Returns:\n list of tuples: factorize(220) returns [(2, 2), (5, 1), (11, 1)]\n \"\"\"\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct\n def combinations_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n # TODO: How should I do when n - r is negative?\n if n < 0 or r < 0:\n raise Exception('combinations_count(n, r) not defined when n or r is negative')\n if n - r < r: r = n - r\n if r < 0: return 0\n if r == 0: return 1\n if r == 1: return n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n return result\n def combinations_with_replacement_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items allowing individual elements to be repeated more than once.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n if n < 0 or r < 0:\n raise Exception('combinations_with_replacement_count(n, r) not defined when n or r is negative')\n elif n == 0:\n return 1\n else:\n return combinations_count(n + r - 1, r)\n from bisect import bisect_left, bisect_right\n from collections import deque, Counter, defaultdict # https://docs.python.org/ja/3/library/collections.html#collections.deque\n from heapq import heapify, heappop, heappush, heappushpop, heapreplace,nlargest,nsmallest # https://docs.python.org/ja/3/library/heapq.html\n from copy import 
deepcopy, copy # https://docs.python.org/ja/3/library/copy.html\n from operator import itemgetter\n # ex1: List.sort(key=itemgetter(1))\n # ex2: sorted(tuples, key=itemgetter(1,2))\n from functools import reduce\n def chmin(x, y):\n \"\"\"change minimum\n if x > y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current minimum value\n y (long): potential minimum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x > y:\n x = y\n return (x, True)\n else:\n return (x, False)\n def chmax(x, y):\n \"\"\"change maximum\n if x < y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current maximum value\n y (long): potential maximum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x < y:\n x = y\n return (x, True)\n else:\n return (x, False)\n\n from fractions import gcd # Deprecated since version 3.5: Use math.gcd() instead.\n def gcds(numbers):\n return reduce(gcd, numbers)\n def lcm(x, y):\n return (x * y) // gcd(x, y)\n def lcms(numbers):\n return reduce(lcm, numbers, 1)\n\n # first create factorial_list\n # fac_list = mod_factorial_list(n)\n INF = 10 ** 18\n MOD = 10 ** 9 + 7\n modpow = lambda a, n, p = MOD: pow(a, n, p) # Recursive function in python is slow!\n def modinv(a, p = MOD):\n # evaluate reciprocal using Fermat's little theorem:\n # a**(p-1) is identical to 1 (mod p) when a and p is coprime\n return modpow(a, p-2, p)\n def modinv_list(n, p = MOD):\n if n <= 1:\n return [0,1][:n+1]\n else:\n inv_t = [0,1]\n for i in range(2, n+1):\n inv_t += [inv_t[p % i] * (p - int(p / i)) % p]\n return inv_t\n def modfactorial_list(n, p = MOD):\n if n == 0:\n return [1]\n else:\n l = [0] * (n+1)\n tmp = 1\n for i in range(1, n+1):\n tmp = tmp * i % p\n l[i] = tmp\n return l\n def modcomb(n, k, fac_list = [], p = MOD):\n # fac_list = modfactorial_list(100)\n # print(modcomb(100, 5, modfactorial_list(100)))\n from math import factorial\n if n < 0 or k < 0 or n < k: return 0\n if n == 0 or k == 0: return 1\n if len(fac_list) <= n:\n a = factorial(n) % p\n b = factorial(k) % p\n c = factorial(n-k) % p\n else:\n a = fac_list[n]\n b = fac_list[k]\n c = fac_list[n-k]\n return (a * modpow(b, p-2, p) * modpow(c, p-2, p)) % p\n def modadd(a, b, p = MOD):\n return (a + b) % MOD\n def modsub(a, b, p = MOD):\n return (a - b) % p\n def modmul(a, b, p = MOD):\n return ((a % p) * (b % p)) % p\n def moddiv(a, b, p = MOD):\n return modmul(a, modpow(b, p-2, p))\n\n \"\"\" initialize variables and set inputs\n # initialize variables\n # to initialize list, use [0] * n\n # to initialize two dimentional array, use [[0] * N for _ in range(N)]\n # set inputs\n # open(0).read() is a convenient method:\n # ex) n, m, *x = map(int, open(0).read().split())\n # min(x[::2]) - max(x[1::2])\n # ex2) *x, = map(int, open(0).read().split())\n # don't forget to add comma after *x if only one variable is used\n # preprocessing\n # transpose = [x for x in zip(*data)]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [(1, 4, 7), (2, 5, 8), (3, 6, 9)]\n # flat = [flatten for inner in data for flatten in inner]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # calculate and output\n # output pattern\n # ex1) print(*l) => when l = [2, 5, 6], printed 2 5 6\n \"\"\"\n\n # functions used\n r = lambda: sys.stdin.readline().strip()\n r_int = lambda: int(r())\n R = lambda: list(map(int, r().split()))\n Rfloat = lambda: list(map(float, r().split()))\n Rtuple = 
lambda: tuple(map(int, r().split()))\n Rmap = lambda: map(int, r().split())\n\n \"\"\" how to treat input\n # single int: int(r())\n # single string: r()\n # single float: float(r())\n # line int: R()\n # line string: r().split()\n # line (str, int, int): [j if i == 0 else int(j) for i, j in enumerate(r().split())]\n # lines int: [R() for _ in range(n)]\n \"\"\"\n\n # main\n N, Q = R()\n STX = [R() for _ in range(N)]\n STX.sort(key=itemgetter(2))\n\n D = [int(r()) for _ in range(Q)]\n Stopped = [-1] * Q\n ans = [-1] * Q\n\n for s, t, x in STX:\n l = bisect_left(D, s-x)\n r = bisect_left(D,t-x)\n a = l\n while a < r:\n if Stopped[a] == -1:\n ans[a] = x\n Stopped[a] = r\n a += 1\n else:\n a = Stopped[a]\n\n for i in ans:\n print(i)\n\n \"\"\"memo: how to use defaultdict of list\n # initialize\n Dic = defaultdict(list)\n # append / extend\n Dic[x].append(y)\n # for\n for k, v in Dic.items():\n \"\"\"", "def number_of_permutations(self) -> int:\n perms = math.factorial(len(self._word))\n for v in self._char_counts.values():\n if v > 1:\n perms /= math.factorial(v)\n return perms", "def partial_permutations(n, k):\n return int((factorial(n) / factorial(n - k)) % 1000000)", "def combinations_with_replacement_count(n, r):\n if n < 0 or r < 0:\n raise Exception('combinations_with_replacement_count(n, r) not defined when n or r is negative')\n elif n == 0:\n return 1\n else:\n return combinations_count(n + r - 1, r)", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def countTriplets2(arr, r):\n from collections import Counter\n from math import factorial\n\n # If the ratio 'r' is 1 then this is a special case of combinations\n if r == 1:\n n = len(arr)\n r = 3\n return factorial(n)//(factorial(r)*factorial(n-r))\n\n arr_dict = Counter()\n ratio_range = []\n index = 0\n counter = 0\n triplets = 0\n \n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n max_arr_dict = max(arr_dict)\n\n # With the 1 special case removed, there now cannot be triplets if there are not 3 items in the dict\n if len(arr_dict) < 3: return triplets\n \n # There is now the potential for triplets so build all possible values\n while index < max_arr_dict:\n index = r**counter\n ratio_range.append(index)\n counter += 1\n if ratio_range[-1] > max_arr_dict: ratio_range.pop(-1)\n\n for y in range(len(ratio_range)-2):\n firstkey = ratio_range[y]\n secondkey = ratio_range[y+1]\n thirdkey = ratio_range[y+2]\n \n # If there are no triplets then the loop will exit without incrementing triplets \n if firstkey not in arr_dict or secondkey not in arr_dict or thirdkey not in arr_dict: \n continue\n else:\n firstvalue = arr_dict[firstkey]\n secondvalue = arr_dict[secondkey]\n thirdvalue = arr_dict[thirdkey]\n \n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n triplets += triplet_count\n\n return triplets", "def num_mutations(self):\n return sum(len(site.mutations) for site in self.sites())", "def countTriplets3(arr, r):\n from collections import Counter\n from math import factorial\n arr_dict = Counter()\n n0 = arr[0]\n\n # If the ratio 'r' is 1 then this is a special case of combinations\n if r == 1:\n for x in arr:\n if x == n0: arr_dict[x] += 1\n n = arr_dict[n0]\n r2 = 3\n return factorial(n)//(factorial(r2)*factorial(n-r2)), arr_dict\n\n # Main variables for the rest of the function\n max_arr = max(arr)\n ratio_range = [n0]\n triplets = 0\n\n # Build all possible values\n index = n0 \n counter = 0\n while index < max_arr:\n index *= 
r\n ratio_range.append(index)\n counter += 1\n if ratio_range[-1] > max_arr: ratio_range.pop(-1)\n \n # Build the counter\n for x in arr:\n if x in ratio_range: arr_dict[x] += 1\n\n # With the 1 special case removed, there now cannot be triplets if there are not 3 items in the dict\n if len(arr_dict) < 3: return triplets, arr_dict, ratio_range\n\n for y in range(len(ratio_range)-2):\n firstkey = ratio_range[y]\n secondkey = ratio_range[y+1]\n thirdkey = ratio_range[y+2]\n \n # If there are no triplets then the loop will exit without incrementing triplets \n if firstkey not in arr_dict or secondkey not in arr_dict or thirdkey not in arr_dict: \n continue\n else:\n firstvalue = arr_dict[firstkey]\n secondvalue = arr_dict[secondkey]\n thirdvalue = arr_dict[thirdkey]\n \n triplet_count = (firstvalue) * (secondvalue) * (thirdvalue)\n triplets += triplet_count\n\n return triplets, arr_dict", "def nth_permutation(elems, n):\n pos, summ = 0, 0\n permutation = \"\"\n for i in reversed(range(1,len(elems))):\n fact_i = fact(i)\n while summ+fact_i < n:\n summ += fact_i\n pos += 1\n permutation += str(elems[pos])\n del(elems[pos])\n pos = 0\n return permutation + str(elems[0])", "def permutations(k: int) -> int:\n return factorial(k)", "def makePermutations(n):\n\thalf = n // 2\n\tfull = half * 2\n\tswap = np.random.rand(half) > 0.5\n\tpx = np.arange(n)\n\tpx[:full:2] += swap\n\tpx[1:full:2] -= swap\n\treturn px", "def count_partitions(n, m):\n # print(n, m)\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n return count_partitions(n-m, m) + count_partitions(n, m//2)", "def countTriplets4(arr, r):\n from collections import Counter\n arr_dict = {}\n n0 = arr[0]\n max_arr = max(arr)\n ratio_range = {n0: 0}\n triplets = 0\n\n # Build all possible values\n index = n0 \n counter = 1\n while index < max_arr:\n index *= r\n ratio_range[index] = counter\n counter += 1\n if index > max_arr: ratio_range.pop(index)\n \n # Remove anything that isn't a possible value and build the dictionary\n for x in range(len(arr)-1, -1, -1):\n if arr[x] not in ratio_range: \n arr.pop(x)\n continue\n if arr[x] in arr_dict:\n arr_dict[arr[x]] = [x] + arr_dict[arr[x]]\n else:\n arr_dict[arr[x]] = [x]\n if len(arr) < 3: return triplets # return 0 if there are not enough items left in arr to make a triplet\n\n # Iterate backwards through arr starting at index arr[-2]\n for n in range(len(arr)-2, -1, -1):\n item = arr[n]\n item_before = item // r if item // r in ratio_range else 0 # Set to 0 if the next value in the progression does not appear in the input\n item_after = item * r if item * r in ratio_range else 0 # Set to 0 if the previous value in the progression does not appear in the input\n if not item_before or not item_after: continue # Continue in the loop if triplets are not possible with 'item' as 'j'\n \n counter_before = sum(1 for x in arr_dict[item_before] if x < n)\n counter_after = sum(1 for x in arr_dict[item_after] if x > n)\n triplets += counter_before * counter_after\n return triplets", "def perm(n, k):\n return factorial(n)/factorial(n-k)", "def countArrangement(self, n: int) -> int:\n def iter_digit(n):\n while n:\n yield n % 2\n n //= 2\n\n @lru_cache(None)\n def dfs(i, remains):\n if i == n+1:\n return 1\n cnt = 0\n for j, d in enumerate(iter_digit(remains)):\n if d == 0:\n continue\n if j%i == 0 or i%j == 0:\n remains ^= 2**j\n cnt += dfs(i+1, remains)\n remains ^= 2**j\n return cnt\n\n # starting from 11..10 (length is n+1)\n return dfs(1, 2**(n+1)-2)", "def 
numel_from_size(size):\n s = 1\n for i in size:\n s *= i\n return s", "def get_total_combo(pool, size):\n return len(pool) ** argp.length", "def permute(n: int, k: int) -> int:\n\n # no possible permutations if k > n\n if n < k:\n return 0\n\n # if faster, compute n! and (n - k)! and return their quotient\n fact_count = len(_factorial_sequence)\n if n - fact_count <= k:\n return factorial(n) // factorial(n - k)\n\n # compute the product (n - k + 1) * (n - k + 2) * ... * n\n return seqs.arithmetic_product(n - k + 1, k)", "def get_perms(n):\n \n from itertools import permutations\n bases = 'CATGN'\n return [''.join(perm) for perm in permutations(bases, n)]", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def solution(n: int) -> int:\n sizearr = n + 1\n\n # create zero-filled multi_arr\n multi_arr = [[0 for x in range(sizearr)] for n in range(sizearr)]\n\n # base value is always skipped after being padded\n multi_arr[0][0] = 1\n for last in range(1, sizearr):\n for next in range(0, sizearr):\n multi_arr[last][next] = multi_arr[last - 1][next]\n if next >= last:\n multi_arr[last][next] += multi_arr[last - 1][next - last]\n\n return multi_arr[n][n] - 1", "def count_partitions(n, m):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n with_m = count_partitions(n - m, m)\n without_m = count_partitions(n, m - 1)\n return with_m + without_m", "def countbrute(m):\n nfound=0\n\n for i in range(1,m+1):\n for j in range(1,i+1):\n for k in range(1,j+1):\n d1=i*i+(j+k)*(j+k) \n if(checkpsq(d1)):\n nfound=nfound+1\n\n return nfound", "def distribution_for_length(\n self, n: int, perm_class: Optional[Av] = None\n ) -> List[int]:\n iterator = perm_class.of_length(n) if perm_class else Perm.of_length(n)\n cnt = Counter(self.func(p) for p in iterator)\n lis = [0] * (max(cnt.keys(), default=0) + 1)\n for key, val in cnt.items():\n lis[key] = val\n return lis", "def get_length_itertools(iter_type, iter_obj, iter_size):\n\n candidates = len(iter_obj)\n if 'permutation' in iter_type:\n total = 1\n for i in range(iter_size):\n total *= (candidates - i)\n elif 'product' in iter_type:\n total = candidates ** iter_size\n elif 'combination' in iter_type:\n total = binomail(candidates, iter_size)\n return total", "def countArrangement(self, n: int) -> int:\n @lru_cache(None)\n def dfs(i, remains: Set[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in remains:\n if i%j == 0 or j%i == 0:\n cnt += dfs(i+1, remains - {j})\n return cnt\n\n return dfs(1, frozenset(range(1, n+1)))", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))", "def nN(self):\n return int((self._n+1).prod())", "def determine_size(self):\n size = np.inf\n while size >= self.n:\n size = np.random.pareto(0.2)\n size = int(math.ceil(size))\n return size", "def combinations(n) -> float:\r\n c = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))\r\n return c", "def digit_permutations(n):\n if n != 0:\n return set([int(''.join(p)) for p in permutations(str(n)) if p[0] != '0'])\n else:\n return {0}", "def list_permutations(n,r, multiset=False):\n if 
not multiset:\n return list(itertools.permutations(n,r))\n\n elif multiset:\n return set(list(itertools.permutations(n,r)))", "def permutations(iterable, r=None):\n pool = tuple(iterable)\n n = len(pool)\n if r is None:\n r = n\n indices = list(range(n))\n cycles = list(range(n-r+1, n+1))[::-1]\n yield tuple(pool[i] for i in indices[:r])\n while n:\n for i in reversed(list(range(r))):\n cycles[i] -= 1\n if cycles[i] == 0:\n indices[i:] = indices[i+1:] + indices[i:i+1]\n cycles[i] = n - i\n else:\n j = cycles[i]\n indices[i], indices[-j] = indices[-j], indices[i]\n yield tuple(pool[i] for i in indices[:r])\n break\n else:\n return", "def number_of_trees_of_order(n):\n if n < 2:\n return n\n result = 0\n for k in range(1, n):\n result += k * number_of_trees_of_order(k) * _s(n-1, k)\n return result // (n - 1)", "def get_generation_number(self, size=None):\n if size is None:\n size = self.get_param('population_size')\n if size is None:\n # size = len(list(self.c.select(relaxed=0,generation=0)))\n return 0\n lg = size\n g = 0\n all_candidates = list(self.c.select(relaxed=1))\n while lg > 0:\n lg = len([c for c in all_candidates if c.generation == g])\n if lg >= size:\n g += 1\n else:\n return g", "def permutationSizeList(c, size=1, prev=[]):\n for n in xrange(len(c)):\n if size == 1:\n yield prev + [c[n]]\n else:\n for p in permutationSizeList(c, size-1, prev + [c[n]]):\n yield p", "def n_per_item(self):\n return self.lam().sum(axis=0)", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n return sum(product(len(v) for v in p.values()) if p else 1\n for p in self.param_grid)", "def windows_of_permutations(n, step):\n def gen(p):\n for i in range(0, NB_AVIONS-n, step):\n for perm in all_permutations(range(i, i+n))(p):\n yield perm\n return gen", "def num_squares(n):\n nums = _squares(n)\n\n dp = [0] * (n + 1)\n dp[0] = 0\n\n for i in range(1, n + 1):\n can = [j for j in nums if j <= i]\n dp[i] = 1 + min([dp[i - c] for c in can])\n\n return dp[n]", "def countTriplets(arr, r):\n c_2, c_3 = Counter(), Counter()\n n_triplets = 0\n for e in arr:\n # print(f'arr: {arr}, e: {e}, c_3: {c_3}, c_2: {c_2}, n_triplets: {n_triplets}')\n if e in c_3:\n n_triplets += c_3[e]\n if e in c_2:\n c_3[e*r] += c_2[e]\n c_2[e*r] += 1\n return n_triplets", "def cube_perms(num_perms=5):\n cubed_counts = collections.defaultdict(Cubes)\n\n result = None\n for i in count(start=1):\n cubed_num = i**3\n perm = ''.join(sorted(str(cubed_num))) # encountered cubed_num sorted into single string\n\n if result is not None and len(perm) > len(str(result)):\n return result # return result if result exists and shorter than current perm\n \n cubed_counts[perm] += cubed_num # count each 'permutation' and add cubed_num to list of cubes\n\n if cubed_counts[perm].count == num_perms:\n if not has_more_cubes(perm, i):\n smallest_cube = min(cubed_counts[perm].cubes)\n if result is None or smallest_cube < result:\n result = smallest_cube", "def permutation(random_state, size=None, n=1, ndim=None, dtype='int64'):\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size)\r\n #print \"NDIM\", ndim, size\r\n op = RandomFunction(permutation_helper,\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast + (False,)),\r\n ndim_added=1)\r\n return op(random_state, size, n)", "def calulate_total_permutations(value):\n if len(value) == len(set(value)):\n return factorial(len(value))\n\n count_map = {}\n for char in value:\n if char in count_map:\n count_map[char] += 1\n else:\n 
count_map[char] = 1\n\n base_factor = factorial(len(value))\n additional_factors = [factorial(x) for x in count_map.values()]\n result = 1\n for x in additional_factors:\n result = result * x\n return base_factor / result", "def choose(n: int, k: int) -> int:\n return permute(n, k) // factorial(k)", "def get_integers(n: int) -> int:\n l = [int(sqrt(n))]\n val = l[0] * l[0]\n index = 0\n while val != n:\n val = sum([x*x for x in l])\n if val > n:\n l[index] -= 1\n elif val < n:\n index += 1\n l.append(l[index - 1])\n\n return len(l)", "def combinations(iterable, r):\n pool = tuple(iterable)\n n = len(pool)\n if r > n:\n return\n indices = list(range(r))\n yield tuple(pool[i] for i in indices)\n while True:\n for i in reversed(range(r)):\n if indices[i] != i + n - r:\n break\n else:\n return\n indices[i] += 1\n for j in range(i+1, r):\n indices[j] = indices[j-1] + 1\n yield tuple(pool[i] for i in indices)", "def compute_pool(in_size):\n return (in_size - 2) // 2 + 1", "def __len__(self):\n # Product function that can handle iterables (np.product can't).\n product = partial(reduce, operator.mul)\n modstr = '%s__' % self.modality\n return sum(product(len(v) for k, v in p.items() if modstr in k) if p else 1\n for p in self.param_grid)", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def choose(n,r):\n \tc=0\n \tif (n>=0) and (0 <= r <= n):\n\t \tc = 1L\n\t \tdenom = 1\n\t \tfor (num,denom) in zip(xrange(n,n-r,-1), xrange(1,r+1,1)):\n\t\t\tc = (c * num) // denom", "def number_of_ways(n):\r\n return number_of_ways_helper([1, 5, 10, 25], n)", "def cardinality(self):\n from sage.arith.all import binomial\n n = self._size\n if n == 0:\n return Integer(1)\n return (2 * binomial(4 * n + 1, n - 1)) // (n * (n + 1))\n # return Integer(2 * factorial(4*n+1)/(factorial(n+1)*factorial(3*n+2)))", "def len_score(n):\n return len(n)", "def _rep(self, num_repeats):\n return int(np.ceil(self.depth_mul * num_repeats))", "def permutations(a):\n n = len(a)\n return _heap_perm_(n, a)", "def totalSolutions(n:int):\n\n memo = [0]*6\n memo[5] = 1\n if n == 1:\n return 1\n\n for i in range(n):\n solutions = sum(memo)\n memo.pop(0)\n memo.append(solutions)\n\n return memo.pop()", "def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div", "def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result", "def combination(num_m: int, num_n: int) -> int:\n if num_m < num_n:\n return 0\n\n tmp_m = 1\n tmp_n = 1\n tmp_cnt = 0\n for i in range(num_n, 0, -1):\n tmp_n *= i\n tmp_m *= num_m - tmp_cnt\n tmp_cnt += 1\n\n return tmp_m / tmp_n", "def main():\n check_input(sys.argv[0])\n with open(sys.argv[1]) as infile:\n n = int(infile.readline().strip())\n k = int(infile.readline().strip())\n\n print(partial_permutations(n, k))", "def countArrangement(self, n: int) -> int:\n def dfs(i, remains: List[int]):\n if i == n+1:\n return 1\n cnt = 0\n for j in range(1, n+1):\n if remains[j] is None and (i%j == 0 or j%i == 0):\n remains[j] = i\n cnt += dfs(i+1, remains)\n remains[j] = None\n return cnt\n\n return dfs(1, [None]*(n+1))", "def n_matrices_per_weight(weight, n_bits):\n return int(_sp.special.binom(n_bits, weight))", "def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = 
n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div", "def numberOfNodes( gen ):\n return int( scipy.sum( [ 3.**i for i in range( 1, gen + 2 ) ] ) )", "def countm(m):\n nfound=0\n\n for i in range(1,m+1):\n for jpk in range(2,(2*i)+1):\n d1=i*i+(jpk)*(jpk) \n if(checkpsq(d1)): \n if(jpk<=i):\n factor=jpk/2 \n else:\n factor=((2*i-jpk)+2)/2 \n nfound=nfound+factor\n\n return nfound", "def num_vertex_sets(self, r):\n return sage.all.binomial(self.n, r)", "def solve_sample_size(self):\n e = self.normalized_effect_size()\n df_denom_solve = FTestPower().solve_power(\n effect_size=e\n ,df_num=None\n ,df_denom=self.df_num\n ,alpha=self.alpha\n ,power=(1 - self.beta)\n ,ncc=1\n )\n n = int(df_denom_solve + len(self.test_splits))\n return n", "def num_lines(dim: int, size: int) -> int:\n\n count = 0\n for i in range(1, dim + 1):\n count += comb(dim, i, True) * (size ** (dim - i)) * (2 ** (i - 1)) \n return count", "def num_mutations(self):\n return len(self.fuzz_library)", "def find_prime_permutations(primes, n):\n\n candidates = [int(\"\".join(digits)) for digits in sorted(set(permutations(str(n))))]\n return [c for c in candidates if c in primes]", "def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val", "def int_to_perm(i, n=1):\n\twhile factorial(n) <= i:\n\t\tn += 1\n\tperm = [0 for _ in xrange(n)]\n\tfor k in xrange(n - 1, -1, -1):\n\t\tj, num_zeros = 0, 0\n\t\tkf = factorial(k)\n\t\t# position within the remaining (nonzero) slots\n\t\tpos = k - i / kf\n\t\ti %= kf\n\t\tplaced = False\n\t\twhile not placed:\n\t\t\tif perm[j] != 0:\n\t\t\t\tj += 1\n\t\t\telif num_zeros < pos:\n\t\t\t\tnum_zeros += 1\n\t\t\t\tj += 1\n\t\t\telse:\n\t\t\t\t# pos == num_zeros; you've found the right slot\n\t\t\t\tperm[j] = k + 1\n\t\t\t\tplaced = True\n\treturn Permutation(perm)", "def numSquares(self, n):\n # Generate perfect square numbers smaller than n.\n perfect_square_numbers = []\n i = 1\n square_i = i * i\n while square_i <= n:\n perfect_square_numbers.append(square_i)\n i += 1\n square_i = i * i\n\n cur_level = [n]\n count = 0\n while cur_level:\n count += 1\n tmp = []\n for num in cur_level:\n for val in perfect_square_numbers:\n if num == val:\n return count\n if val < num:\n tmp.append(num - val)\n if val > num:\n break\n cur_level = tmp\n return count", "def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total", "def trivial_permutation(n: int) -> List[int]:\n return list(range(n))", "def unique_climbs(N, X={1, 2}):\n result = 0\n\n if N == 0:\n return 1\n if N < 0:\n return 0\n\n for cnt in X:\n result += unique_climbs(N-cnt, X)\n\n return result", "def binom(n, r):\n return factorial(n) // ((factorial(r) * factorial(n - r)))", "def sum_proper_divisors(n):\r\n return sum(proper_divisors(n))", "def nCr(\n n: int,\n r: int = 0,\n l: list = [],\n ) -> int:\n\n # Initialisations\n c = 0\n\n # Check if the list is not empty\n if l != []:\n\n # Loop through the list\n for i in l:\n\n # Cumulatively calculate the n Choose r value for each list item\n c += comb(n, i, exact = True)\n\n # Check if r is not zero\n elif r != 0:\n\n # Calculate the n Choose r value\n c = comb(n, r, exact = True)\n\n else:\n\n # Loop through all possible r\n for i in range(0, n):\n\n # Cumulatively calculate the n Choose r value\n c += comb(n, i, exact = True)\n\n return c", "def test_permutation(n: int) -> List[int]:\n permutation = [i + 1 for i in range(n)]\n cycle_length = 2\n cycle_index = 0\n\n while 
cycle_length + cycle_index < n:\n permutation[cycle_index + cycle_length - 1] = cycle_index\n cycle_index += cycle_length\n cycle_length += 1\n\n if n > 0:\n permutation[n - 1] = cycle_index\n\n return permutation", "def num_divisors(n):\n\tif n < 2:\n\t\treturn 1 \t# not really correct\n\t\n\tdivisors = 1\n\ti = 2\n\n\twhile n > 1:\n\t\tp = 0 \t# p will be the maximum x such that i^x evenly divides n\n\n\t\t# repeatedly divide n by i, and store the number of times into p\n\t\twhile (n % i == 0):\n\t\t\tn = n / i\n\t\t\tp += 1\n\n\t\tdivisors = divisors * (p + 1)\n\t\ti += 1\n\n\treturn divisors", "def countPrimesOptimized(self, n: int) -> int:\n if n < 2:\n return 0\n\n size = (n - 3) // 2 + 1 # -3 for 0,1,2 and // 2 to ignore evens\n primes = [2]\n is_prime = [True for i in range(size)] # represents if (2i+3) is prime\n\n for i in range(size):\n if is_prime[i]:\n p = 2 * i + 3\n primes.append(p)\n # Sieve from p^2, where p^2 = (2i+3)^2 = (4i^2 + 12i + 9)\n # Index in is_prime is (2i^2 + 6i + 3)\n # because is_prime[i] = 2i + 3.\n for j in range(2 * i**2 + 6 * i + 3, size, p):\n is_prime[j] = False\n\n return len(primes) - 1 if primes[-1] == n else len(primes)", "def num_squares(n):\n squares = _squares(n)\n cnt = 0\n remains = {n}\n while remains:\n cnt += 1\n tmp = set()\n for remain in remains:\n for sq in [sqq for sqq in squares if sqq <= remain]:\n if remain == sq:\n return cnt\n else:\n tmp.add(remain - sq)\n remains = tmp", "def factorPR(n):\n\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n)))\n\tfor additive in range(1,5):\n\t\tfast=slow=1; i=1\n\t\twhile i<numsteps:\n\t\t\tslow = (slow*slow + additive) % n\n\t\t\ti = i + 1\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tfast = (fast*fast + additive) % n\n\t\t\tg = gcd(fast-slow,n)\n\t\t\tif (g != 1):\n\t\t\t\tif (g == n):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\treturn g\n\treturn 1", "def permutation_test_mat(matrix,\n n_1, n_2, n_permutations,\n a00=1, a11=1, a01=0):\n n = n_1 + n_2\n pi = np.zeros(n, dtype=np.int8)\n pi[n_1:] = 1\n\n larger = 0.\n count = 0\n \n for sample_n in range(1 + n_permutations):\n count = 0.\n for i in range(n):\n for j in range(i, n):\n mij = matrix[i, j] + matrix[j, i]\n if pi[i] == pi[j] == 0:\n count += a00 * mij\n elif pi[i] == pi[j] == 1:\n count += a11 * mij\n else:\n count += a01 * mij\n if sample_n == 0:\n statistic = count\n elif statistic <= count:\n larger += 1\n\n np.random.shuffle(pi)\n\n return larger / n_permutations", "def BLOCK_SIZE(id, p, n):\n\n\treturn BLOCK_HIGH(id, p, n) - BLOCK_LOW(id, p, n) + 1", "def perms(n, k):\n if n < k:\n return 0\n return partition(n, [n - k])", "def get_num_pairs(seq):\n n = len(seq)\n return int(n * (n-1)/2) # sum of arphmetic progression (n-1)...1", "def n_choose_r(n, r):\n if r == 1:\n return n\n elif r == n:\n return 1\n elif r == 0 or n == 0:\n return 0\n else:\n return n_choose_r(n-1, r-1) + n_choose_r(n-1, r)", "def get_permutatation_by_length(length, permutation_set):\n pass", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)" ]
[ "0.77895117", "0.7716931", "0.7273337", "0.72685605", "0.69712794", "0.6970604", "0.6709193", "0.66836315", "0.6649834", "0.6625802", "0.6509674", "0.6436063", "0.641432", "0.6391373", "0.63186556", "0.63137746", "0.6243626", "0.62363577", "0.62117946", "0.61875135", "0.6152499", "0.6148352", "0.6143356", "0.61321783", "0.61187434", "0.6113931", "0.6113931", "0.60933304", "0.607266", "0.606661", "0.60531056", "0.6026212", "0.6020567", "0.6016457", "0.6011566", "0.6010351", "0.6001242", "0.60010266", "0.5997774", "0.59876627", "0.59500396", "0.5945387", "0.59150594", "0.5906148", "0.58784246", "0.5876959", "0.5873657", "0.5872746", "0.58719546", "0.5852894", "0.5838529", "0.5832077", "0.58319044", "0.5827651", "0.5824684", "0.58165705", "0.58087075", "0.5801233", "0.57989144", "0.5775007", "0.57665443", "0.57652795", "0.5764691", "0.57570195", "0.5755271", "0.57527024", "0.5735978", "0.573589", "0.5735177", "0.5725096", "0.5715118", "0.57090014", "0.56954837", "0.56941944", "0.5684221", "0.5673116", "0.5665992", "0.5661407", "0.5658949", "0.5643462", "0.56425786", "0.5639229", "0.56359375", "0.5631594", "0.56248856", "0.562118", "0.56189716", "0.56134486", "0.5608684", "0.5606244", "0.5598601", "0.55967814", "0.5586431", "0.558465", "0.55800897", "0.55786026", "0.55763936", "0.55698", "0.5569521", "0.5563439" ]
0.7123683
4
Returns the default collector settings
def get_default_config(self):
    config = super(YammerCollector, self).get_default_config()
    config.update({
        'path': 'yammer',
        'url': 'http://127.0.0.1:8081/metrics',
        'username': '',
        'password': '',
    })
    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_default_config(self):\r\n config = super(CMDCollector, self).get_default_config()\r\n config.update({\r\n 'enabled': 'True',\r\n 'fs': ',',\r\n 'timeout': 300,\r\n })\r\n return config", "def getDefaultSettings():\n return {}", "def get_default_config(self):\n config = super(DropwizardCollector, self).get_default_config()\n config.update({\n 'url': DEFAULT_METRICS_URL,\n 'path': 'dropwizard'\n })\n return config", "def get_default_config(self):\n config = super(VMSDomsCollector, self).get_default_config()\n config.update({\n 'path': 'vms'\n })\n return config", "def get_default_options():\n return GROUPS_.values()", "def get_default_config(self):\n config = super(EndecaDgraphCollector, self).get_default_config()\n config.update({\n 'path': 'endeca.dgraph',\n 'host': 'localhost',\n 'port': 8080,\n 'timeout': 1,\n })\n return config", "def get_defaults(self):\n\t\treturn self.__defaults", "def get_default_config(self):\n config = super(DiskSpaceCollector, self).get_default_config()\n config.update({\n 'path': 'diskspace',\n # filesystems to examine\n 'filesystems': 'ext2, ext3, ext4, xfs, glusterfs, nfs, nfs4, ' +\n ' ntfs, hfs, fat32, fat16, btrfs',\n\n # exclude_filters\n # A list of regex patterns\n # A filesystem matching any of these patterns will be excluded\n # from disk space metrics collection.\n #\n # Examples:\n # exclude_filters =,\n # no exclude filters at all\n # exclude_filters = ^/boot, ^/mnt\n # exclude everything that begins /boot or /mnt\n # exclude_filters = m,\n # exclude everything that includes the letter \"m\"\n 'exclude_filters': ['^/export/home'],\n\n # Default numeric output\n 'byte_unit': ['byte']\n })\n return config", "def _default_config(cls):\n return dict()", "def get_default_config(self):\n default_config = super(SNMPInterfacePollCollector,\n self).get_default_config()\n default_config['path'] = 'snmp.interface'\n default_config['byte_unit'] = ['bit', 'byte']\n return default_config", "def get_default_config(self):\n config = super(SlurmJobWasteCollector, self).get_default_config()\n config.update({\n 'path': 'waste'\n })\n return config", "def default_settings(self, settings):\n return {}", "def get_default_config(self):\n config = super(PuppetAgentCollector, self).get_default_config()\n config.update({\n 'yaml_path': '/var/lib/puppet/state/last_run_summary.yaml',\n 'path': 'puppetagent',\n })\n return config", "def defaults(self):\n return self.conf.get(\"defaults\", [])", "def get_default_config(self):\n config = super(BindCollector, self).get_default_config()\n config.update({\n 'host': 'localhost',\n 'port': 8080,\n 'path': 'bind',\n # Available stats:\n # - resolver (Per-view resolver and cache statistics)\n # - server (Incoming requests and their answers)\n # - zonemgmt (Requests/responses related to zone management)\n # - sockets (Socket statistics)\n # - memory (Global memory usage)\n 'publish': [\n 'resolver',\n 'server',\n 'zonemgmt',\n 'sockets',\n 'memory',\n ],\n # By default we don't publish these special views\n 'publish_view_bind': False,\n 'publish_view_meta': False,\n })\n return config", "def get_config_defaults(self): # pylint: disable=R0201\n return {}", "def get_default_config(self):\n config = super(DiskHealthCollector, self).get_default_config()\n config.update({\n 'enabled': 'True',\n 'devices': ('PhysicalDrive[0-9]+$'\n + '|md[0-9]+$'\n + '|sd[a-z]+[0-9]*$'\n + '|x?vd[a-z]+[0-9]*$'\n + '|disk[0-9]+$'\n + '|dm\\-[0-9]+$'),\n 'fs_types': ','.join(self.SUPPORTED_FS_TYPES),\n 'raw_stats_only': False,\n 'test_file_name': 
self.TEST_FILE_NAME,\n 'test_file_size': self.TEST_FILE_SIZE\n })\n return config", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def get_default_config(self):\n return {\n 'path': 'mysql',\n # Connection settings\n 'host': 'localhost',\n 'port': 3306,\n 'db': 'yourdatabase',\n 'user': 'yourusername',\n 'passwd': 'yourpassword',\n \n # Which rows of 'SHOW GLOBAL STATUS' you would like to publish.\n # http://dev.mysql.com/doc/refman/5.1/en/show-status.html\n # Leave unset to publish all\n #'publish' : '',\n\n 'slave': 'False',\n 'master': 'False',\n }", "def get_default_config(self):\n config = super(UDPCollector, self).get_default_config()\n config.update({\n 'path': 'udp',\n 'allowed_names': 'InDatagrams, NoPorts, '\n + 'InErrors, OutDatagrams, RcvbufErrors, SndbufErrors'\n })\n return config", "def getDefaultConfig():\n config = {\n \"samples\": _DEFAULT_SAMPLE_COUNT,\n \"channel\": \"all\",\n \"rate\": _DEFAULT_SAMPLE_RATE,\n \"update\": 1,\n \"output\": \"data.rld\",\n \"format\": \"rld\",\n \"size\": _DEFAULT_FILE_SIZE,\n \"comment\": _DEFAULT_FILE_COMMENT,\n \"digital\": True,\n \"ambient\": False,\n \"aggregate\": \"downsample\",\n \"high-range\": [],\n \"web\": False,\n }\n return config", "def defaults(self):\n return self._config_parser.defaults()", "def getDefaults():\n return {\n 'minsize': 10, # minimum size in MB\n 'pattern': [], # file name patterns\n }", "def get_cfg_defaults():\n return _C.clone()", "def get_cfg_defaults():\r\n # Return a clone so that the defaults will not be altered\r\n # This is for the \"local variable\" use pattern\r\n return _C.clone()", "def get_default_config(self):\n config = super(NumaCollector, self).get_default_config()\n config.update(\n {\n \"path\": \"numa\",\n \"bin\": self.find_binary(\"numactl\"),\n }\n )\n\n return config", "def default_options(cls) -> Dict:\n return {}", "def defaults(self):\n\n return None", "def defaults(self):\n\n return None", "def get_default_settings(cfg):\n cfg = deepcopy(cfg)\n cfg.setdefault('metric', 'regression_slope')\n cfg.setdefault('n_jobs', 1)\n cfg.setdefault('savefig_kwargs', {\n 'bbox_inches': 'tight',\n 'dpi': 600,\n 'orientation': 'landscape',\n })\n logger.info(\"Using at most %i processes\", cfg['n_jobs'])\n return cfg", "def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}", "def requested_config_vals():\n return {'transfer_stats_per_file':'opt'}", "def get_default_job_settings(self):\n return self._default_job_settings", "def get_default_config(self):\n config = super(InterruptCollector, self).get_default_config()\n config.update({\n 'path': 'interrupts'\n })\n return config", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", 
"def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def _read_default_settings():\n global settings\n return {s: getattr(settings, s)\n for s in dir(settings)\n if s.isupper()}", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return C.clone()", "def defaults():\n return {}", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern recommended by the YACS repo.\n # It will be subsequently overwritten with local YAML.\n return __C.clone()", "def get_default(cls):\n return cls.ALL", "def get_default(cls):\n return cls.ALL", "def default_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig']:\n return pulumi.get(self, \"default_config\")", "def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }", "def get_default_config(self):\n config = super(NvidiaGPUCollector, self).get_default_config()\n config.update({\n 'path': 'nvidia',\n 'bin': '/usr/bin/nvidia-smi',\n 'stats': [\n 'index',\n 'memory.total',\n 'memory.used',\n 'memory.free',\n 'utilization.gpu',\n 'utilization.memory',\n 'temperature.gpu'\n ]\n })\n return config", "def loadConfig(self):\n return self._defaults", "def get_default_config(self):\n config = super(UserScriptsCollector, self).get_default_config()\n config.update( {\n 'path': '.',\n 'scripts_path': '/etc/diamond/user_scripts/',\n 'method': 'Threaded',\n } )\n return config", "def defaults() -> dict:\n pass", "def default_setting(self):\n\t\tunknown_as_zero = False\n\t\tbinary_rele = False # using the original values\n\t\tpresort = False # a default setting\n\n\t\tscale_data, scaler_id, scaler_level = get_default_scaler_setting(data_id=self.data_id)\n\n\t\t# more data settings that are rarely changed\n\t\tself.data_dict = dict(data_id=self.data_id, dir_data=self.dir_data, min_docs=10, min_rele=1,\n\t\t\t\t\t\t sample_rankings_per_q=1, unknown_as_zero=unknown_as_zero, binary_rele=binary_rele,\n\t\t\t\t\t\t presort=presort, scale_data=scale_data, scaler_id=scaler_id, scaler_level=scaler_level)\n\n\t\tdata_meta = get_data_meta(data_id=self.data_id) # add meta-information\n\t\tself.data_dict.update(data_meta)\n\n\t\treturn self.data_dict", "def get_defaults():\r\n profile = settings.profile_manager.get(\"default\")\r\n config = profile.get_config('freeseer.conf', settings.FreeseerConfig, storage_args=['Global'], 
read_only=True)\r\n return {\r\n \"video_directory\": config.videodir,\r\n \"oauth2_token\": os.path.join(settings.configdir, \"oauth2_token.json\"),\r\n \"client_secrets\": os.path.join(settings.configdir, \"client_secrets.json\")\r\n }", "def get_settings():\n return SettingCollection.build()", "def get_default_config():\n\n config = {}\n\n for name, cls in get_tools().items():\n config[name] = cls.get_default_config()\n\n try:\n workers = multiprocessing.cpu_count() - 1\n except NotImplementedError: # pragma: no cover\n workers = 1\n workers = max(1, min(4, workers))\n\n config.update({\n 'exclude': [],\n 'merge-issues': True,\n 'workers': workers,\n 'disabled': [],\n 'noqa': True,\n 'extends': [],\n 'ignore-missing-extends': False,\n })\n\n return config", "def bootstrap_default():\n\treturn default_configuration", "def settings(self):\n return {}", "def get_default_visualizer_config(self):\n\n default_config = {\n\n \"format\": \"png\",\n\n \"fontsize\": 16,\n \"fontname\": 'Roboto',\n \"rankdir\": \"TB\",\n\n # Specific offsets can be specified here\n # for different shades. Change the values\n # below 0 and 1. For best results we recommend\n # to keep the range between 0.1 - 0.3\n \"layer_color_dict\": {\n \"InputLayer\": 0.1,\n \"Reshape\": 0.12,\n \"Conv1D\": 0.13,\n \"Conv2D\": 0.17,\n \"MaxPooling1D\": 0.19,\n \"MaxPooling2D\": 0.20,\n \"ZeroPadding3D\": 0.22,\n \"Flatten\": 0.25,\n \"AveragePooling2D\": 0.27,\n \"Dropout\": 0.29,\n \"Dense\": 0.3,\n \"Concatenate\": 0.32,\n \"Model\": 0.34,\n \"RepeatVector\": 0.36,\n \"Multiply\": 0.38,\n \"Add\": 0.39,\n \"Lambda\": 0.4,\n \"SpatialDropout1D\": 0.41,\n \"SpatialDropout2D\": 0.44\n },\n # Please provide as many colors\n # as many models you expect.\n # This package will\n # generate random colors incase\n # colors fall short, but there\n # is no guarantee that they will be\n # pretty\n 'default_color_package': [\n [0.586, 1.000, 1.000],\n [0.513, 0.141, 0.725],\n [0.094, 1.000, 1.000],\n [0.375, 0.739, 0.780],\n [0.967, 0.816, 0.961],\n [0.286, 1.000, 1.000],\n [0.750, 0.416, 0.961],\n [0.778, 0.631, 0.871],\n [0.613, 0.141, 0.725],\n [0.850, 0.539, 0.780],\n [0.186, 1.000, 1.000]\n ],\n \"class_names\": True,\n \"graph_label\": \"Nested SeeNN\",\n \"node_seperation_distance\": 0.4,\n\n 'module_connection_color': 'black',\n 'collapse_inputs': False,\n 'layer_height': 0.5,\n 'layer_width': 2,\n # 'condense_dropout_layer': False,\n\n # Specify if to use multiple color layers,\n # rather than shade\n 'use_multiple_colors_layers': False,\n\n # If use_multiple_colors_layers is Fa,se,\n # provide the colors\n 'multiple_colors_layer_package': {\n \"InputLayer\": \"grey\",\n \"Reshape\": \"#F5A286\",\n \"Conv1D\": \"#F7D7A8\",\n \"Conv2D\": \"#F7D7A8\",\n \"MaxPooling1D\": \"#AADFA2\",\n \"MaxPooling2D\": \"#AADFA2\",\n \"ZeroPadding3D\": \"grey\",\n \"Flatten\": \"grey\",\n \"AveragePooling2D\": \"#A8CFE7\",\n \"Dropout\": \"#9896C8\",\n \"Dense\": \"#C66AA7\",\n \"Concatenate\": \"#F5A286\",\n \"Model\": \"#292D30\",\n \"RepeatVector\": \"grey\",\n \"Multiply\": \"grey\",\n \"Add\": \"grey\",\n \"Lambda\": \"#CAAFE7\",\n \"SpatialDropout1D\": \"#FFAAEE\",\n \"SpatialDropout2D\": \"#CAAFE7\"\n },\n\n 'show_activation': False,\n 'rotate': 90,\n 'show_constant_input': False\n\n }\n\n return default_config", "def default(cls) -> 'Config':\n parser: configparser.ConfigParser = configparser.ConfigParser()\n parser.read_dict(dict(wpwatcher=Config.DEFAULT_CONFIG))\n return cls.fromparser(parser)", "def get_default_config():\n return 
_config_schema_to_namespace(_CONFIG_SCHEMA)", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def _getGroupDefaults(self):\n defaults = self.getDefaultGroupContainer(\n _name = \"defaults\",\n diff_command = self.general.diff_command,\n cia_rpc_server = self.general.cia_rpc_server,\n )\n try:\n self._passConfig(defaults, \"defaults\")\n except ConfigSectionNotFoundError:\n # [defaults] is optional\n pass\n else:\n self._config.remove_section('defaults')\n\n return defaults", "def get_default_config():\n # pylint: disable=cyclic-import\n from raylab.agents.sac import DEFAULT_CONFIG\n\n return DEFAULT_CONFIG", "def default_user_settings(self) -> pulumi.Output['outputs.DomainUserSettings']:\n return pulumi.get(self, \"default_user_settings\")", "def fusion_api_get_lsg_default_settings(self, api=None, headers=None):\n return self.lsg.get(api=api, param='/defaultSettings', headers=headers)", "def default_configs(cls):\n return {\n 'redirect_path': None,\n 'nif_page_structure': None,\n 'nif_text_links': None,\n }", "def getDefaultProperties(self):\n return _libsbml.CompFlatteningConverter_getDefaultProperties(self)", "def default_page_config(self):\n return self._default_page_config", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings", "def get_persisted_default_config_fields(self):\n return []", "def defaults(self):\n\n return Storage(\n vulnerability_resilience_id = lambda i: [],\n vulnerability_ids = lambda i: None,\n )", "def _get_default_options():\n return {\n \"library_folders\": [],\n \"verbose\": False,\n \"check_balanced\": True,\n \"mtime_check\": True,\n \"cache\": False,\n \"codegen\": False,\n \"expand_mx\": False,\n \"unroll_loops\": True,\n \"inline_functions\": True,\n \"expand_vectors\": False,\n \"resolve_parameter_values\": False,\n \"replace_parameter_expressions\": False,\n \"replace_constant_expressions\": False,\n \"eliminate_constant_assignments\": False,\n \"replace_parameter_values\": False,\n \"replace_constant_values\": False,\n \"eliminable_variable_expression\": None,\n \"factor_and_simplify_equations\": False,\n \"detect_aliases\": False,\n \"allow_derivative_aliases\": True,\n \"reduce_affine_expression\": False,\n }", "def get_bot_defaults():\n return {POST_TIMELIMIT: 86400,\n REDIS_BACKEND: 'redis',\n REDIS_URL: 'localhost',\n REDIS_PORT: 6379,\n REDIS_PASSWORD: None,\n ROLLBAR_ENVIRONMENT: 'staging'}", "def _default_options(cls):\n default_options = super()._default_options()\n default_options.data_processor = dp.DataProcessor(\n input_key=\"counts\",\n data_actions=[dp.Probability(\"1\"), dp.BasisExpectationValue()],\n )\n default_options.plotter.set_figure_options(\n xlabel=\"Flat top width\",\n ylabel=\"Pauli expectation values\",\n xval_unit=\"s\",\n ylim=(-1, 1),\n )\n default_options.data_subfit_map = {\n \"x\": {\"meas_basis\": \"x\"},\n \"y\": {\"meas_basis\": \"y\"},\n \"z\": {\"meas_basis\": \"z\"},\n }\n\n return default_options", "def defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", "def __init__(self, collectd):\n self.collectd = collectd\n self.conf = self.default_config()\n self.types = {}\n\n collectd.info('Initialized MetricsConfig with default config %s' % self.conf)", "def default():\n return DefaultSwh.default()", "def 
loadDefaults(self):\n # (025) Merged into settings.RawSettings.\n pass", "def default(self):\n return self._configs[0] if len(self._configs) else None", "def default_options(cls):\n options_to_report = dict()\n for c in inspect.getmro(cls):\n parameter_names, _, _, defaults, _, _, _ = inspect.getfullargspec(c.__init__)\n if defaults:\n class_options = {parameter_name: defaults[index] for (index, parameter_name) in\n enumerate(parameter_names[-len(defaults):])}\n options_to_report.update(class_options)\n options_to_report.pop('mcmc_moves')\n return options_to_report", "def get_default_params():\n\n with IOTools.open_file(os.path.join(os.path.dirname(__file__),\n \"defaults.yml\")) as inf:\n result = yaml.load(inf, Loader=RoundTripLoader)\n return result", "def getCurrentSetting(self):\n return {}", "def GetDefaultLayerProperties():\r\n pass", "def defaults(self) -> Dict[str, Any]:\n if self._defaults is _missing:\n return {}\n return self._defaults", "def GetFileCleanerSettings():\n obj = ndb.Key(FileCleanerSettings, FILE_CLEANER_SETTINGS_ID).get()\n return obj or DEFAULT_FILE_CLEANER_SETTINGS", "def initDefaults(self):\n return _libsbml.Compartment_initDefaults(self)", "def settings(self) -> Dict[str, Any]:\n return {}", "def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}", "def default_mdc():\n MDC.put('instanceUUID', generate_uuid())\n MDC.put('InvocationID', generate_uuid())\n MDC.put('serviceName', 'OOF_OSDF')\n MDC.put('threadID', threading.currentThread().getName())\n default_server_info()\n MDC.put('requestID', generate_uuid())\n MDC.put('partnerName', 'N/A')\n MDC.put('entryTimestamp', get_time())", "def settings():\n return _get_settings()[1]", "def getSettings(self):\n return self.cfg", "def get_defaults(cls, mode):\n mode_defaults = getattr(cls, f\"{mode.upper()}_DEFAULTS\")\n defaults = PlotConfig({**cls.COMMON_DEFAULTS, **mode_defaults})\n return defaults", "def get_defaults(cls, mode):\n mode_defaults = getattr(cls, f\"{mode.upper()}_DEFAULTS\")\n defaults = PlotConfig({**cls.COMMON_DEFAULTS, **mode_defaults})\n return defaults", "def default_space_settings(self) -> pulumi.Output[Optional['outputs.DomainDefaultSpaceSettings']]:\n return pulumi.get(self, \"default_space_settings\")", "def default_user_settings(self) -> pulumi.Input['DomainUserSettingsArgs']:\n return pulumi.get(self, \"default_user_settings\")", "def getDefaultProperties(self):\n return _libsbml.SBMLStripPackageConverter_getDefaultProperties(self)", "def requested_config_vals():\n return {} # no extra values needed", "def get_config(self):\n return {}", "def get_settings(self):\n return [('test_environment', self.test_environment),\n ('base_data_dir', self.base_data_dir),\n ('locale', self.locale)]", "def default_setting(self):\n\t\tdo_log = False if self.debug else True\n\t\tdo_validation, do_summary = False, False\n\t\tlog_step = 2\n\t\tepochs = 50\n\t\tvali_k = 5\n\n\t\t'''on the usage of mask_label\n\t\t(1) given a supervised dataset, True means that mask a supervised data to mimic unsupervised data\n\t\t(2) given an unsupervised dataset, this setting is not supported, since it is already an unsupervised data\n\t\t'''\n\t\tmask_label = False\n\t\tif mask_label:\n\t\t\tassert not self.data_id in MSLETOR_SEMI\n\t\t\tmask_ratio = 0.1\n\t\t\tmask_type = 'rand_mask_rele'\n\t\telse:\n\t\t\tmask_ratio = None\n\t\t\tmask_type = None\n\n\t\t# more evaluation settings that are rarely changed\n\t\tself.eval_dict = dict(debug=self.debug, grid_search=False, 
dir_output=self.dir_output,\n\t\t\t\t\t\t cutoffs=[1, 3, 5, 10, 20, 50], do_validation=do_validation, vali_k=vali_k,\n\t\t\t\t\t\t do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=False, epochs=epochs,\n\t\t\t\t\t\t mask_label=mask_label, mask_ratio=mask_ratio, mask_type=mask_type)\n\n\t\treturn self.eval_dict" ]
[ "0.70514023", "0.7008024", "0.69490683", "0.6713433", "0.65743333", "0.65309024", "0.6521635", "0.6515402", "0.6466526", "0.6447688", "0.641337", "0.6397818", "0.6380512", "0.63683224", "0.6357208", "0.6331759", "0.63120294", "0.62353724", "0.6231899", "0.62074906", "0.62016135", "0.61602294", "0.6106154", "0.6076398", "0.6072927", "0.6063105", "0.60533017", "0.6046588", "0.6046588", "0.6041885", "0.6022285", "0.6020986", "0.60192424", "0.60192394", "0.60078186", "0.60078186", "0.5988933", "0.5988933", "0.5987302", "0.5984581", "0.5971808", "0.5951448", "0.5920321", "0.5920321", "0.5900819", "0.59000623", "0.58790654", "0.5875652", "0.5875138", "0.5873527", "0.58605236", "0.5854432", "0.5835316", "0.583276", "0.5827835", "0.57966465", "0.5777822", "0.5773094", "0.5771888", "0.5764782", "0.57463074", "0.57401544", "0.57229847", "0.5710442", "0.5693394", "0.56896967", "0.56836766", "0.5672696", "0.5672696", "0.56634986", "0.5638263", "0.5632524", "0.562809", "0.5626661", "0.55995303", "0.5595934", "0.5587151", "0.55831826", "0.5559209", "0.5543885", "0.5537782", "0.55352116", "0.55266076", "0.55207926", "0.5513653", "0.55127424", "0.55040306", "0.5495367", "0.54920954", "0.54761785", "0.54674256", "0.5466208", "0.5466208", "0.5465693", "0.5456497", "0.5450436", "0.54363585", "0.5425607", "0.54209834", "0.54141355" ]
0.63517016
15
Set up authorization for the whole API. Can be redefined for an endpoint. OpenAPI Authorization Specs
async def authorization(request):
    # Decode tokens, load/check users and etc
    # ...
    # in the example we just ensure that the authorization header exists
    return request.headers.get("authorization", "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authorization():\n pass", "def authn_and_authz():\n authentication()\n authorization()", "def ProcessApiAuthorization(self, msg):\n policy = self.server.GetPolicies()\n\n # Return the auth code from the config file if it's defined. Default to an\n # empty auth code, which will instruct the enrollment flow to skip robot\n # auth setup.\n response = dm.DeviceManagementResponse()\n response.service_api_access_response.auth_code = policy.get(\n 'robot_api_auth_code', '')\n\n return (200, response)", "def test_apis_wo_auth(self):\n\n # Order list API\n url = reverse('orders-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order summary API\n url = reverse('order-summary-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order create API\n url = reverse('orders-list')\n response = self.client.post(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Shares list/summary API\n url = reverse('shares-list', args=['summary'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n url = reverse('shares-list', args=['all'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n response = new_client.get('/posts/', kwargs={'pk': 3}, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_patch_o_auth_client_authorization(self):\n pass", "def authorize(self, service_id, operation_id, ctx):\n raise NotImplementedError", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n res = new_client.get('/bucketlists/', kwargs={'pk': 2}, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_replace_o_auth_client_authorization(self):\n pass", "def fusion_api_check_authorization(self, body=None, api=None, headers=None, sessionID=None):\n return self.auth.check(body=body, api=api, headers=headers, sessionID=sessionID)", "def test_vault_create_authorization_for_vault_section(self):\n pass", "def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()", "def authorize(self, http):\n return http", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def test_create_o_auth_client_authorization(self):\n pass", "def _enforce_authorization(self, **kwargs):\n # Get the env\n env_dict = kwargs.get('env')\n\n # Although it may already be set in the env, just override in case it was only set via command line or config\n # Convert to string since execve() (called by Popen in base classes) wants string values.\n env_dict['EG_IMPERSONATION_ENABLED'] = str(self.impersonation_enabled) # TODO - Leave EG_ for kernelspec?\n\n # Now perform authorization checks\n if self.kernel_username in self.unauthorized_users:\n self._raise_authorization_error(\"not authorized\")\n\n # If authorized users are non-empty, ensure user is in that set.\n if self.authorized_users.__len__() > 0:\n if self.kernel_username not in self.authorized_users:\n self._raise_authorization_error(\"not in the set of users authorized\")", "def _setup_api_properties(self):\n self.implicit_api_logical_id = GeneratedLogicalId.implicit_http_api()\n 
self.implicit_api_condition = \"ServerlessHttpApiCondition\"\n self.api_event_type = \"HttpApi\"\n self.api_type = SamResourceType.HttpApi.value\n self.api_id_property = \"ApiId\"\n self.editor = OpenApiEditor", "def test_public_status_page_add_authorization_to_public_status_page(self):\n pass", "def __init__(self):\n\n self._authorize()", "def test_add_authz(self):\n self.app.post_json(\"/config/authorize\", params={\n 'dn': '/DN=a.test.user',\n 'operation': 'config'\n }, status=200)\n\n audits = Session.query(ConfigAudit).all()\n self.assertEqual(1, len(audits))\n\n authz = Session.query(AuthorizationByDn).get(('/DN=a.test.user', 'config'))\n self.assertIsNotNone(authz)", "def get_authorization():\n return True", "def setUp(self):\n self.app = app.test_client()\n self.headers = {\n 'Content-Type':'application/json',\n 'Authorization': 'Basic %s' % b64encode(b\"relpek:puorg\").decode(\"ascii\")\n }", "def ODP_authorization(api_key, secret_key):\n \n # Make an auth object\n socrata_auth = Authorization(\n \"internaldata.sandag.org\",\n api_key,\n secret_key\n )\n\n # Authenticate into the domain\n client = Socrata(socrata_auth)\n\n http_auth = HTTPBasicAuth(api_key, secret_key)\n sess = requests.Session()\n sess.auth = http_auth\n\n # client is used for ODP Socrata-py and sess is for Request gets and pushes\n return client, sess", "async def authorization_endpoint(\n req: Request,\n user: User = Security(current_user),\n scope: str = Query(\n ...,\n description='OpenID Connect requests MUST contain the \"openid\" scope value. '\n \"If the openid scope value is not present, the behavior is entirely \"\n \"unspecified. Other scope values MAY be present. Scope values used that are \"\n \"not understood will be ignored.\",\n regex=r\"\\bopenid\\b\",\n ),\n response_type: str = Query(\n ...,\n description=\"OAuth 2.0 Response Type value that determines the authorization \"\n \"processing flow to be used, including what parameters are returned from the \"\n \"endpoints used. Because only the Authorization Code Flow is supported, this \"\n 'value MUST be \"code\".',\n regex=r\"^code$\",\n ),\n client_id: str = Query(\n ...,\n description=\"OAuth 2.0 Client Identifier valid at the Authorization Server.\",\n ),\n redirect_uri: str = Query(\n ...,\n description=\"Redirection URI to which the response will be sent. This URI MUST \"\n \"exactly match one of the Redirection URI values for the Client pre-registered \"\n \"at the OpenID Provider. The Redirection URI MAY use an alternate scheme, such \"\n \"as one that is intended to identify a callback into a native application.\",\n ),\n state: str = Query(\n None,\n description=\"RECOMMENDED. Opaque value used to maintain state between the \"\n \"request and the callback. Typically, Cross-Site Request Forgery (CSRF, XSRF) \"\n \"mitigation is done by cryptographically binding the value of this parameter \"\n \"with a browser cookie.\",\n ),\n idp: str = Query(\n None,\n description=\"OPTIONAL. Case sensitive ASCII strin that specifies the selected \"\n \"IdP the Authorization Server should use to authenticate the user.\",\n ),\n idp_params: str = Query(\n None, description=\"OPTIONAL. URL-encoded query string to pass to the IdP.\"\n ),\n nonce: str = Query(\n None,\n description=\"OPTIONAL. String value used to associate a Client session with \"\n \"an ID Token, and to mitigate replay attacks. The value is passed through \"\n \"unmodified from the Authentication Request to the ID Token. 
Sufficient \"\n \"entropy MUST be present in the nonce values used to prevent attackers from \"\n \"guessing values.\",\n ),\n prompt: str = Query(\n None,\n description=\"OPTIONAL. Space delimited, case sensitive list of ASCII string \"\n \"values that specifies whether the Authorization Server prompts the End-User \"\n \"for reauthentication and consent. If json is in prompt, a JSON response will \"\n \"be returned with necessary information to continue the authentication.\",\n ),\n code_challenge: str = Query(\n None,\n description=\"A PKCE (RFC7636) challenge derived from the code verifier to be \"\n \"verified against later.\",\n ),\n code_challenge_method: str = Query(\n None, description=\"A method that was used to derive code challenge.\"\n ),\n):\n try:\n request = await auth.create_oauth2_request(req)\n request.user = user\n await auth.get_authorization_grant(request).validate_consent_request()\n except OAuth2Error as error:\n return JSONResponse(dict(error.get_body()), status_code=error.status_code)\n\n prompt = request.data.get(\"prompt\", \"\")\n prompts = prompt.split()\n if \"json\" in prompts or config.DEBUG and config.USE_DEMO_LOGIN:\n now = int(time.time())\n token = jwt.encode(\n dict(alg=config.JWT_ALGORITHM),\n dict(\n iss=config.JWT_ISSUER,\n iat=now,\n exp=now + config.LOGIN_CONTEXT_TTL,\n ctx=request.data,\n ),\n config.JWT_PRIVATE_KEY,\n ).decode(\"ASCII\")\n if \"json\" not in prompts:\n url = req.url_for(\"demo_login\") + \"?token=\" + token\n return HTMLResponse(f'<html><body><a href=\"{url}\">{url}</a></body></html>')\n return dict(scope=request.scope, params=idp_params, context=token)\n\n return JSONResponse(\n dict(error=\"Not implemented\"), status_code=status.HTTP_501_NOT_IMPLEMENTED\n )", "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def before(self):\n access_token = self.request.header(\"HTTP_AUTHORIZATION\")\n\n if utils.validate_token(access_token) is True:\n token = re.sub(\"Bearer \", \"\", access_token)\n creator_info = utils.decode_token(token)\n if creator_info != False:\n creator_user = User.find(creator_info.get(\"id\"))\n self.request.set_user(creator_user)\n else:\n self.response.json({\"error\": \"Unauthorized access\"})\n \n if utils.validate_token(access_token) is not True:\n self.response.json({\"error\": \"Unauthorized access\"})", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def configure_apispec(app):\n pass", "def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)", "def authorize(func):\n def func_wrapper(*args):\n from mongo import connect\n event = args[0]\n context = args[1]\n e = None\n try:\n acl = connect('acl').find_by_auth(auth_key(event))\n if not acl:\n e = RestException(\"Unauthorized\", 403)\n is_authorized(acl, event, context)\n except RestException as e:\n return func(*args, exception=e)\n return 
func(*args, exception=e or None)\n return func_wrapper", "def test_read_o_auth_client_authorization(self):\n pass", "def __init__(self, authtoken, organization_id):\n self.headers = {\n 'Authorization': 'Zoho-oauthtoken ' + authtoken,\n }\n self.details = {\n 'organization_id': organization_id\n }", "def test_authorization_required(self, method):\n self.user.user_permissions.clear()\n\n response = getattr(self.client, method)(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 403", "def authorize(self) -> None:\n\n if not self.login_secret:\n #TODO trigger error\n self.login()\n \n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.access_token = sObj.getAccessToken(\n self.oauth_token,\n self.login_secret,\n self.oauth_verifier\n )", "def authorization(self) -> Optional['outputs.AuthorizationResponse']:\n return pulumi.get(self, \"authorization\")", "def main():\n\n config_file = 'auth_demo.cfg'\n config = ConfigParser.SafeConfigParser({\n 'username':'',\n })\n config.read(config_file)\n if not config.has_section('auth_demo_login'):\n config.add_section('auth_demo_login')\n\n username = config.get('auth_demo_login','username')\n password = None\n if username != '':\n password = keyring.get_password('auth_demo_login', username)\n\n if password == None or not auth(username, password):\n\n while 1:\n username = raw_input(\"Username:\\n\")\n password = getpass.getpass(\"Password:\\n\")\n\n if auth(username, password):\n break\n else:\n print \"Authorization failed.\"\n \n # store the username\n config.set('auth_demo_login', 'username', username)\n config.write(open(config_file, 'w'))\n\n # store the password\n keyring.set_password('auth_demo_login', username, password)\n\n # the stuff that needs authorization here\n print \"Authorization successful.\"", "def authorize(self):\n\n\t\tprint 'Authorizing...'\n\n\t\tif self.youtube:\n\t\t\tprint 'Already authorized'\n\t\t\treturn False\n\n\t\tself.youtube = build(self.YOUTUBE_API_SERVICE_NAME, \n\t\t\t\t\t\t self.YOUTUBE_API_VERSION,\n \t\t\t\t\t\t developerKey=self.DEVELOPER_KEY)", "def add_basic_auth(blueprint: Blueprint, username, password, realm='api'):\n\n @blueprint.before_request\n def basic_http_auth(*args, **kwargs):\n auth = request.authorization\n if auth is None or auth.password != password or auth.username != username:\n return Response('Please login', 401, {'WWW-Authenticate': f'Basic realm=\"{realm}\"'})", "def test_basic_api(self):\n self.create_and_verify_stack(\"single/basic_api\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n self.set_template_resource_property(\"MyApi\", \"DefinitionUri\", self.get_s3_uri(\"swagger2.json\"))\n self.update_stack()\n\n second_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def fill_authorization_basic(user, pass_word):\n return user, pass_word", "def authn_and_authz(authn_callback=None, authz_callback=None):\n authentication(authn_callback)\n authorization(authz_callback)", "def test_list_o_auth_client_authorization(self):\n pass", "def authorizeClients(self):\n pass", "def __init__(self, api_secret):\r\n self.apiroot = 'https://api.segment.io/v1'\r\n self.api_secret = api_secret\r\n\r\n self.add_filter(self.add_authorization)\r\n self.add_filter(self.use_json)", "def test_authorization_one_call(self, mock_init, mock_get_token):\n creds = credentials.Credentials('file')\n # On real init we 
would have had access_token set to None\n creds.access_token = None\n\n auth = creds.authorization\n self.assertEqual('Bearer access_token1', auth)\n mock_get_token.assert_called_once_with(creds)", "def authenticate(self):\n\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'ClientId': self.client_id,\n }\n self.headers.update(headers)", "def patch_namespaced_o_auth_client_authorization(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_o_auth_client_authorization\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_o_auth_client_authorization`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_o_auth_client_authorization`\")\n\n resource_path = '/oapi/v1/oauthclientauthorizations/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1OAuthClientAuthorization',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)", "def __init__(__self__, *,\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n source: pulumi.Input['AuthorizationPolicySpecSourceArgs'],\n target: pulumi.Input['AuthorizationPolicySpecTargetArgs']):\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"target\", target)", "def test_vault_get_authorizations_for_vault_section(self):\n pass", "def set_requests_auth(self):\n self.__auth = OAuth2(token=self.bearer_token)", "def _authorize(self):\n auth = 
tweepy.OAuthHandler(self.keys[\"consumer_key\"], self.keys[\"consumer_secret\"])\n auth.set_access_token(self.keys[\"access_token\"], self.keys[\"access_token_secret\"])\n return tweepy.API(auth)", "def create_authorizer(self, ApiId: str, AuthorizerType: str, AuthorizerUri: str, IdentitySource: List, Name: str, AuthorizerCredentialsArn: str = None, AuthorizerResultTtlInSeconds: int = None, IdentityValidationExpression: str = None, ProviderArns: List = None) -> Dict:\n pass", "def requires_admin(original_route):\n @functools.wraps(original_route)\n def wrapper(config, *args, **kwargs):\n if \"Authorization\" not in request.headers:\n raise MissingAuthorization()\n auth_header_value = request.headers[\"Authorization\"]\n if not auth_header_value.startswith(\"Bearer \"):\n raise InvalidAuthHeaderValue()\n auth_token_value = auth_header_value.split(\"Bearer \", 1)[1]\n tokens = get_valid_admin_tokens()\n if auth_token_value not in tokens:\n raise InvalidAuthorization()\n return original_route(config, *args, **kwargs)\n return wrapper", "def init_authsets(clean_db, api_client_mgmt):\n return do_init_authsets(api_client_mgmt)", "def __init__(self, authenticator, access_token, expires_in, scope):\n super(ImplicitAuthorizer, self).__init__(authenticator)\n self._expiration_timestamp = time.time() + expires_in\n self.access_token = access_token\n self.scopes = set(scope.split(' '))", "def authorize(self, request, **kwargs):\n return True", "def __init__(self, app_id, api_key):\r\n self.apiroot = 'https://api.intercom.io/v1'\r\n\r\n self.add_filter(auth.BasicAuth(app_id, api_key))\r\n self.add_filter(self.use_json)", "def test_auto_auth(mock_auth_approle, mock_is_authenticated, parser_dump_function, parser_name):\n creds = {\"vault_creds\": {\"auth_method\": \"approle\", \"role_id\": \"test_jim_bob\", \"secret_id\": \"test_123password\"}}\n mock_is_authenticated.return_value = False\n\n VaultAnyConfig(url=\"http://localhost\").auto_auth(parser_dump_function(creds), ac_parser=parser_name)\n\n mock_auth_approle.assert_called_with(\n role_id=creds[\"vault_creds\"][\"role_id\"], secret_id=creds[\"vault_creds\"][\"secret_id\"]\n )\n mock_is_authenticated.assert_called_with()", "def setUp(self):\n # Arrange\n self.client = APIClient()\n user_authlist_obj = {\n \"username\": \"user-authlist\",\n \"password\": \"password\",\n \"email\": \"test@user.cl\",\n \"first_name\": \"user-authlist\",\n \"last_name\": \"\",\n }\n user_normal_obj = {\n \"username\": \"user-normal\",\n \"password\": \"password\",\n \"email\": \"test@user.cl\",\n \"first_name\": \"user-normal\",\n \"last_name\": \"\",\n }\n self.user_authlist = User.objects.create_user(\n username=user_authlist_obj[\"username\"],\n password=user_authlist_obj[\"password\"],\n email=user_authlist_obj[\"email\"],\n first_name=user_authlist_obj[\"first_name\"],\n last_name=user_authlist_obj[\"last_name\"],\n )\n self.user_authlist.user_permissions.add(\n Permission.objects.get(name=\"Access and resolve AuthList requests\")\n )\n self.user_normal = User.objects.create_user(\n username=user_normal_obj[\"username\"],\n password=user_normal_obj[\"password\"],\n email=user_normal_obj[\"email\"],\n first_name=user_normal_obj[\"first_name\"],\n last_name=user_normal_obj[\"last_name\"],\n )\n self.token_user_authlist = Token.objects.create(user=self.user_authlist)\n self.token_user_normal = Token.objects.create(user=self.user_normal)\n\n self.user_authlist_host = f\"{self.user_authlist.username}@localhost\"\n self.user_normal_host = 
f\"{self.user_normal.username}@localhost\"\n self.user_external_host = \"user-external@localhost\"\n\n self.req1 = CSCAuthorizationRequest.objects.create(\n user=self.user_normal,\n cscs_to_change=\"Test:1\",\n authorized_users=f\"+{self.user_normal_host},-{self.user_external_host}\",\n unauthorized_cscs=\"+ATPtg:0\",\n requested_by=self.user_normal_host,\n )\n\n self.req2 = CSCAuthorizationRequest.objects.create(\n user=self.user_authlist,\n cscs_to_change=\"Test:1\",\n authorized_users=f\"-{self.user_normal_host},-{self.user_external_host}\",\n unauthorized_cscs=\"+ATPtg:0\",\n requested_by=self.user_normal_host,\n )\n\n self.req2 = CSCAuthorizationRequest.objects.create(\n user=self.user_authlist,\n cscs_to_change=\"Test:1\",\n authorized_users=f\"+{self.user_external_host}\",\n unauthorized_cscs=\"-ATPtg:0\",\n requested_by=self.user_normal_host,\n )", "def __init__(self, api_token):\r\n self.apiroot = 'https://api.pipedrive.com/v1'\r\n self.api_token = api_token\r\n self.add_filter(self.add_auth)", "def setup_oauth():\n # Request token\n oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)\n r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # Authorize\n authorize_url = AUTHORIZE_URL + resource_owner_key\n print 'Please go here and authorize: ' + authorize_url\n\n verifier = raw_input('Please input the verifier: ')\n oauth = OAuth1(CONSUMER_KEY,\n client_secret=CONSUMER_SECRET,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret,\n verifier=verifier)\n\n # Finally, Obtain the Access Token\n r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n token = credentials.get('oauth_token')[0]\n secret = credentials.get('oauth_token_secret')[0]\n\n return token, secret", "def api_authentication():\r\n apikey = request.args.get('api_key', None)\r\n from flask import _request_ctx_stack\r\n if 'Authorization' in request.headers:\r\n apikey = request.headers.get('Authorization')\r\n if apikey:\r\n user = db.session.query(model.user.User).filter_by(api_key=apikey).first()\r\n ## HACK:\r\n # login_user sets a session cookie which we really don't want.\r\n # login_user(user)\r\n if user:\r\n _request_ctx_stack.top.user = user", "def get_authorization(self):\n raise NotImplemented()", "def authorize_answer(context, answer):\n context.security_answer = answer\n context.execute_steps('Given I run the authorize command')", "def test_get_admin_authorized(app, client):\n credentials = base64.b64encode(\n f'{app.config[\"BASIC_AUTH_USERNAME\"]}:'\n f'{app.config[\"BASIC_AUTH_PASSWORD\"]}'.encode()\n ).decode()\n response = client.get(\n \"/admin/\", headers={\"Authorization\": f\"Basic {credentials}\"}\n )\n assert response.status_code == 200", "def authorized(fn):\n\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n # Unauthorized\n print(\"No token in header\")\n abort(401)\n\n\n if key not in request.headers['Authorization']:\n # Unauthorized\n print(\"Key not in auth header\")\n abort(401)\n\n return fn(*args, **kwargs)\n return _wrap", "def auth(mocker, app: FastAPI, faker: Faker) -> HTTPBasicAuth:\n # mock engine if db was not init\n if app.state.settings.API_SERVER_POSTGRES is None:\n engine = mocker.MagicMock()\n engine.minsize = 1\n engine.size = 10\n engine.freesize = 3\n engine.maxsize = 10\n app.state.engine = engine\n\n # 
patch authentication entry in repo\n faker_user_id = faker.pyint()\n\n # NOTE: here, instead of using the database, we patch repositories interface\n mocker.patch(\n \"simcore_service_api_server.db.repositories.api_keys.ApiKeysRepository.get_user_id\",\n autospec=True,\n return_value=faker_user_id,\n )\n mocker.patch(\n \"simcore_service_api_server.db.repositories.users.UsersRepository.get_user_id\",\n autospec=True,\n return_value=faker_user_id,\n )\n mocker.patch(\n \"simcore_service_api_server.db.repositories.users.UsersRepository.get_email_from_user_id\",\n autospec=True,\n return_value=faker.email(),\n )\n\n # patches simcore_postgres_database.utils_products.get_default_product_name\n mocker.patch(\n \"simcore_service_api_server.api.dependencies.application.get_default_product_name\",\n autospec=True,\n return_value=\"osparc\",\n )\n\n return HTTPBasicAuth(faker.word(), faker.password())", "def setup(self):\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time. ONLY NEED To AUTH Once\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as self.token:\n self.creds = pickle.load(self.token)\n # If there are no (valid) credentials available, let the user log in.\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n self.flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n self.creds = self.flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as self.token:\n pickle.dump(self.creds, self.token)\n\n self.service = build('calendar', 'v3', credentials=self.creds)", "def authorized(self):\n pass", "def test_patch_o_auth_authorize_token(self):\n pass", "def enable_auth(self):\n\n self._api_manager.enable_auth()", "def authorizations(self) -> Sequence['outputs.AuthorizationResponse']:\n return pulumi.get(self, \"authorizations\")", "def authorize_user(case: APITestCase, user: User):\n\n token = Token.objects.create(user=user)\n case.client.credentials(HTTP_AUTHORIZATION=f'Token {token}')", "def authorization_rule(self) -> Optional[pulumi.Input['EventhubSpecAuthorizationRuleArgs']]:\n return pulumi.get(self, \"authorization_rule\")", "def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)", "def test_auth_private(self):\n self.do_visible(True, None, False, tenant='froggy')", "def __init__(__self__, *,\n authorization_strategy: pulumi.Input['FhirDatastoreIdentityProviderConfigurationAuthorizationStrategy'],\n fine_grained_authorization_enabled: Optional[pulumi.Input[bool]] = None,\n idp_lambda_arn: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"authorization_strategy\", authorization_strategy)\n if fine_grained_authorization_enabled is not None:\n pulumi.set(__self__, \"fine_grained_authorization_enabled\", fine_grained_authorization_enabled)\n if idp_lambda_arn is not None:\n pulumi.set(__self__, \"idp_lambda_arn\", idp_lambda_arn)\n if metadata is not None:\n pulumi.set(__self__, 
\"metadata\", metadata)", "def async_setup(self, conf):\n if conf.get(VERIFICATION_CODE_KEY):\n return\n if conf.get(CONF_ACCESS_TOKEN_CACHE_FILE) is None:\n conf[\n CONF_ACCESS_TOKEN_CACHE_FILE\n ] = f\".{conf[CONF_USERNAME]}{DEFAULT_AUGUST_CONFIG_FILE}\"\n self._config = conf\n\n self._api = Api(\n timeout=self._config.get(CONF_TIMEOUT), http_session=self._api_http_session,\n )\n\n self._authenticator = Authenticator(\n self._api,\n self._config[CONF_LOGIN_METHOD],\n self._config[CONF_USERNAME],\n self._config[CONF_PASSWORD],\n install_id=self._config.get(CONF_INSTALL_ID),\n access_token_cache_file=self._hass.config.path(\n self._config[CONF_ACCESS_TOKEN_CACHE_FILE]\n ),\n )", "def step_impl(context):\n fields = {\n 'response_type': 'code',\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n 'scope': context.vendor_config['versioned_auth']['scope'],\n 'state': uuid.uuid4(),\n }\n\n fields.update(dict(context.table))\n\n authorize_uri = get_authorize_uri(context)\n\n response = requests.get(authorize_uri,\n params=fields,\n allow_redirects=False,\n timeout=5)\n\n context.response = response", "def authorization_url(self): # pragma: no cover\n raise NotImplementedError()", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "async def test_requires_authorization(method: str, path: str, spawn_client):\n client = await spawn_client()\n\n if method == \"GET\":\n resp = await client.get(path)\n elif method == \"POST\":\n resp = await client.post(path, {})\n elif method == \"PATCH\":\n resp = await client.patch(path, {})\n else:\n resp = await client.delete(path)\n\n assert await resp.json() == {\n \"id\": \"unauthorized\",\n \"message\": \"Requires authorization\",\n }\n\n assert resp.status == 401", "def test_auth_public(self):\n self.do_visible(True, None, True, tenant='froggy')", "def test_get_rbac_authorization_api_group(self):\n pass", "def authorize(event, context):\n token = event['authorizationToken']\n log.debug(\"Token: {}\".format(token))\n principalId = token\n context = {\n 'simpleAuth': True,\n }\n\n table = dynamodb.Table(os.environ['ACCESSTOKENS_TABLE'])\n dbresponse = table.scan(\n FilterExpression=Attr('token').eq(token)\n )\n if len(dbresponse['Items']) == 1:\n if dbresponse['Items'][0]['enabled'] == True:\n policy = generatePolicy('allow', event['methodArn'])\n context['user'] = dbresponse['Items'][0]['name']\n else:\n policy = generatePolicy('deny', event['methodArn'])\n else:\n # Check if metasmoke has a new token matching this one\n url = \"https://metasmoke.erwaysoftware.com/smoke_detector/check_token/{}\".format(token)\n with urlopen(url) as response:\n ms_response = json.load(response)\n if ms_response[\"exists\"]:\n # Add the token to our table\n \n item = {\n 'token': token,\n 'name': ms_response[\"location\"],\n 'created_at': ms_response[\"created_at\"],\n 'modified_by': ms_response[\"owner_name\"],\n 'modified_at': ms_response[\"updated_at\"],\n 
'enabled': True\n }\n\n table.put_item(Item=item)\n\n # Allow the requests\n policy = generatePolicy('allow', event['methodArn'])\n context['user'] = item['name']\n else:\n # No token matches. Deny the request\n policy = generatePolicy('deny', event['methodArn'])\n\n response = {\n 'principalId': principalId,\n 'policyDocument': policy,\n 'context': context\n }\n log.debug(response)\n return response", "def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )", "def authorize(f):\n\n @wraps(f)\n def check_auth(*args, **kwargs):\n if not request.authorization:\n raise AuthError(\"Username / password required.\")\n user = current_app.auth.auth(\n request.authorization.username,\n request.authorization.password,\n )\n request.authorization = user\n\n return f(*args, **kwargs)\n\n return check_auth", "def zapi_perms(self, api_key, auth, needs_auth=False, target_username=None, target_media_id=None):\n\t\tself.log.debug(\"zapi_perms():\")\n\t\tself.log.debug(\"api_key: %s\" % api_key)\n\t\tself.log.debug(\"needs_auth: %s\" % needs_auth)\n\t\tself.log.debug(\"target_username: %s\" % target_username)\n\t\tself.log.debug(\"target_media_id: %s\" % target_media_id)\n\n\t\t# Get the info for the auth key (to make sure its valid)\n\t\td = self.get_key_info(api_key)\n\n\t\tdef act(valid):\n\t\t\tif not valid:\n\t\t\t\traise Fault, (5066, \"Invalid API key supplied\")\n\n\t\t\tinfo = {\n\t\t\t\t\"api_key\": api_key,\n\t\t\t\t\"perms\": 'public',\n\t\t\t\t\"authenticated\": False,\n\t\t\t\t\"is_contact\": False,\n\t\t\t\t\"userid\": None,\n\t\t\t\t\"target_userid\": 0,\n\t\t\t\t\"target_image_id\": 0\n\t\t\t}\n\n\t\t\tif not auth:\n\t\t\t\traise Fault, (5073, \"For anonymous access, use username:anonymous/password:anonymous\")\n\n\t\t\t\t# If the function requires auth, and none is present, bail\n\t\t\t\tif needs_auth and not auth:\n\t\t\t\t\traise Fault, (5066, \"This method requires an authorization token\")\n\n\t\t\t# test auth info\n\t\t\tif auth:\n\t\t\t\tif not isinstance(auth, dict):\n\t\t\t\t\t# no idea what they sent\n\t\t\t\t\traise Fault, (5067, \"Not sure what you're trying to do here!\")\n\t\t\t\tif not auth.has_key('username'):\n\t\t\t\t\traise Fault, (5070, \"Username is required\")\n\n\t\t\t\tif not auth.has_key('password') and not auth.has_key('token') and not auth.has_key('pswd_hash'):\n\t\t\t\t\traise Fault, (5071, \"Must supply either a password or an auth token\")\n\n\t\t\t\tusername = auth['username']\n\t\t\t\tinfo['username'] = username\n\n\t\t\t\tif username != \"anonymous\":\n\t\t\t\t\td_user = self.app.api.users.get_user_id(username)\n\t\t\t\t\td_user.addCallback(get_auth, auth, info)\n\t\t\t\t\treturn d_user\n\t\t\t\telse:\n\t\t\t\t\tif needs_auth:\n\t\t\t\t\t\traise Fault, (5066, \"This method requires authorization\")\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn resolve_args(info)\n\n\t\tdef get_auth(result, auth_ind, info):\n\t\t\tif result[0] != 0:\n\t\t\t\traise Fault, (6002, \"Internal ZAPI Error: %s\" % result[1])\n\n\t\t\tinfo['userid'] = result[1]\n\n\t\t\tif auth_ind.has_key('password'):\n\t\t\t\tpassword = auth['password']\n\t\t\t\td2 = self.app.api.users.check_authentication(info['userid'], password, 0)\n\t\t\telif auth.has_key('pswd_hash'):\n\t\t\t\tself.log.warning(\"checking a password hash: %s\" % auth['pswd_hash'])\n\t\t\t\tpswd_hash = auth['pswd_hash']\n\t\t\t\td2 = self.app.api.users.check_pswd_hash(info['userid'], 
pswd_hash)\n\t\t\telse:\n\t\t\t\t## we have an auth token\n\t\t\t\thash = auth['token']\n\t\t\t\td2 = self.app.api.users.check_authentication(info['userid'], None, 0, hash)\n\t\t\td2.addCallback(check_auth, info)\n\t\t\treturn d2\n\n\t\tdef resolve_args(info):\n\t\t\td3 = Deferred()\n\n\t\t\tif target_username:\n\t\t\t\td3.addCallback(get_target_user_id)\n\n\t\t\tif target_username and info['userid']:\n\t\t\t\td3.addCallback(get_contact)\n\t\t\t\n\t\t\tif target_media_id:\n\t\t\t\td3.addCallback(get_target_image_id)\n\n\t\t\td3.callback(info)\n\t\t\treturn d3\n\n\t\tdef check_auth(result, info):\n\n\t\t\tis_authd = False\n\t\t\tif result[0] != 0:\n\t\t\t\traise Fault, (5070, \"Auth creds (token or user/pass) are invalid\")\n\n\t\t\tauth_ind = result[1]\n\t\t\tis_authd = True\n\n\t\t\tinfo['authenticated'] = is_authd\n\t\t\treturn resolve_args(info)\n\n\t\tdef get_contact(info):\n\t\t\tif isinstance(target_username, (list, tuple)):\n\t\t\t\treturn info ## Can't check multiple targets\n\n\t\t\td8 = self.app.api.contacts.get_is_contact(info['target_userid'], info['userid'])\n\t\t\td8.addCallback(check_contact, info)\n\t\t\treturn d8\n\n\t\tdef check_contact(results, info):\n\t\t\tif results[0] == 0:\n\t\t\t\tinfo['is_contact'] = results[1]\n\t\t\t\treturn info\n\t\t\telse:\n\t\t\t\traise Fault(6001, \"Internal ZAPI error: %s\" % results[1])\n\n\t\tdef get_target_user_id(info):\n\t\t\tmulti_ids = []\n\n\t\t\tdef get_id(id_list, id, info):\n\t\t\t\td9 = self.app.api.users.get_user_id(id)\n\t\t\t\td9.addCallback(handle_multi_result, id_list)\n\t\t\t\treturn d9\n\n\t\t\tdef handle_multi_result(result, id_list):\n\t\t\t\tif result[0] == 0:\n\t\t\t\t\tid_list.append(result[1])\n\t\t\t\t\treturn id_list\n\t\t\t\telse:\n\t\t\t\t\traise Fault(6006, \"Internal ZAPI error: %s\" % result[1])\n\n\t\t\tif target_username and target_username != \"*ALL*\":\n\t\t\t\tif isinstance(target_username, (tuple, list)):\n\t\t\t\t\td5 = Deferred()\n\t\t\t\t\tfor id in target_username:\n\t\t\t\t\t\td5.addCallback(get_id, id, info)\n\t\t\t\t\td5.addCallback(lambda _: (0, _))\n\t\t\t\t\td5.callback(multi_ids)\n\t\t\t\telse:\n\t\t\t\t\td5 = self.app.api.users.get_user_id(target_username)\n\t\t\t\td5.addCallback(check_target_user_id, info)\n\t\t\t\treturn d5\n\t\t\telse:\n\t\t\t\treturn info\n\n\t\tdef check_target_user_id(results, info):\n\t\t\tif results[0] == 0:\n\t\t\t\tinfo['target_userid'] = results[1]\n\t\t\t\treturn info\n\t\t\telse:\n\t\t\t\traise Fault(6003, \"Internal ZAPI error: %s\" % results[1])\n\n\t\tdef get_target_image_id(info):\n\t\t\tmulti_ids = []\n\t\t\timage_user = 0\n\t\t\tif info['target_userid']:\n\t\t\t\timage_user = target_username\n\t\t\telif info['userid']:\n\t\t\t\timage_user = info['username']\n\t\t\telse:\n\t\t\t\timage_user = ''\n\n\t\t\tdef get_id(id_list, id, info):\n\t\t\t\td7 = self.app.api.images.get_image_id(image_user, id)\n\t\t\t\td7.addCallback(handle_multi_result, id_list)\n\t\t\t\treturn d7\n\n\t\t\tdef handle_multi_result(result, id_list):\n\t\t\t\tif result[0] == 0:\n\t\t\t\t\tid_list.append(result[1])\n\t\t\t\t\treturn id_list\n\t\t\t\telse:\n\t\t\t\t\traise Fault(6005, \"Internal ZAPI error: %s\" % result[1])\n\n\t\t\tif target_media_id and image_user:\n\t\t\t\tif isinstance(target_media_id, (tuple, list)):\n\t\t\t\t\td6 = Deferred()\n\t\t\t\t\tfor id in target_media_id:\n\t\t\t\t\t\td6.addCallback(get_id, id, info)\n\t\t\t\t\td6.addCallback(lambda _: (0, _))\n\t\t\t\t\td6.callback(multi_ids)\n\t\t\t\telse:\n\t\t\t\t\td6 = self.app.api.images.get_image_id(image_user, 
target_media_id)\n\t\t\t\td6.addCallback(check_target_image_id, info)\n\t\t\t\treturn d6\n\t\t\telse:\n\t\t\t\treturn info\n\n\t\tdef check_target_image_id(results, info):\n\t\t\tif results[0] == 0:\n\t\t\t\tinfo['target_image_id'] = results[1]\n\t\t\t\treturn info\n\t\t\telse:\n\t\t\t\traise Fault(6004, \"Internal ZAPI error: %s\" % results[1])\n\n\n\t\tdef handle_fail(fail):\n\t\t\tself.log.warning(fail.type)\n\t\t\tself.log.warning(fail.getErrorMessage())\n\t\t\tif fail.type != xmlrpclib.Fault:\n\t\t\t\traise Fault (6000, \"Internal ZAPI error: %s\" % fail)\n\t\t\telse:\n\t\t\t\traise fail\n\n\t\td.addCallback(act)\n\t\td.addErrback(handle_fail)\n\t\treturn d", "def __init__(__self__, *,\n authorizations: Sequence['outputs.AuthorizationResponse'],\n managed_by_tenant_id: str,\n managed_by_tenant_name: str,\n managee_tenant_id: str,\n managee_tenant_name: str,\n provisioning_state: str,\n description: Optional[str] = None,\n eligible_authorizations: Optional[Sequence['outputs.EligibleAuthorizationResponse']] = None,\n registration_definition_name: Optional[str] = None):\n pulumi.set(__self__, \"authorizations\", authorizations)\n pulumi.set(__self__, \"managed_by_tenant_id\", managed_by_tenant_id)\n pulumi.set(__self__, \"managed_by_tenant_name\", managed_by_tenant_name)\n pulumi.set(__self__, \"managee_tenant_id\", managee_tenant_id)\n pulumi.set(__self__, \"managee_tenant_name\", managee_tenant_name)\n pulumi.set(__self__, \"provisioning_state\", provisioning_state)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if eligible_authorizations is not None:\n pulumi.set(__self__, \"eligible_authorizations\", eligible_authorizations)\n if registration_definition_name is not None:\n pulumi.set(__self__, \"registration_definition_name\", registration_definition_name)", "def appprotect_setup(request, kube_apis, test_namespace) -> None:\n\n print(\"------------------------- Deploy logconf -----------------------------\")\n src_log_yaml = f\"{TEST_DATA}/ap-waf/logconf.yaml\"\n global log_name\n log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)\n\n print(\"------------------------- Create UserSig CRD resource-----------------------------\")\n usersig_name = create_ap_usersig_from_yaml(kube_apis.custom_objects, uds_crd_resource, test_namespace)\n\n print(f\"------------------------- Deploy dataguard-alarm appolicy ---------------------------\")\n src_pol_yaml = f\"{TEST_DATA}/ap-waf/{ap_policy_uds}.yaml\"\n global ap_pol_name\n ap_pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)\n\n def fin():\n if request.config.getoption(\"--skip-fixture-teardown\") == \"no\":\n print(\"Clean up:\")\n delete_ap_policy(kube_apis.custom_objects, ap_pol_name, test_namespace)\n delete_ap_usersig(kube_apis.custom_objects, usersig_name, test_namespace)\n delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)\n\n request.addfinalizer(fin)", "def setup(app):\n app.add_config_value('http_status_codes_format',\n DEFAULT_HTTP_STATUS_CODES_FORMAT,\n True)\n app.add_config_value('http_status_codes_url',\n DEFAULT_HTTP_STATUS_CODES_URL,\n True)\n\n app.add_directive('http-status-codes-format',\n SetStatusCodesFormatDirective)\n app.add_role('http', http_role)", "def authorizations(self) -> Optional[Sequence['outputs.AuthorizationResponse']]:\n return pulumi.get(self, \"authorizations\")", "def __init__(self):\n self.headers = {\n 'Authorization': 'Bearer ' + app.config['SLACK_BEARER']\n 
}", "def _authenticate_for(self, resp):\n # Get the auth. info from the headers\n scheme, params = resp.headers['Www-Authenticate'].split(None, 1)\n assert (scheme == 'Bearer')\n info = {k: v.strip('\"') for k, v in (i.split('=')\n for i in params.split(','))}\n\n # Request a token from the auth server\n params = {k: v for k, v in info.items() if k in ('service', 'scope')}\n auth = HTTPBasicAuth(self.username, self.password)\n r2 = requests.get(info['realm'], params=params,\n auth=auth, verify=self.verify_ssl)\n\n if r2.status_code == 401:\n raise RuntimeError(\"Authentication Error\")\n r2.raise_for_status()\n\n self.auth = BearerAuth(r2.json()['token'])", "def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response", "def __call__(self, r):\n r.headers['Authorization'] = 'Bearer %s' % self.get_access_token()\n return r", "def init() -> None:\n appbuilder.add_permissions(update_perms=True)\n security_manager.sync_role_definitions()" ]
[ "0.6254318", "0.61565816", "0.60597587", "0.59296864", "0.58683383", "0.58554614", "0.5855057", "0.5846915", "0.57419527", "0.56949556", "0.56878364", "0.5677218", "0.5673581", "0.5643289", "0.5643289", "0.5630589", "0.56278825", "0.5616754", "0.5615052", "0.5611695", "0.55903226", "0.5556557", "0.5515315", "0.55130434", "0.55029887", "0.549835", "0.54905903", "0.5441305", "0.5428888", "0.5427452", "0.54133916", "0.5364706", "0.53617966", "0.5354688", "0.53385204", "0.5334897", "0.53135735", "0.5310342", "0.5294679", "0.5294589", "0.5294136", "0.52929854", "0.528576", "0.52792144", "0.52692825", "0.5264302", "0.5263799", "0.52591133", "0.5248883", "0.52386165", "0.5237469", "0.52305305", "0.52255356", "0.5224349", "0.5224255", "0.5204943", "0.52038836", "0.520198", "0.5179384", "0.5176388", "0.5173039", "0.5166653", "0.51416665", "0.5139223", "0.5136848", "0.5132355", "0.51304543", "0.5128235", "0.5127593", "0.51155746", "0.5114927", "0.5114031", "0.5107379", "0.5100901", "0.50969005", "0.50808024", "0.5079756", "0.5073269", "0.5073006", "0.5071465", "0.5070542", "0.5066379", "0.5058974", "0.5048388", "0.5041643", "0.5035604", "0.50306475", "0.50272906", "0.50247324", "0.50236696", "0.5021438", "0.50144863", "0.50105554", "0.5007925", "0.5005584", "0.50054884", "0.50044775", "0.500044", "0.4985871", "0.49845162" ]
0.49864846
98
A simple endpoint to get the current API token. By default authorization is only available for class-based endpoints. So the endpoint supports anonymous access. If you would like to use API authorization for the simple endpoints, you have to
async def token(request) -> ResponseText:
    return ResponseText(
        "".join(random.choices(string.ascii_uppercase + string.digits, k=42))  # noqa: S311
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def auth(self):\n return self.api(self.token)", "def get(self):\n if current_user and not current_user.is_anonymous:\n user = current_user\n tok = Token(user, 3600)\n return tok\n return jsonify({404: 'User not found'})", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. Please login!')", "def api_token(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_token\")", "async def token(request: Request):\n return get_token()", "def authorization(self):\n token = self.create_auth_token(\n self.api_key.user, self.api_key.key, self.api_key.secret\n )\n return f'JWT {token}'", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def get_access_token():\n if request.method == \"GET\":\n return render_template(\"index.html\")\n elif request.method == \"POST\":\n # Authenticate\n auth = Authorization()\n response = auth.post()\n return render_template(\"index.html\", data=response[0])", "def get(self):\n # Login of authorized user stores in Flask g object\n user = User.query.filter_by(username=g.user.username).first()\n # Generate token\n token = user.generate_auth_token()\n # Send token in ASCII format\n return {'token': token.decode('ascii')}", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def auth_token(self):", "def get_auth_token(cls):\n return jsonify({\n 'user': current_user.serialize(),\n 'token': current_user.get_auth_token(),\n })", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()", "def get_api_token(self):\n integration_context = demisto.getIntegrationContext()\n api_token = integration_context.get('api_token')\n valid_until = integration_context.get('valid_until')\n\n # Return api token from integration context, if 
found and not expired\n if api_token and valid_until and time.time() < valid_until:\n demisto.debug('Retrieved api-token from integration cache.')\n return api_token\n\n headers = {'Accept': CONTENT_TYPE_JSON}\n\n demisto.debug('Calling authentication API for retrieve api-token')\n resp = self.http_request(\n method='POST', url_suffix=URL_SUFFIX['GET_TOKEN'], headers=headers\n )\n integration_context = self.set_integration_context(resp)\n\n return integration_context.get('api_token')", "def get_api_token(self, app, user, pwd):\n authorization = ('Basic ' + base64.b64encode(user + \":\" + pwd))\n api_token_resp = app.post('/v1/api_token', headers={'Authorization': authorization})\n if api_token_resp.status != '200 OK':\n raise ValueError(api_token_resp.status)\n api_token = json.loads(api_token_resp.data)['api_token']\n return api_token", "def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def _get_token(self):\n return user.get_token()", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def decorated(*args, **kwargs):\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def token_endpoint_auth_method(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"token_endpoint_auth_method\")", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': 
False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response", "def token_type(self):\n return 'Bearer'", "def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token", "def current_user(request, token):\n try:\n user = Token.objects.get(key=token).user\n except ObjectDoesNotExist:\n return Response(\n {\"Bad request\": \"Token does not correspond to an existing user\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n return Response(UserReadOnlySerializer(user).data)", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated", "def BearerToken():\n\t\tif bpformation._bearer_token: return(bpformation._bearer_token)\n\n\t\tif not bpformation._CONTROL_COOKIES: \n\t\t\tWeb._LoginScrape()\n\n\t\t# Ping - validate if we need to login\n\t\ttry:\n\t\t\tr = bpformation.web.CallScrape(\"GET\",\"/\")\n\t\t\tif not re.search(\"<title> Control Portal Dashboard </title>\",r.text):\n\t\t\t\traise(bpformation.BPFormationLoginException)\n\t\texcept requests.exceptions.ConnectionError:\n\t\t\traise\n\t\t\traise(bpformation.BPFormationLoginException)\n\t\t\n\t\t# Extract token\n\t\tm = re.search(\"\"\"shell.user.set\\(\\{\"token\":\"(.+?)\",\"userName\":\"(.+?)\"\\}\\);\"\"\",r.text)\n\t\tusername = m.group(2)\n\t\ttoken = m.group(1)\n\n\t\treturn(token)", "def get(auth_token):\n session = requests.Session()\n session.headers.update({'Accept': 'application/json',\n 'Authorization': 'Bearer %s' % auth_token})\n return FlattrApi(session)", "def get_token(self) -> None:\n with self._lock:\n if not self._endpoint:\n raise AuthenticationTokenError(\n 'Token is invalid and endpoint (auth_endpoint) for obtaining is not set.')\n\n url = self._endpoint + '/app'\n data = {\n \"client_id\": self._client_id,\n \"client_secret\": self._client_secret,\n \"username\": self._username,\n \"password\": self._password\n }\n\n res = self.post(url, data)\n self._token_info.parse_token_result(res, 'Get token')", "def get_token(): \n \n # Token url\n token_endpoint = \"https://api.signicat.io/oauth/connect/token\"\n # Setting the grant type to client_credentials\n data = {'grant_type':'client_credentials', 'scope':'identify'}\n # Posting to token url with HTTP basic authentication\n token = requests.post(token_endpoint, 
data=data,allow_redirects=True, auth=(config.CLIENT_ID, config.CLIENT_SECRET))\n # Converting json string to json\n token_json = json.loads(token.text)\n \n # Returning the access_token\n return token_json['access_token']", "def get_basic_auth_token(self):\n return f'Basic {self.key}'", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def get_token():\n if not request.is_json:\n return jsonify({\"msg\": \"Missing JSON in request\"}), 400\n username = request.json.get('username', None)\n password = request.json.get('password', None)\n\n if not username:\n abort(400, \"Invalid username or password\")\n if not password:\n abort(400, \"Invalid username or password\")\n users = app.data.driver.db[config.DOMAIN['user']['datasource']['source']]\n user = users.find_one({'email':username})\n # validate the user in the user's service\n if not user:\n abort(401, \"Invalid username or password\")\n if not check_password_hash(user.get('password'), password):\n abort(401, \"Invalid username or password\")\n role = user.get('role', 'user')\n user_id = str(user.get('_id'))\n user = User(user_id, username, role)\n access_token, refresh_token = create_token(user)\n return jsonify(\n token=access_token,\n type='bearer',\n roles=role,\n user=username,\n refreshToken=refresh_token), 200", "def UserToken(self) -> object:", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': 'thundoss@gmail.com', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "def _retrieve_token(request):\n auth_string = request.headers.get('Authorization')\n try:\n match = re.match(\"Bearer (.+)\", auth_string)\n except TypeError:\n match = None\n if match:\n return match.groups()[0]", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def token(self):\n token = jwt.encode(\n {\n \"id\": self.pk,\n \"username\": self.get_full_name,\n \"email\": self.email,\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + timedelta(minutes=int(os.getenv('TIME_DELTA')))\n },\n settings.SECRET_KEY, algorithm='HS256').decode()\n return token", "def get_token(self, obj):\n\n user = User.objects.get(email=obj.email)\n\n payload = jwt_payload_handler(user)\n\n if api_settings.JWT_ALLOW_REFRESH:\n payload['orig_iat'] = timegm(\n datetime.utcnow().utctimetuple()\n )\n\n token = jwt_encode_handler(payload)\n\n return token", "def auth_guard(endpoint):\n\n @wraps(endpoint)\n def wrap(*args, **kwargs):\n try:\n # Gets user access token from header\n # Throws an exception if token expires\n auth = request.headers.get('Authorization')\n\n if auth is None:\n response = {\n \"error_message\": \"Access Token Required\"\n }\n return json.dumps(response), 500\n\n access_token = request.headers.get('Authorization').split(' ')[1]\n jwt.decode(access_token, os.getenv('JWT_SECRET'), algorithms=[\"HS256\"])\n\n return endpoint(*args, **kwargs)\n except jwt.ExpiredSignatureError:\n print('User access JWT has expired')\n return json.dumps({ 'error': 'Token Expired'}), 401\n\n return wrap", "def get_auth_token_teacher():\n\n token = 
g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def _getApiAuthToken(self):\n return settings.EBAY_API_AUTH_TOKEN", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_endpoint_auth_method\")", "def token_endpoint_auth_method(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_endpoint_auth_method\")", "def generate_auth_token(self):\n token = Serializer(\n app.config['API_SECRET_KEY'],\n expires_in=app.config['JWT_TOKEN_EXPIRATION']\n )\n return token.dumps({'id': self.id})", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def check_user():\n token = request.headers['Authorization'].replace('Bearer ', '')\n return jsonify({\"access_token\": token}), 200", "def get_auth_token(self):\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)", "def getUser():\n\n if 'token' in session:\n return \"Authenticated\"\n else:\n return \"Unauthenticated\"", "def get_token(request):\n capability = TwilioCapability(\n settings.TWILIO_ACCOUNT_SID,\n settings.TWILIO_AUTH_TOKEN)\n \"\"\"Allow our users to make outgoing calls with Twilio Client\"\"\"\n capability.allow_client_outgoing(settings.TWIML_APPLICATION_SID)\n\n \"\"\"Allow our users to accept incoming calls from pyphon\"\"\"\n capability.allow_client_incoming('caller')\n\n \"\"\"Generate the capability token\"\"\"\n token = capability.generate()\n\n return JsonResponse({'token': token})", "def getUser(self, authenticationToken):\r\n pass", "def auth_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_token\")", "def getToken(email, password):\n r = requests.post(r\"https://opendata.hopefully.works/api/login\", json={\"email\":email, \"password\":password})\n if r.status_code == 200: \n return r.json()[\"accessToken\"]\n else:\n return \"\"", "def get_token():\n # get authorization header:\n auth = request.headers.get('Authorization', None)\n \n # authorization header should be included:\n if auth is None:\n raise JWTError(\n {\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, \n 401\n )\n \n # authorization header should be 'Bearer [JWT]'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, \n 401\n )\n elif len(parts) == 1:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, \n 401\n )\n elif len(parts) > 2:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, \n 401\n )\n\n # extract JWT:\n token = parts[1]\n\n return token", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def get_admin_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.admin_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.admin_user), 
content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def token(self):\n payload = {\n 'id': str(self.id),\n 'username': self.username,\n \"exp\": datetime.now() + timedelta(days=2)\n }\n return jwt.encode(payload, SECRET_KEY).decode('utf-8')", "def get(self, service_name):\n try:\n token = self.generate(request.user.name, service_name)\n except AuthTokenGenerationException:\n return {'message': \"error, could not generate auth token\"}, 400\n return {'token': token}, 200", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def get_token(self, obj):\n jwt_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n payload = jwt_payload_handler(obj)\n token = jwt_encode_handler(payload)\n\n return token", "def access_token(self):\n return self.access_token_str", "def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n )", "def get(self):\n user = get_current_user()\n\n if user is None:\n context = {\n 'authenticated': False,\n }\n else:\n context = {\n 'authenticated': True,\n 'user': user,\n }\n\n return self.respond(context)", "def get_token(request):\n # Create a TwilioCapability token with our Twilio API credentials\n capability = ClientCapabilityToken(\n settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN\n )\n\n capability.allow_client_outgoing(settings.TWILIO_ACCOUNT_SID)\n capability.allow_client_incoming('support_agent')\n token = capability.to_jwt()\n\n return JsonResponse({'token': token.decode('utf-8')})", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def extract_bearer_token(request):\n return request.headers['Authorization'].split(\" \")[-1].strip()", "def get_token(email, password):\n data = {'username': email, 'password': password}\n res = requests.post(TOKEN_ENDPOINT, data=data)\n\n return res.json()['token']", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)", "def service(decorated_function):\n @wraps(decorated_function)\n def wrapper(*args, **kwargs):\n if 'Authorization' not in request.headers:\n raise UnauthorizedException(\"Unauthorized\")\n principal_id = _authenticate_jwt(request.headers['Authorization'])\n if principal_id != '00000000-0000-4000-8000-000000000000':\n raise ForbiddenException(\"This endpoint may only be called internally\")\n return decorated_function(*args, **kwargs)\n return wrapper", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, get_missing=False).token", "def get_token():\n\n try:\n url = params.BASE_URL\n payload={}\n headers = {}\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n response = json.loads(response.text)\n base_url = response.get(params.CONTEXT)\n token = base_url.split(\"/\")[-2]\n return (True, token)\n except Exception as e:\n return (False, str(e))", "def current_token() -> object:\n return 
get_async_backend().current_token()", "def get_api_token(settings: Settings, url: str = DEFAULT_PROOFDOCK_API_URL) \\\n -> str:\n return settings.get('auths', {}).get(urlparse(url).netloc, {}).get('value', '')", "def access_token(*args, **kwargs):\n return None", "def get_token(client, email_or_username, password):\n\turl = 'account/token'\n\tbasic_auth = (email_or_username, password)\n\treturn client._request(url, Request.GET, basic_auth=basic_auth)", "def decorator(*args, **kwargs):\n token = request.args.get('token') # taking token as an argument\n if not token:\n return jsonify({'message': \"Token is missing\"}), 403 # if token not provided in the url\n try:\n data = jwt.decode(token, app.config['SECRET_KEY']) # check if token decoded successfully\n user = functions.get_user(data)\n if user['Login'] == \"False\":\n return jsonify({'message': 'Token is expired'}), 403\n except:\n return jsonify({'message': 'Token is invalid'}), 403 # if token not decoded that means token is invalid\n return f(*args, **kwargs)", "def test_get_main_route_auth():\n set_token(client, \"user0011\")\n\n response = client.get(url)\n assert response.status_code == 202", "def get_token(self):\n url = '/auth-token/'\n data = self._http_post(url, self.credentials)\n token = data['token']\n assert len(token) == 40, 'The length of seahub api auth token should be 40'\n self.token = 'Token ' + token", "def get_token():\n global vault_token\n global vault_token_time\n current_app.logger.info('************* GET TOKEN METHOD **************')\n return 'root'\n if validate_token():\n vault_duration = None\n try:\n auth_type = current_app.config.get('VAULT_AUTH', 'TOKEN')\n current_app.logger.info('*********** Auth Type: ' + auth_type)\n if auth_type == 'TOKEN':\n vault_token = current_app.config.get('VAULT_AUTH_TOKEN')\n elif auth_type == 'USERPASS':\n vault_token, vault_duration = authenticate_userpass()\n elif auth_type == 'LDAP':\n vault_token, vault_duration = authenticate_ldap()\n elif auth_type == 'CERT':\n vault_token, vault_duration = authenticate_certificate()\n elif auth_type == 'GCP':\n vault_token, vault_duration = authenticate_gcp()\n elif auth_type == 'APPROLE':\n vault_token, vault_duration = authenticate_approle()\n else:\n current_app.logger.info('Vault: VAULT_AUTH not configured correctly.')\n raise RuntimeError('Vault: VAULT_AUTH not configured correctly.')\n if vault_duration is not None:\n vault_token_time = datetime.datetime.now() + datetime.timedelta(seconds=int(vault_duration))\n \n current_app.logger.info('*********** TOKEN: ' + vault_token) \n\n except ConnectionError as ConnError:\n current_app.logger.info('Vault: There was an error while connecting to Vault server.')\n raise ConnError\n\n return vault_token", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")", "def access_token(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")" ]
[ "0.7302796", "0.7302796", "0.71899337", "0.7180431", "0.7098359", "0.69854575", "0.696279", "0.68940157", "0.6877278", "0.68370014", "0.67778414", "0.6764868", "0.67499906", "0.6745955", "0.67155045", "0.6713305", "0.6693445", "0.66762966", "0.6661236", "0.6643763", "0.6611044", "0.65745187", "0.65630054", "0.65611136", "0.65611136", "0.65611136", "0.65551287", "0.65508467", "0.6543364", "0.6510205", "0.6469771", "0.6458921", "0.6456665", "0.6429487", "0.640388", "0.6403273", "0.63828874", "0.6376157", "0.63702136", "0.63653976", "0.63610137", "0.634749", "0.63406765", "0.6335003", "0.63210505", "0.6311812", "0.6311791", "0.63019544", "0.6293785", "0.6281584", "0.62739533", "0.62551546", "0.6236078", "0.6234485", "0.6234485", "0.6224302", "0.6222667", "0.6202371", "0.6182765", "0.6177974", "0.61713386", "0.6170875", "0.61707914", "0.61467665", "0.61434424", "0.61431766", "0.614059", "0.6136991", "0.61347944", "0.61318636", "0.61318636", "0.61318636", "0.6123013", "0.6111201", "0.6109844", "0.6103173", "0.6090429", "0.60807824", "0.6066074", "0.60488534", "0.60447687", "0.6029928", "0.60280335", "0.6025829", "0.60255444", "0.60175866", "0.6013376", "0.60133064", "0.600907", "0.60072005", "0.60048103", "0.599874", "0.5993449", "0.59850144", "0.5984531", "0.5984531", "0.5984531", "0.5984531", "0.5984531", "0.5984531", "0.5984531" ]
0.0
-1
Checks that the email has a User object associated with it and that the User object is active
def clean_email(self): e = self.cleaned_data['email'] try: user = User.objects.get(email=e) if not user.is_active: msg = 'This user account has not been confirmed yet' raise forms.ValidationError(msg) except User.DoesNotExist: pass # msg = 'This email is not associated with an account' # raise forms.ValidationError(msg) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkIsEmailAvailable(self, email):\n\n return User.objects.filter(email=email).exists()", "def has_validated_email(self):\n return self.user.email_user is not None", "def ref_user_flag(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return True\n except ObjectDoesNotExist:\n return False", "def check_email(request):\n\temail_id = str(request.GET['id'])\n\tuser = User.objects.filter(username=email_id,is_active=1).exists()\n\tif user:\n\t\treturn HttpResponse(1)\n\telse:\n\t\treturn HttpResponse(0)", "def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active", "def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)", "def clean_email(self):\n UserModel = get_user_model()\n email = self.cleaned_data[\"email\"]\n self.users_cache = UserModel._default_manager.filter(email__iexact=email)\n if not len(self.users_cache):\n raise forms.ValidationError(self.error_messages['unknown'])\n if not any(user.is_active for user in self.users_cache):\n # none of the filtered users are active\n raise forms.ValidationError(self.error_messages['unknown'])\n return email", "def is_email_taken(email):\n if User.objects.filter(email=email).exists():\n return True\n return False", "def is_existing_user(email):\n if not email:\n return False\n user = session.query(KlaxerUser).filter(KlaxerUser.email==email).first()\n return True if user else False", "def validate_email(self, data):\n user = account_models.User.objects.filter(username__iexact=data, is_active=True)\n if user:\n return data\n raise serializers.ValidationError(\"Email address not verified for any user account\")", "def does_user_exist(self, email_address: str) -> bool:\n search = {'email': email_address}\n result = self._collection.find(search)\n return True if result.retrieved == 1 else False", "def check_user_exists(self):\n is_exists = False\n if auth.UserInfo.objects.filter(\n user_id__username=self.username,\n is_active=True).exists():\n is_exists = True\n return is_exists", "def _checkUserInactive(username,self):\r\n active = False\r\n user = _findUser(username)\r\n \r\n if user is not None:\r\n active = user.getIsActive()\r\n \r\n return active", "def is_active(self):\n return self.status == ACTIVE_USER", "def check_user_from_db(username: str, email: str) -> bool:\n if User.objects.filter(Q(username=username) | Q(email=email)).first():\n raise UniqueUser(\"Пользователь уже существует\")\n else:\n return True", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def user_exists(self, email):\n user = UserModels.fetch_user_by_email(email)\n if user:\n return {\n \"status\": 400,\n \"error\": \"That email already exists\"\n }", "def clean_email(self):\n # NOTE: all emails are stored in lower-case\n e = self.cleaned_data['email'].lower()\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n msg = 'This email is 
not associated with an account'\n raise forms.ValidationError(msg)\n return e", "def test_activate_user(self):\n activated_user = (RegistrationProfile.objects\n .activate_user(self.activation_key))\n self.assertTrue(activated_user.registrationprofile.activated)\n self.assertFalse(activated_user.is_active)", "def email_exists(self, email):\n user = [user for user in ALL_USERS if user['email'] == email]\n if user:\n return True\n return False", "def check_email_exist(request):\n email = request.POST.get(\"email\")\n user_obj = User.objects.filter(email=email).exists()\n if user_obj:\n return HttpResponse(True)\n else:\n return HttpResponse(False)", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def test_activate_active_user(self):\n activate_user(self.user, self.request)\n self.assertEqual(self.user.is_active, True)", "def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n return UserAS.objects.filter(as_ptr=obj).exists()", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def test_user_creation_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(len(mail.outbox), 1)", "def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)", "def user_exists(email):\n user = lookup_user_by_email(email)\n return user_to_dict(user, include_name=True, include_active=True)", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def check_if_user_exists(self, email):\n for user in self.users.values():\n if user['email'] == email:\n return user['id']\n else:\n return False", "def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)", "def user_exists(email):\n um 
= logic.UserManager()\n try:\n user = um.lookup_user_by_email(email)\n except ex.TickeeError, e:\n transaction.abort()\n return dict(exists=False)\n else:\n transaction.commit()\n return dict(exists=True,\n id=user.id)", "def post(self, request):\n if 'email' in self.request.POST:\n if (Person.objects.filter(user__email=self.request.POST['email'])\n .exists()):\n user = (Person.objects\n .filter(user__email=self.request.POST['email'])\n .last().user)\n return Response({'status': True, 'user_id': user.id})\n return Response({'status': False, 'user_id': 0})", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def userObjExists(self, user : bbUser.bbUser) -> bool:\n return self.userIDExists(user.id)", "def _verify_user_existence(self, user_exists, social_link_exists, user_is_active=None, username=None):\n users = User.objects.filter(username=(username if username else \"test_username\"))\n assert users.exists() == user_exists\n if user_exists:\n assert users[0].is_active == user_is_active\n self.assertEqual(\n UserSocialAuth.objects.filter(user=users[0], provider=self.BACKEND).exists(),\n social_link_exists\n )\n else:\n assert UserSocialAuth.objects.count() == 0", "def email_exist(email):\n return User.objects.filter(email=email).first()", "def _is_initiated(self, context):\n user_data = context.user_data\n has_attr = 'id' in user_data and 'email' in user_data\n has_values = self._id_valid(user_data['id'])\n return has_attr and has_values", "def confirm(key):\n manager = EmailManager.find_key(key)\n if not manager:\n # If key is wrong, return False\n return False\n\n if manager.is_active:\n # Do not reactivate users\n return False\n\n if manager.other_email:\n # If other_email\n if EmailManager.email_used(manager.other_email):\n # Other_email already being used by someone\n return False\n # Other email is not being used by anybody else, make it the active one\n\n # if username == email, set it as new email\n if manager.user.email == manager.user.username:\n manager.user.username = manager.other_email\n manager.user.email = manager.other_email\n\n manager.user.is_active = True\n manager.user.save()\n\n # Activate email\n manager.active = True\n manager.save()\n\n # Returns the activated User's obj\n return manager.user", "def email_exist():\n if request.method == 'POST' and request.get_json():\n data = request.get_json()\n user = ecomap_user.get_user_by_email(data['email'])\n return jsonify(isValid=bool(user))", "def checkUserExists(self, email, username):\n query = \"SELECT * FROM User WHERE Email='\"+email+\"' OR UserName = '\"+username+\"';\"\n self.cur.execute(query)\n\n data = self.cur.fetchall()\n if len(data):\n return True\n else:\n return False", "def is_profile_complete(self):\n return bool(self.fullname and self.username and self.email)", "def test_registered_user_is_inactive(self):\n self.register_bob()\n the_user = User.objects.filter(username='russellszn')\n self.assertFalse(the_user[0].is_active)", "def user_exists(email):\n data = [i['email'] for i in Data.users if email == i['email']]\n return \"\".join(data) == email", "def validate_email(self, email_field):\n\n if User.query.filter_by(email=email_field.data).first():\n raise ValidationError(\"There already is a user with 
this email address.\")", "def get_in_users(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user in obj.users.all():\n return True\n else:\n return False", "def test_manually_registered_account(self):\n active_user = (self.registration_profile.objects\n .create_inactive_user(\n site=Site.objects.get_current(),\n username='bob',\n password='secret',\n email='bob@example.com'))\n active_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n active_user.is_active = True\n active_user.save()\n\n deleted_count = self.registration_profile.objects.delete_expired_users()\n self.assertEqual(deleted_count, 0)\n self.assertEqual(self.registration_profile.objects.count(), 1)\n self.assertEqual(UserModel().objects.get(username='bob'), active_user)", "def test_user_creation_no_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(),\n send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)", "def user_exists(mail_or_id) -> bool:\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n if type(mail_or_id) is int:\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE id=?\n \"\"\", (mail_or_id,))\n else: #mail\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE mail=?\n \"\"\", (mail_or_id,))\n \n conn.commit()\n \n exists = bool(len(list(c)))\n \n conn.close()\n\n return exists", "def is_authorized(self, user_view):\n authenticated = False\n for email in self.emails:\n if user_view[email]:\n self.valid_email = email\n authenticated = True\n return authenticated", "def isEmailUsed(self, email):\n\n\t\ttestq = {\"email\": email};\n\t\ttest_result = self.db.request(\"getOne\", testq);\n\n\t\tif test_result:\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def is_active_user(self):\n\n return self.is_active", "def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False", "def clean_email(self):\r\n email = self.cleaned_data[\"email\"]\r\n #The line below contains the only change, removing is_active=True\r\n self.users_cache = User.objects.filter(email__iexact=email)\r\n if not len(self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unknown'])\r\n if any((user.password == UNUSABLE_PASSWORD)\r\n for user in self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unusable'])\r\n return email", "def is_active(self):\n return self.user.is_active", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def test_resend_activation_email_activated_user(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n\n profile = self.registration_profile.objects.get(user=user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n # Outbox has one mail, admin approve mail\n\n self.assertEqual(len(mail.outbox), 1)\n admins_emails = [value[1] for value in settings.REGISTRATION_ADMINS]\n for email in mail.outbox[0].to:\n self.assertIn(email, admins_emails)", "def 
test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n _, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertFalse(activated)", "def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertEqual(user, new_user)\n self.assertFalse(activated)", "def test_func(self):\n return self.request.user.is_active # any active user", "def user_exists(cls, *args, **kwargs):\r\n user_model = cls.user_model()\r\n query = get_query_by_dict_param(user_model, kwargs)\r\n return user_model.select().where(query).count() > 0", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def validate_email(self, email_field):\n if User.query.filter_by(email=email_field.data).first():\n raise ValidationError('An account with this email address already exists')\n return True", "def check_user(self):\n return self.client.service.checkUser(self.authentication).accountDetails", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def validate_email(self, email):\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. 
Please choose another email.')", "def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c", "def test_expired_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIs(user, False)\n self.assertFalse(activated)\n\n new_user = UserModel().objects.get(username='alice')\n self.assertFalse(new_user.is_active)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertFalse(profile.activated)", "def require_user( request ):\n\n db = get_db()\n\n if ( not 'users_id' in session ):\n return False;\n\n users_id = session[ 'users_id' ]\n\n user = db.execute( text( \"select users_id, name, email from users where users_id = :id and is_active\" ), id = users_id ).fetchone()\n\n if ( not user ):\n return False;\n\n return user", "def test_activation_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].to, [self.user_info['email']])", "def _has_data(cls):\n return User.objects.count() > 0", "def test_not_active(self):\n self.user.is_active = False\n self.user.save()\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 101)\n self.assertEqual(UserFitbit.objects.count(), 1)", "def testInitialUserInactivated(self):\r\n u = User()\r\n u.email = gen_random_word(10)\r\n DBSession.add(u)\r\n\r\n self.assertEqual(\r\n False,\r\n u.activated,\r\n 'A new signup should start out deactivated by default')\r\n self.assertTrue(\r\n u.activation.code is not None,\r\n 'A new signup should start out as deactivated')\r\n self.assertEqual(\r\n 'signup',\r\n u.activation.created_by,\r\n 'This is a new signup, so mark is as thus')", "def confirm_email(self, request, email_address):\n email_address.verified = True\n email_address.set_as_primary(conditional=True)\n email_address.save()\n\n u = get_user_model().objects.get(pk=email_address.user.id)\n u.is_active = True\n u.save()", "def validate_user_existence():\n from sfa_api.utils.storage import get_storage\n storage = get_storage()\n if not storage.user_exists():\n try:\n info = request_user_info()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n return False\n else:\n if not info.get('email_verified', False):\n # User has a valid token, but their email\n # is yet to be verified\n return False\n storage.create_new_user()\n return True", "def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp", "def validate_recipient_email(self, email):\n email_query = EmailAddress.objects.filter(\n email=email, is_verified=True\n )\n\n if not email_query.exists():\n raise serializers.ValidationError(\n 
ugettext(\"No Know Me user owns the provided email address.\")\n )\n\n self._recipient_email_inst = email_query.get()\n\n return email", "def check_user(self, requestor, requestee, default=None):\n self.lock.acquire()\n self.users.add(requestor)\n ok = requestee in self.users\n self.lock.release()\n\n # if this isn't the default app, also add the user to the default app\n if default != self and default != None:\n default.check_user(requestor, requestee)\n\n return ok", "def validate_email(self, data, field_name):\n existing = User.objects.filter(email__iexact=data['email'])\n if existing.exists():\n raise fields.ValidationError(\"A user with that email already exists.\")\n else:\n return data", "def test_create_user(self):\r\n self._auto_auth()\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertTrue(User.objects.all()[0].is_active)", "def activated_user(self):\n user = self.signup_user_two()\n user.is_active = True\n user.save()\n return user", "def email_exists(form, field):\n if User.select().where(User.email == field.data).exists():\n raise ValidationError('A user with that E-mail already exists.')", "def activate_user(self, user):\n if not user.active:\n user.active = True\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return", "def validate_email(self, data):\n users = User.objects.filter(email=data)\n if not self.instance and len(users) != 0:\n raise serializers.ValidationError(data+\" ya esta registrado\")\n\n elif self.instance and self.instance.username != data and len(users) != 0:\n raise serializers.ValidationError(data+\" ya esta registrado\")\n\n else:\n return data", "def activate_user(self, activation_key):\n if SHA1_RE.search(activation_key):\n try:\n profile = RegistrationProfile.objects.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n profile.activation_key = \"ALREADY_ACTIVATED\"\n profile.save()\n return user\n\n return False", "def validate_email(self, email_field):\n user = User.query.filter_by(email=email_field.data).first()\n if user:\n if user.email:\n current_app.logger.error('{} tried to register user with email {} but user already exists.'.format(\n user.email, email_field.data))\n else:\n current_app.logger.error('Anonymous user tried to register user with email {} but user already exists.'.\n format(email_field.data))\n raise ValidationError('An account with this email address already exists')", "def test_already_existing_user(self):\n self.user.registration(\n \"Githeri\", \"githeri.man@yahoo.com\", \"iwantgitheri\", \"iwantgitheri\")\n msg = self.user.registration(\"Githeri\",\n \"githeri.man@yahoo.com\",\n \"iwantgitheri\",\n \"iwantgitheri\")\n self.assertEqual(msg, \"Your Account Already Active. 
Proceed to login\")", "def test_resend_activation_email_expired_user(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n new_user.date_joined -= datetime.timedelta(\n days=settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user.save()\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activation_key_expired())\n\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError(\"Email already registered.\")", "def get_is_interested(self, obj):\n # pylint: disable=no-member\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n profile = UserProfile.objects.get(user=user)\n return profile in obj.interested_users.all()", "def test_user_exists(self):\n data = {'email': self.user.email}\n response = self.client.post(self.url, data=data)\n\n expected_response_code = 200\n self.assertEqual(expected_response_code, response.status_code)\n self.assertTrue(response.data.get('exists'))", "def userExists(self, user_uuid):\n return self.getUser(user_uuid) is not None", "def get_users(self, email):\n active_users = UserModel._default_manager.filter(**{\n '%s__iexact' % UserModel.get_email_field_name(): email,\n 'is_active': True,\n })\n return (u for u in active_users if u.has_usable_password())", "def activate_user(self, activation_key):\r\n # Make sure the key we're trying conforms to the pattern of a\r\n # SHA1 hash; if it doesn't, no point trying to look it up in\r\n # the database.\r\n if SHA1_RE.search(activation_key):\r\n try:\r\n profile = self.get(activation_key=activation_key)\r\n except self.model.DoesNotExist:\r\n return False\r\n if not profile.activation_key_expired():\r\n user = profile.user\r\n user.is_active = True\r\n user.save()\r\n profile.activation_key = \"ALREADY_ACTIVATED\"\r\n profile.save()\r\n return user\r\n return False", "def testSignupWorks(self):\r\n email = u'testing@newuser.com'\r\n UserMgr.signup_user(email, u'testcase')\r\n\r\n activations = Activation.query.all()\r\n\r\n self.assertTrue(len(activations) == 1)\r\n act = activations[0]\r\n\r\n self.assertEqual(\r\n email,\r\n act.user.email,\r\n \"The activation email is the correct one.\")", "def test_email(self):\r\n \r\n self.assertEqual('maryc123@yahoo.com', self.user.email)" ]
[ "0.76105195", "0.7152086", "0.71156937", "0.7044124", "0.6917909", "0.69042623", "0.6899563", "0.68861634", "0.6773868", "0.67668664", "0.67640114", "0.67633206", "0.67286646", "0.6708247", "0.6693293", "0.6690005", "0.66814554", "0.66811293", "0.6654798", "0.6637169", "0.66262937", "0.6566447", "0.6516866", "0.6510395", "0.6508781", "0.6505733", "0.6495337", "0.64943624", "0.6486955", "0.648401", "0.64816105", "0.6478448", "0.64664", "0.64487636", "0.64487636", "0.64464235", "0.64435524", "0.64239854", "0.64231056", "0.64217937", "0.64123064", "0.64078957", "0.63957155", "0.6390597", "0.63887805", "0.63843644", "0.6382657", "0.6372601", "0.63541734", "0.63388807", "0.6337768", "0.6333329", "0.63307995", "0.6329224", "0.63291764", "0.6323609", "0.63232243", "0.63182", "0.6314163", "0.63117146", "0.63107765", "0.62959254", "0.6281605", "0.627328", "0.626519", "0.6234503", "0.6222271", "0.6219243", "0.62189037", "0.62034553", "0.6203412", "0.617399", "0.6166235", "0.61641574", "0.61564434", "0.61548954", "0.61515963", "0.61471635", "0.614266", "0.61269695", "0.6119533", "0.61108345", "0.6095627", "0.6087891", "0.6082826", "0.60644585", "0.60567284", "0.60553706", "0.6051968", "0.6049174", "0.60421205", "0.60383093", "0.6029234", "0.60284114", "0.6023655", "0.6015657", "0.6015211", "0.601421", "0.6009165", "0.59959424" ]
0.68864995
7
Returns the User object if the form is valid
def get_username(self): if not self.is_valid(): return None try: # NOTE: all emails stored in lower-case email = self.clean_email().lower() return User.objects.get(email=email).username except User.DoesNotExist: pass return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self):\n c = super(UserForm, self).clean()\n if (self.instance.pk is None and\n c.get('email') and\n user_exists(c.get('email'),\n c.get('last_name'),\n c.get('first_name'),\n self.current_round_name)):\n raise forms.ValidationError(\n ugettext('APPLICATION_EXISTS PLEASE_LOGIN'))\n return c", "def clean(self):\n user = authenticate(**self.cleaned_data)\n if user is not None and user.is_active:\n self.user = user\n return self.cleaned_data\n raise forms.ValidationError(_(\"Your log in data could not be found. Please check your input and try again.\"))", "def get_user(self):\n if not self.is_valid():\n return None\n # error checking done in: clean_email\n # NOTE: all emails are stored in lower-case\n e = self.clean_email().lower()\n return User.objects.get(email=e)", "def form_valid(self, form):\n usuario = form.save(commit=False)\n usuario.usuario = User.objects.get(username=self.request.user)\n usuario.save()\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n user = form.save(commit=False)\n # print(user)\n messages.success(self.request, 'Successfully registered')\n user.save()\n login(self.request, user)\n return redirect('post:home')\n\n return kwargs", "def _form_request_user(request):\n userid = request.params[\"userid\"].strip()\n user_service = request.find_service(name=\"user\")\n user = user_service.fetch(userid)\n\n if user is None:\n raise UserNotFoundError(f\"Could not find user with userid {userid}\")\n\n return user", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n\n user_data = {\n 'user': 'admin' # add user if needed or export to separate API call\n }\n\n return JsonResponse(user_data, status=200)", "def get_user(self, validated_data):\n user = CustomUser.objects.get(pk=validated_data['user_id'])\n return user", "def form_valid(self, form):\n new_user = form.save()\n return super(FormView, self).form_valid(form)", "def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)", "def form_valid(self, form):\n obj = form.save(commit=False)\n obj.user = self.request.user\n obj.save()\n return super().form_valid(form)", "def validateUser(self,admin):\n \n res=admin.helper.getOneUser(self.name)\n if res == False:\n return True\n else:\n return False", "def form_valid(self, form):\n login(self.request, form.get_user())\n return redirect('profile', id=form.get_user().id)", "def create_form_user(self, **kwargs):\n user = User.objects.create_user(\n **kwargs\n )\n return user", "def validate_user_id(self, user_id: int) -> APIUser:\n if (\n organization_service.check_membership_by_id(\n user_id=user_id, organization_id=self.organization.id\n )\n is None\n or (user := user_service.get_user(user_id=user_id)) is None\n ):\n raise serializers.ValidationError(\"This member does not exist.\")\n return user", "def form_valid(self, form, request):\n data = form.data\n\n user = authenticate(request=request, username=data.get(\n 'username'), password=data.get('password'))\n if user is None:\n return 'Incorrect Username or Password'\n\n login(request, user)\n return", "def clean(self):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n if not email and password:\n return\n user = auth.authenticate(username=email, password=password)\n if user and user.is_active:\n try:\n profile = user\n if profile:\n # This is the only code path to login success.\n self.cleaned_data.update(user=user, profile=profile)\n return self.cleaned_data\n else:\n 
self._errors.setdefault('__all__', []).append('This user is not authorized.')\n logger.warn(\"%s: tried to login but is not authorized.\" % email)\n except profile.DoesNotExist:\n self._errors.setdefault('__all__', []).append('This user has no profile.')\n logger.warn(\"%s: tried to login but is not connected to a profile.\" % email)\n else:\n self._errors.setdefault('__all__', []).append('Invalid username or password.')\n logger.warn(\"%s: failed to login.\" % email)\n # At this point login has failed and we return None.\n return None", "def form_valid(self, form): # 1\n user = form.save() # 3\n if user is not None:\n # print(\"self.request, user\" ,self.request, user)\n # >>> <WSGIRequest: POST '/register/'> testUser2\n login(self.request, user) # 4\n \"This finally validates form and reverse_lazy methof redirects user\"\n return super(RegisterPage, self).form_valid(form) # 4", "def clean(self):\n cleaned_data = self.cleaned_data\n\n username = cleaned_data.get('username')\n password = cleaned_data.get('password')\n\n if username and password:\n self.user = authenticate(username=username, # XXX: actually email\n password=password)\n\n if self.user is not None:\n if self.user.is_active:\n return cleaned_data\n else:\n raise forms.ValidationError('Your account has been disabled.')\n\n raise forms.ValidationError('Wrong login or password. Please try again.')", "def clean_to_user(self):\n return self.cleaned_data[\"to_user\"]", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def _assert_form(self, site, email, is_valid_form=False):\n error = ''\n instance = None\n form = AllowedAuthUserForm({'site': site.id, 'email': email})\n if is_valid_form:\n assert form.is_valid()\n instance = form.save()\n else:\n assert not form.is_valid()\n error = form.errors['email'][0]\n return error, instance", "def get_user(request: Request) -> User:\n\n return _check_and_extract_user(request)", "def _validate_user(_):\n pass", "def form_valid(self, form):\n self.object = User.objects.create_user(\n form.cleaned_data['username'],\n form.cleaned_data['email'],\n form.cleaned_data['password']\n )\n\n self.object.first_name = form.cleaned_data['first_name']\n self.object.last_name = form.cleaned_data['last_name']\n self.object.save()\n\n try:\n g = Group.objects.get(name='api')\n except Group.DoesNotExist:\n g = Group(name='api')\n g.save()\n token = Token.objects.create(user=self.object)\n TokenControl.objects.create(token=token, last_used=datetime.now())\n g.user_set.add(self.object)\n\n data = {\n 'pk': self.object.pk,\n }\n return JsonResponse(data)", "def form_valid(self, form):\n request = self.request\n user = authenticate(\n email=form.cleaned_data['email'],\n password=form.cleaned_data['password']\n )\n if user is not None:\n login(request, user)\n else:\n return redirect(reverse_lazy('auth_ex:register'))\n return super().form_valid(form)", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def get_object(self):\n return User.objects.get(username=self.request.user.username)", "def form_valid(self, form, profile_form):\n self.object = form.save()\n # print(self.object.shopuserprofile)\n # 
print(self.object)\n # print(self.request.user)\n\n return HttpResponseRedirect(self.get_success_url())", "def get_user(self, user_id):\n try:\n User = get_user_model()\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def get_user(self, user_id):\n User = get_user_model()\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.administrator = self.request.user\n form.save()\n self.object.save()\n return HttpResponseRedirect(self.get_success_url())", "def validate_usuario(self, data):\n\t\tuser = Usuario.objects.filter(usuario=data)\n\t\t# Si estoy creando (no hay instancia) comprobar si hay usuarios con ese\n\t\t# username\n\t\tif not self.instance and len(user) != 0:\n\t\t\traise ValidationError(u\"Ya existe un usuario con ese usuario\")\n\t\t# Si estoy actualizando (hay instancia) y estamos cambiando el username\n\t\t# y existen usuarios con el nuevo username\n\t\telif self.instance.usuario != data and len(user) != 0:\n\t\t\traise ValidationError(u\"Ya existe un usuario con ese usuario\")\n\t\telse:\n\t\t\treturn data", "def test_valid_form_true(self):\n form = UserRegisterForm(data=self.data)\n self.assertTrue(form.is_valid())", "def getUser():\n username = post_param('username', '')\n if username == '':\n username = get_param('username', '')\n password = get_param('password', '')\n else:\n password = post_param('password', '')\n \n if username == '':\n return None\n else:\n return User(username, password)", "def get_user(self, email):\n try:\n return RegisterUser.objects.get(email=email)\n except:\n return None", "def form_valid(self, form):\n login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')", "def form_valid(self, form):\n\n form.save()\n email = form.cleaned_data.get('email')\n _password = form.cleaned_data.get('password1')\n user = authenticate(email=email, password=_password)\n if user:\n login(self.request, user)\n return super().form_valid(form)", "def get_user(cls, user_id):\n try:\n return User.objects.get(pk=user_id)\n except User.DoesNotExist:\n return None", "def test_form_valid(self):\n form = Mock()\n form.cleaned_data = Mock()\n self.view.form_valid(form)\n self.assertIsInstance(form.instance.modified_by, User)\n 
self.assertEqual(form.instance.modified_by, self.request.user)", "def form_valid(self, form):\n print(self.object)\n User.objects.filter(username=self.object).update(\n first_name = form.cleaned_data['first_name'],\n last_name = form.cleaned_data['last_name'],\n email = form.cleaned_data['email'],\n gender = form.cleaned_data['gender'],\n date_of_birth = form.cleaned_data['date_of_birth'],\n )\n messages.success(self.request, 'Edited successfully')\n return super().form_valid(form)", "def validate(self, data):\n\t\tif self.instance:\n\t\t\treturn super(UserSerializer, self).validate(data)\n\t\telse:\n\t\t\tuser = User(username=data['username'])\n\t\t\tdata['password'] = make_password(data['password'])\n\t\t\treturn data", "def post(self):\r\n return create_user(request)", "def test_has_form(self):\n form = self.response.context['form']\n self.assertIsInstance(form, UserForm)", "def clean(self):\n\n # Getting cleaned email and username data.\n email = self.cleaned_data.get('email')\n username = self.cleaned_data.get('username')\n\n # Get possible user objects based on email and username.\n user_email = User.objects.filter(email=email)\n user_uname = User.objects.filter(username=username)\n\n # If the user has changed his email\n # and if the email already exists.\n if email != self.user.email:\n if user_email:\n raise forms.ValidationError(\"Email address is already taken\")\n\n # If the user has changed his username\n # and if the username already exists.\n if username != self.user.username:\n if user_uname:\n raise forms.ValidationError(\"Username is already taken\")", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def clean_creator(self):\n creator = self.cleaned_data['creator']\n if not creator:\n return None\n try:\n creator = User.objects.get(pk=creator)\n except User.DoesNotExist:\n raise ValidationError(_(\"invalid user pk\"))\n return creator", "def form_valid(self, form):\n\n self.object = form.save()\n\n messages.success(self.request, 'Profile details updated.')\n\n return redirect(settings.LOGIN_REDIRECT_URL)", "def login(self):\n self.user = self.serializer.validated_data['user'] or self.serializer.validated_data['email']", "def validate_user(user):\n username = user.get(\"username\")\n if username is None:\n return False, USERNAME_NOT_AVAILABLE\n\n total = user.get(\"total\")\n if total is None:\n return False, TOTAL_NOT_AVAILABLE\n\n if mongo.db.users.find_one({\"username\": username}):\n return False, USER_ALREADY_EXISTS\n\n return True, SUCCESSFUL_VALIDATION_MESSAGE", "def validates_user():\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n # age = request.form.get(\"age\")\n # zipcode = request.form.get(\"zipcode\")\n\n # is_user_there = User.query.filter(User.email == email).all()\n\n is_user_there = db.session.query(User).filter(User.email == email).first()\n\n if is_user_there:\n flash(\"You're already registered!\")\n return redirect(\"/login\")\n\n else:\n new_user = User(email=email, password=password)\n db.session.add(new_user)\n db.session.commit()\n\n flash(\"Success! 
You were registered!\")\n\n return redirect(\"/\")", "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "def authenticate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n user = self.username.data\n\n cur = get_cursor()\n if email_exists(cur, user):\n user = get_username(cur, user)\n\n if username_exists(cur, user):\n pw_hash = get_pw_hash(cur, user)\n\n if check_password(self.password.data, pw_hash):\n self.username.data = user\n return True\n\n return False", "def get_user(self, instance, name):\n return instance.get_user(name)", "def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user", "def save(self):\n new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],\n password=self.cleaned_data['password1'],\n email=self.cleaned_data['email'],\n firstname=self.cleaned_data['first_name'],\n lastname=self.cleaned_data['last_name'],\n agree=self.cleaned_data['tos'])\n return new_user", "def validate(self, data):\n if 'username' not in data:\n return\n try:\n account = get_user_model().objects.get(username=data['username'])\n except get_user_model().DoesNotExist:\n account = None\n\n if account is not None:\n raise UserAlreadyExists()\n\n if data['password']:\n if validate_password(data['password']):\n data['password'] = make_password(data['password'])\n\n return data", "def form_valid(self, form):\n valid = super().form_valid(form)\n messages.success(\n self.request, HELLO_MSG.format(self.request.user.username)\n )\n return valid", "def test_validate_when_user_found(self, view, mget_user):\n assert view.validate() is None", "def validate(self, credentials):\n user = authenticate(**credentials)\n if user and user.is_active:\n return user\n raise serializers.ValidationError('Incorrect Credentials')", "def clean(self):\n\n # Fetch cleaned email and username data.\n email = self.cleaned_data.get('email')\n username = self.cleaned_data.get('username')\n\n # Fetch possible user objects from the database\n # based on provided and email and password.\n user_email = User.objects.filter(email=email)\n user_uname = User.objects.filter(username=username)\n\n # If user exists based on the email address or username,\n # raise validation error.\n if user_email:\n self._errors[\"email\"] = \"Email address is already associated with another account\"\n\n if user_uname:\n self._errors[\"username\"] = \"Usename is already associated with another account\"", "def validate(self, validated_data):\n user = authenticate(username=validated_data['username'],\n password=validated_data['password'])\n\n if not user:\n raise serializers.ValidationError({\"error\": \"User not found\"})\n else:\n validated_data['user'] = user\n return validated_data", "def get(self, **kwargs):\n user_id = kwargs[\"id\"]\n user = models.User.get_by_id(user_id)\n if user is None:\n self.abort(422, \"could not find user\")\n\n self.set_response_view_model(view_models.User.view_contract())\n self.api_response = view_models.User.form(user)\n self.send_response()", "def form_valid(self, form):\n\t\tusername = form.cleaned_data['name']\n\t\tpw = form.cleaned_data['password']\n\n\t\tuser = User.objects.create_user(username, 
password=pw)\n\t\tif user:\n\t\t\tlogin(self.request, user)\n\n\t\tp = Person(user=user)\n\t\tp.save()\n\t\treturn super(PersonCreate, self).form_valid(form)", "def validate_user_id(self, value):\n if not User.objects.filter(id=value).exists():\n raise serializers.ValidationError('User with this id does not exist.')\n return value", "def get_user(self, validated_token):\n try:\n user_id = validated_token[api_settings.USER_ID_CLAIM]\n except KeyError:\n raise InvalidToken(\n _('Token contained no recognizable user identification')) from KeyError\n\n user_model = get_user_model()\n try:\n user = user_model.objects.get(\n **{api_settings.USER_ID_FIELD: user_id})\n except user_model.DoesNotExist:\n raise AuthenticationFailed(\n _('User not found'), code='user_not_found') from user_model.DoesNotExist\n\n if not user.is_active:\n raise AuthenticationFailed(\n _('User is inactive'), code='user_inactive')\n\n if not user.has_finished_onboarding and not user.is_admin:\n raise HasNotFinishedOnboarding(detail={\n 'detail': 'User hasn\\'t finished the onboarding',\n 'onboarding_session': user.onboarding_session\n })\n\n return user", "def clean_member(self):\n lookup = self.cleaned_data['member']\n\n # Look up user emails first, see if a verified user can be added\n try:\n validator = EmailValidator(code='lookup not an email')\n validator(lookup)\n\n member = (\n User.objects.filter(\n emailaddress__verified=True,\n emailaddress__email=lookup,\n is_active=True,\n ).first()\n )\n if member is not None:\n return self.validate_member_user(member)\n\n invite = TeamInvite(\n organization=self.team.organization,\n team=self.team,\n email=lookup,\n )\n\n return self.validate_member_invite(invite)\n except ValidationError as error:\n if error.code != 'lookup not an email':\n raise\n\n # Not an email, attempt username lookup\n try:\n member = User.objects.get(username=lookup, is_active=True)\n return self.validate_member_user(member)\n except User.DoesNotExist:\n raise forms.ValidationError('User not found')", "def form_valid(self, form, request):\n data = form.data\n\n # Password hashing\n password = make_password(data.get('password1'))\n\n # Checkbox has value 'on' instead of True\n volunteer = False\n flag = data.get('volunteer')\n if flag is not None and flag != 'false' and flag != 'False':\n volunteer = True\n\n # Break first_name and last_name\n names = data.get('name').strip().split(' ')\n first_name = names[0]\n last_name = ''\n if len(names) > 1:\n last_name = ' '.join(names[1:])\n\n err = self.register(data.get('username'), data.get('email'), data.get(\n 'phone_number'), volunteer, password, first_name, last_name)\n return err", "def edit(request, pk):\n\n try:\n object = User.objects.get(pk=pk)\n except:\n object = User()\n\n if request.method == 'POST': # If the form has been submitted...\n form = UserForm(request.POST, instance=object)\n\n if form.is_valid(): # If the form is valid\n object = form.save()\n\n messages.success(request, _('The user has been saved.'))\n\n return redirect('users.views.list')\n else:\n form = UserForm(instance=object)\n\n return render(request, 'users/users/edit.html', {'form': form})", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an 
owner'),\n params={'username': username},\n )\n return owner", "def get_user(self) -> User:\n return self.__user", "def validate(self, data):\n request = self.context['request']\n data.setdefault('user', request.user)\n data.setdefault('device_user_token', None)\n\n if not request.user.is_authenticated():\n raise serializers.ValidationError('user is not logged in.')\n\n try:\n self.instance = DeviceUser.objects.get(**data)\n\n except DeviceUser.DoesNotExist:\n raise serializers.ValidationError('invalid device')\n\n return data", "def clean(self):\n if self.edit_user is None and len(self.cleaned_data['password1']) == 0:\n raise forms.ValidationError(_(u'You must supply a password when creating a user'))\n return super(RegisterUserForm, self).clean()", "def clean_username(self):\n username = self.cleaned_data['username']\n\n try:\n User.objects.get(email=username)\n except ObjectDoesNotExist:\n raise forms.ValidationError('Selected user does not exist.')\n\n return username", "def get_user(user_id):\n try:\n return UserModel.objects.get(id=user_id)\n except UserModel.DoesNotExist:\n return None", "def form_valid(self, form):\n # Associate Job to user if they are logged in\n if self.request.user.is_authenticated():\n form.instance.creator = self.request.user\n return super().form_valid(form)", "def authenticated_user(self):\n if self.is_authenticated() and self.user:\n if isinstance(self.user, User):\n return SimpleUser(self.user.email)\n return UnauthenticatedUser()", "def form_valid(self, form):\n redirect_url = self.accept_auth_request(form.get_user())\n return HttpResponseRedirect(redirect_url)", "def form_valid(self, form):\n form.instance.founder = self.request.user\n print('Project Create user:', self.request.user)\n form.save()\n\n tc_lib.generate_user_matches(form)\n\n return super(ProjectCreate, self).form_valid(form)", "def post(self, request, *args, **kwargs):\n\n data = {}\n form = self.form_class(request.POST)\n # import pdb\n # pdb.set_trace()\n if form.is_valid():\n data['first_name'] = form.cleaned_data['first_name']\n data['last_name'] = form.cleaned_data['last_name']\n data['email'] = form.cleaned_data['email']\n data['username'] = form.cleaned_data['username']\n password = form.cleaned_data['password']\n password_cnf = form.cleaned_data['password_cnf']\n\n if password == password_cnf:\n try:\n data['password'] = make_password(password, salt=\"blog\")\n user = User.objects.create(**data)\n except:\n import sys\n print sys.exc_value\n # user.delete()\n messages.error(request, \"Something went wrong. 
Please try again.\")\n return self.form_invalid(form)\n\n else:\n messages.error(request, \"Passwords did not match.\")\n return self.form_invalid(form)\n\n if user is not None:\n user = authenticate(username=data['username'], password=password)\n login(request, user)\n request.session['USER_ID'] = user.pk\n request.session['USER_NAME'] = user.first_name\n\n return HttpResponseRedirect(reverse('index'))\n messages.error(request, \"Wrong username and Password combination.\")\n return self.form_invalid(form)\n\n else:\n return self.form_invalid(form)", "def validate(self) -> bool:\n if not super().validate():\n return False\n\n # Does the user exist\n user = User.query.filter_by(username=self.username.data).first()\n if not user:\n self.username.errors.append('Invalid username or password')\n return False\n\n # Does given password match user's password\n if not user.check_password(self.password.data):\n self.username.errors.append('Invalid username or password')\n return False\n\n return True", "def validate(self, data):\n user_type = 3\n return validate_login_user(self, data, user_type)", "def save(self):\n if not self.errors:\n username = self.cleaned_data['username']\n\n try:\n with transaction.atomic():\n return User.objects.create_user(\n username=username,\n password=self.cleaned_data['password1'],\n email=self.cleaned_data['email'],\n first_name=self.cleaned_data['first_name'],\n last_name=self.cleaned_data['last_name'])\n except Exception:\n # We check for duplicate users here instead of clean, since\n # it's possible that two users could race for a name.\n if User.objects.filter(username=username).exists():\n self.errors['username'] = self.error_class(\n [_('Sorry, this username is taken.')])\n else:\n raise\n\n return None", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def is_valid(self):\n return self.user.is_authenticated", "def test_invalid_user_already_exists_userregisterform(self):\n User.objects.create_user(**self.credentials)\n form = UserRegisterForm(\n data={\n \"username\": \"BobRobert\",\n \"email\": \"test_bob@test.f\",\n \"password1\": \"fglZ9fYmr%?,\",\n \"password2\": \"fglZ9fYmr%?,\",\n \"robot\": True,\n }\n )\n self.assertFalse(form.is_valid())", "def get_one_user():", "def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user", "def get_form(self):\n kwargs = {\n \"instance\": self.profile if self.form_object == \"profile\" else self.user,\n \"prefix\": self.name,\n }\n\n if self.request.method == \"POST\":\n return self.form_class(self.request.POST, self.request.FILES, **kwargs)\n else:\n return self.form_class(**kwargs)", "def validate(self, data):\n user_type = 
3\n return validate_register_user(self, data, user_type)", "def getEditUser(self, request):\n\n userForm = UserEditForm(instance=request.user)\n profileForm = ProfileEditForm(instance=request.user.profile)\n return (userForm, profileForm)", "def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form) # runs parent func", "def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form) # runs parent func", "def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n return user" ]
[ "0.6947391", "0.6922856", "0.67644674", "0.66740084", "0.657634", "0.65609896", "0.65185124", "0.649799", "0.64326835", "0.64067334", "0.640667", "0.6404592", "0.6355719", "0.6345825", "0.6341826", "0.63241357", "0.632235", "0.6290305", "0.62621236", "0.62143874", "0.62017405", "0.6198287", "0.61597574", "0.61456615", "0.61452985", "0.6115221", "0.6088924", "0.604493", "0.60443497", "0.6027574", "0.6026947", "0.60244596", "0.6013727", "0.60099995", "0.60099995", "0.599844", "0.59867626", "0.59762853", "0.5969352", "0.5966557", "0.5964864", "0.5964864", "0.59423065", "0.59415823", "0.5929693", "0.5917374", "0.5911939", "0.591097", "0.5908652", "0.59079796", "0.589605", "0.5887986", "0.5887986", "0.5877418", "0.5850469", "0.5846637", "0.5832312", "0.5816303", "0.5795005", "0.5782983", "0.57816255", "0.5779052", "0.5778195", "0.5776592", "0.5774737", "0.57662874", "0.57604975", "0.57535523", "0.57513833", "0.5744483", "0.57406574", "0.574007", "0.57284707", "0.5722198", "0.5719913", "0.5716352", "0.5712909", "0.5711081", "0.5710267", "0.57095534", "0.57035196", "0.57007587", "0.5698902", "0.56972396", "0.56962544", "0.5690142", "0.56885856", "0.56857145", "0.56822014", "0.56768584", "0.5671629", "0.56713146", "0.56706893", "0.5667782", "0.5667696", "0.56643665", "0.5662201", "0.5658454", "0.56526285", "0.56526285", "0.56504256" ]
0.0
-1
Checks that the email is not already in use
def clean_email(self):
    # NOTE: all emails are stored in lower case
    e = self.cleaned_data['email'].lower()
    if User.objects.filter(email=e).count() > 0:
        raise forms.ValidationError('An existing account is using that email address.')
    return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')", "def clean_email(self):\n existing = User.objects.filter(email__iexact=self.cleaned_data['email'])\n if existing.exists():\n raise forms.ValidationError(_(\"This email address is already in use. Please enter a different email \"\n \"address!\"))\n else:\n return self.cleaned_data['email']", "def is_email_address_already_assigned(email_address: str) -> bool:\n return _do_users_matching_filter_exist(DbUser.email_address, email_address)", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': 'testing@dummy.com'\r\n }\r\n )\r\n self.assertIn('already signed up', res.body)", "def validate_email(self, email):\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def test_duplicate_email(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n rv = self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.assertIn(b'Sorry email already exist', rv.data)", "def clean_email(self):\r\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\r\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))\r\n return self.cleaned_data['email']", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError('This email address is already in use. Please supply a different email address.')\n return self.cleaned_data['email']", "def is_duplicate_email(email):\n users = User.objects.filter(email=email).values()\n if len(users):\n return True\n return False", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n if User.objects.filter(email__iexact=self.cleaned_data['email']):\n raise forms.ValidationError(_(\"This email address is already in use. Please supply a different email address.\"))\n return self.cleaned_data['email']", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n except User.MultipleObjectsReturned:\n pass\n raise forms.ValidationError(_(u'This email address is already in use. 
Please supply a different email address.')\n )", "def test_registeration_duplicate_user_email(self):\n self.signup_a_user(self.user_data)\n response_duplicate = self.signup_a_user(self.user_data_duplicate_email)\n self.assertEqual(response_duplicate.status_code,\n status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response_duplicate.data[\"errors\"][\"email\"],\n [\"user with this email already exists.\"])\n self.assertNotIn(\"token\", response_duplicate.data)", "def is_email_taken(email):\n if User.objects.filter(email=email).exists():\n return True\n return False", "def test_validates_email_not_already_invited_on_update(self):\n # Make an invitation that we will modify\n invitation = self.project.invitations.create(email = 'jane.doe@example.com')\n # Make a second invitation with a different email address\n self.project.invitations.create(email = 'Joe.Bloggs@example.com')\n # Try to update the invitation\n invitation.email = 'joe.bloggs@example.com'\n expected_errors = {\n 'email': ['Email address already has an invitation for this project.'],\n }\n with self.assertValidationErrors(expected_errors):\n invitation.full_clean()", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def isEmailUsed(self, email):\n\n\t\ttestq = {\"email\": email};\n\t\ttest_result = self.db.request(\"getOne\", testq);\n\n\t\tif test_result:\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def clean_email(self):\r\n email = self.cleaned_data.get(\"email\")\r\n\r\n if not email: \r\n return email\r\n\r\n if User.objects.filter(email__iexact=email).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That e-mail is already used.\")\r\n else:\r\n return email", "def validate_unique_email(email):\n if AssociatedEmail.objects.filter(email=email.lower(), is_primary_email=False):\n raise ValidationError(_(\"User with this email already exists.\"),\n code='email_not_unique',)\n if User.objects.filter(email=email.lower()):\n raise ValidationError(_(\"User with this email already exists.\"),\n code='email_not_unique',)", "def validate_email(self, email_field):\n\n if User.query.filter_by(email=email_field.data).first():\n raise ValidationError(\"There already is a user with this email address.\")", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = JOSReservation.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def clean_email(self):\n email = self.cleaned_data['email']\n email_taken = User.objects.filter(email=email).exists()\n if email_taken:\n raise forms.ValidationError(\n 'El email ya se encuentra en uso. Prueba otro!')\n return email", "def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. 
Please supply a different email address.'))", "def clean_email(self):\r\n email = self.cleaned_data[\"email\"]\r\n #The line below contains the only change, removing is_active=True\r\n self.users_cache = User.objects.filter(email__iexact=email)\r\n if not len(self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unknown'])\r\n if any((user.password == UNUSABLE_PASSWORD)\r\n for user in self.users_cache):\r\n raise forms.ValidationError(self.error_messages['unusable'])\r\n return email", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n # Check if user exists already, error early\n if User.objects.filter(email=email).exists():\n LOGGER.debug(\"email already exists\", email=email)\n raise ValidationError(_(\"Email already exists\"))\n return email", "def test_already_registered_email_validation(self):\n\n main_page = pages.mainpage.MainPage(self.driver)\n main_page.click_sign_in_button()\n \n sign_in_page = pages.signinpage.SignInPage(self.driver)\n sign_in_page.enter_create_account_email_addres('test@test.test')\n sign_in_page.click_create_account_button() \n\n self.assertTrue(sign_in_page.check_if_account_create_error_is_visible(), 'Email validation failed')", "def check_unique(self):\n pass", "def validate_email(self, email_field):\n if User.query.filter_by(email=email_field.data).first():\n raise ValidationError('An account with this email address already exists')\n return True", "def test_email_not_unique(bot):\n expect_error(register, InputError, \"a\", \"abcdef\", \"a\", \"a\", bot.email)", "def _validate_secondary_email_doesnt_exist(email):\n if email is not None and AccountRecovery.objects.filter(secondary_email=email).exists():\n # pylint: disable=no-member\n raise errors.AccountEmailAlreadyExists(accounts.EMAIL_CONFLICT_MSG.format(email_address=email))", "def validate_email(form, field):\n\n user = User.query.filter_by(email=form.email.data).first()\n\n if user and not user == g.user:\n form.email.errors = [\n \"Email already associated with account!\",\n *form.email.errors\n ]\n raise ValidationError", "def clean_email(self):\n email = self.cleaned_data['email'].lower()\n if User.objects.filter(email__iexact=email).exists():\n raise ValidationError(_('A user with that email already exists.'))\n return email", "def test_already_validated_email(self):\n token = self.authenticate_user(self.auth_user_data).data[\"token\"]\n verification_url = reverse(\n 'authentication:verify_email', kwargs={'token': token})\n\n response = self.client.get(\n verification_url,\n HTTP_AUTHORIZATION=f'token {token}'\n )\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def unique_email(cls, email):\n user_db = User.get_by('email', email)\n if user_db:\n raise ValueError('Sorry, this email is already taken.')\n return email", "def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n qs = User.objects.exclude(id=self.instance.id).filter(email=email)\n if len(qs) == 0:\n return email\n raise forms.ValidationError(\n ugettext(\"This email is already registered\"))", "def test_invalid_email_when_logging_in(self):\n pass", "def _email_allowed(self, tool):\n if 'emails' not in self.watchdb[tool]:\n self.watchdb[tool]['emails'] = []\n\n sent = self.watchdb[tool]['emails']\n now = time.time()\n limit_minute = now - 300\n if sum(e > limit_minute for e in sent) >= 1:\n return False\n\n limit_max = now - 3600\n if sum(e > limit_max for e in sent) >= 5:\n return False\n\n self.watchdb[tool]['emails'] = [e for e in sent if e > limit_max]\n 
self.watchdb[tool]['emails'].append(now)\n return True", "def test_signup_dupe_email(self):\n\n invalid_u = User.signup(\"allison@allison.com\", \"testuser\", \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def unique_registered_email(value):\n from .models import Person\n if Person.objects.filter(email=value).exists():\n raise ValidationError(_('Email already registered.'))", "def test_validates_email_not_already_invited_on_create(self):\n # Make an invitation with the same email address, but with different capitalisation\n self.project.invitations.create(email = 'Joe.Bloggs@example.com')\n invitation = Invitation(project = self.project, email = 'joe.bloggs@example.com')\n expected_errors = {\n 'email': ['Email address already has an invitation for this project.'],\n }\n with self.assertValidationErrors(expected_errors):\n invitation.full_clean()", "def email_exists(self, email):\n user = [user for user in ALL_USERS if user['email'] == email]\n if user:\n return True\n return False", "def test_register_existing_email(self):\n response = self.client.post('/api/v2/auth/signup',\n data=json.dumps(users[0]),\n headers=self.admin_headers,\n content_type='application/json')\n self.assertEqual(response.status_code, 409)\n self.assertIn('user with email already registred', str(response.data))", "def test_duplicate_email(self):\n self.duplicate_email = {'user': {\n \"username\": \"remmy\",\n \"email\": \"remmyk@test.com\",\n \"password\": \"@Password123\"\n }}\n\n self.duplicate_email2 = {'user': {\n \"username\": \"remmyk\",\n \"email\": \"remmyk@test.com\",\n \"password\": \"@Password123\"\n }\n }\n self.client.post(\n self.reg_url,\n self.duplicate_email,\n format=\"json\")\n response = self.client.post(\n self.reg_url,\n self.duplicate_email2,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"email provided is already in use\", response.content)", "def validate_email(form, field):\n if User.query.filter_by(email=form.email.data).first():\n form.email.errors.append(\n \"Email already associated with account!\")\n raise ValidationError", "def test_create_email_account_twice(self):\n email_addr = 'testcreatetwins@' + self.email_dom\n acc = SpokeEmailAccount(self.org_name, self.user_id)\n self.assertRaises(error.AlreadyExists, acc.create, email_addr)", "def test_resend_activation_email_nonunique_email(self):\n user1 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n user2_info = copy(self.user_info)\n user2_info['username'] = 'bob'\n user2 = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **user2_info)\n self.assertEqual(user1.email, user2.email)\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def clean_email(self):\n UserModel = get_user_model()\n email = self.cleaned_data[\"email\"]\n self.users_cache = UserModel._default_manager.filter(email__iexact=email)\n if not len(self.users_cache):\n raise forms.ValidationError(self.error_messages['unknown'])\n if not any(user.is_active for user in self.users_cache):\n # none of the filtered users are active\n raise forms.ValidationError(self.error_messages['unknown'])\n return email", "def 
email_not_in_use(has_user_field: bool = False) -> Callable:\n def _email_not_in_use(form, field):\n user_id = -1 if not has_user_field else form.user.id\n user = User.query.filter(User.email == field.data).first()\n if user is not None and user.id != user_id and len(field.data) > 0:\n raise ValidationError('This address is already in use')\n\n return _email_not_in_use", "def checkIsEmailAvailable(self, email):\n\n return User.objects.filter(email=email).exists()", "def check_for_duplicate_subject_identifier(self):\n pass", "def validate_email(self, data, field_name):\n existing = User.objects.filter(email__iexact=data['email'])\n if existing.exists():\n raise fields.ValidationError(\"A user with that email already exists.\")\n else:\n return data", "def test_resend_activation_email_nonexistent_user(self):\n self.assertFalse(self.registration_profile.objects.resend_activation_mail(\n email=self.user_info['email'],\n site=Site.objects.get_current(),\n ))\n self.assertEqual(len(mail.outbox), 0)", "def validate_email(form, field):\n if User.query.filter_by(email = field.data).first():\n raise ValidationError(\"Email already registed.\")", "def test_create_email_address_twice(self):\n email_addr = 'testcreatetwins@' + self.email_dom\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n self.assertRaises(error.AlreadyExists, addr.create, email_addr)", "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError(\"Email already registered.\")", "def verify_player_pending(self, player_email):\n try:\n self.pending_players.index(player_email)\n return True\n except ValueError:\n return False", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n return True", "def validate_email(self, email_field):\n user = User.query.filter_by(email=email_field.data).first()\n if user:\n if user.email:\n current_app.logger.error('{} tried to register user with email {} but user already exists.'.format(\n user.email, email_field.data))\n else:\n current_app.logger.error('Anonymous user tried to register user with email {} but user already exists.'.\n format(email_field.data))\n raise ValidationError('An account with this email address already exists')", "def validate_email(self, email):\n if email and email_address_exists(email):\n raise serializers.ValidationError(\n \"A user is already registered with this e-mail address.\")\n\n return email", "def validate_email(self, email):\n email = email.lower()\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError('Email already registered.')\n return email", "def clean_email(self):\n if self.data.get(\"selected_item\") != self.AGENT_ID:\n # resume normal invite flow\n return super().clean_email()\n\n email = self.cleaned_data[\"email\"]\n email = get_invitations_adapter().clean_email(email)\n try:\n self._agent_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n return super().clean_email()\n\n if self._agent_user.account_type != AccountType.agent_user.value:\n raise forms.ValidationError(\n _(\"An active non-agent user is using this e-mail address\")\n )\n if self._agent_user.organisations.filter(\n id=self.instance.organisation.id\n ).exists():\n raise forms.ValidationError(\n _(\"This agent is already active for this organisation\")\n )\n\n return email", "def 
clean_email(self):\n e = self.cleaned_data['email']\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n pass\n # msg = 'This email is not associated with an account'\n # raise forms.ValidationError(msg)\n return e", "def validate_unique(self, exclude=None):\n MyUser.objects.filter(email=self.email, is_active=False).delete()\n super(MyUser, self).validate_unique(exclude)", "def unique_email_validator(form, field):\n\n user_email = Clients.objects(\n Q(email=field.data) & Q(type_client=form.type_client.data)\n ).count()\n\n if form.id.data:\n user_manager = Clients.objects().get(id=form.id.data)\n\n if field.data != user_manager.email and user_email >= 1:\n raise wtf.ValidationError(\"Adresse Courriel existant\")\n else:\n if user_email >= 1 and field.data:\n raise wtf.ValidationError(\"Adresse Courriel existant\")", "def existing_email(cls, email):\n user_db = User.get_by('email', email)\n if not user_db:\n raise ValueError('This email is not in our database.')\n return email", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(\"A user with that email already exists.\"))", "def validate_email(self, email):\n if email is None:\n raise ValueError(\"Missing email value\")\n elif type(email) is not str:\n raise ValueError(\"Invalid email value, expect str\")\n\n normalized_email = self.normalize_email(email)\n\n existing_email = \\\n self.model.objects.filter(email=normalized_email).first()\n\n if existing_email:\n raise Exception(\"This email is already assigned to another User\")\n\n return normalized_email", "def clean_email(self):\n # NOTE: all emails are stored in lower-case\n e = self.cleaned_data['email'].lower()\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n msg = 'This email is not associated with an account'\n raise forms.ValidationError(msg)\n return e", "def clean_email_address(self):\n c_d = self.cleaned_data\n if User.objects.exclude(id=c_d['id']).filter(\n email=c_d['email_address']):\n raise forms.ValidationError(u'The email is already registered.')\n return c_d['email_address']", "def clean_email(self):\n email = self.cleaned_data.get('email')\n email_exists = User.objects.filter(email=email).exists()\n\n if email_exists:\n raise forms.ValidationError(\n 'El correo ya existe en nuestros registros.'\n )\n\n return email", "def testUsernameAlreadyThere(self):\r\n email = 'testing@gmail.com'\r\n new_user = UserMgr.signup_user(email, u'invite')\r\n DBSession.add(new_user)\r\n\r\n transaction.commit()\r\n\r\n user = DBSession.query(User).filter(User.username == email).one()\r\n\r\n url = quote('/{0}/reset/{1}'.format(\r\n user.email,\r\n user.activation.code\r\n ))\r\n\r\n res = self.app.post(\r\n url,\r\n params={\r\n 'password': u'testing',\r\n 'username': user.username,\r\n 'code': user.activation.code,\r\n 'new_username': u'admin',\r\n })\r\n self.assertIn('Username already', res.body)", "def clean(self):\n cleaned_data = super().clean()\n email = cleaned_data.get('email')\n is_subscribed = Subscriber.objects \\\n .filter(email__iexact=email, status=Status.SUBSCRIBED, mailing_list=self.mailing_list) \\\n .exists()\n if is_subscribed:\n email_validation_error = 
ValidationError(\n gettext('The email address \"%(email)s\" is already subscribed to this list.'),\n params={'email': email},\n code='already_subscribed_error'\n )\n self.add_error('email', email_validation_error)\n return cleaned_data", "def email_exists(self, input_email):\n # SQL query\n query = u\"SELECT * FROM tbl_users WHERE email = %s;\"\n inputs = input_email\n all_users = get_query(query, inputs)\n #checks if list is empty\n if all_users:\n for find_email in all_users:\n if find_email['email'] == inputs:\n return True\n return False", "def _check_if_duplicate_subject_identifier(self, using):\n if not self.pk and self.subject_identifier:\n if self.__class__.objects.using(using).filter(subject_identifier=self.subject_identifier):\n raise IdentifierError('Attempt to insert duplicate value for '\n 'subject_identifier {0} when saving {1} '\n 'on add.'.format(self.subject_identifier, self))\n else:\n if self.__class__.objects.using(using).filter(\n subject_identifier=self.subject_identifier).exclude(pk=self.pk):\n raise IdentifierError('Attempt to insert duplicate value for '\n 'subject_identifier {0} when saving {1} '\n 'on change.'.format(self.subject_identifier, self))\n self.check_for_duplicate_subject_identifier()", "def clean_email(self):\n\t\temail = self.cleaned_data['email']\n\t\tif User.objects.filter(email=email):\n\t\t\traise forms.ValidationError('Ya existe un email igual en la db.')\n\t\treturn email", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "def test_duplicate_email(self):\n params = {\n 'first_name': \"David\",\n 'last_name': \"Smith\",\n 'password': '******',\n 'email': \"david.smith@mom.com\",\n 'phone_number': \"012-345-6789\"\n }\n self.register(params)\n response = self.register(params)\n self.assertEqual(response.status_code, 400)\n self.assertDictContainsSubset({'message': \"Phone number/email already exists\"}, response.json())", "def test_register_duplicate_email():\n app = create_ctfd()\n with app.app_context():\n register_user(app, name=\"user1\", email=\"user1@ctfd.io\", password=\"password\")\n register_user(app, name=\"user2\", email=\"user1@ctfd.io\", password=\"password\")\n team_count = app.db.session.query(app.db.func.count(Teams.id)).first()[0]\n assert team_count == 2 # There's the admin user and the first created user\n destroy_ctfd(app)", "def test_email_address_uniqueness(self):\n # create a user with an email address first\n user = User.objects.create_user('mike', 'mike@example.com', '2secret')\n\n # test against User.email\n # test a unique email address\n form = forms.EmailAddressForm(user=user, data={'email': 'john@example.com'})\n self.failUnless(form.is_valid())\n\n # test a duplicated email address\n form = forms.EmailAddressForm(user=user, data={'email': 'mike@example.com'})\n self.failIf(form.is_valid())\n self.assertEqual(form.errors['email'],[u\"This email address already in use.\"])\n\n # test against EmailAddress.email\n email = EmailAddress(**{'user': user, 'email': 'alvin@example.com'})\n email.save()\n \n # test a duplicated email address\n form = forms.EmailAddressForm(user=user, data={'email': 'alvin@example.com'})\n self.failIf(form.is_valid())\n self.assertEqual(form.errors['email'],[u\"This email address already in use.\"])\n\n # test a unique email address\n form = forms.EmailAddressForm(user=user, data={'email': 'sam@example.com'})\n self.failUnless(form.is_valid())", "def 
test_already_existing_user(self):\n self.user.registration(\n \"Githeri\", \"githeri.man@yahoo.com\", \"iwantgitheri\", \"iwantgitheri\")\n msg = self.user.registration(\"Githeri\",\n \"githeri.man@yahoo.com\",\n \"iwantgitheri\",\n \"iwantgitheri\")\n self.assertEqual(msg, \"Your Account Already Active. Proceed to login\")", "def check_employee_emails(self, pr_number):\n pr = self.repo.get_pull(pr_number)\n email_pattern = re.compile(r'^.*@suse\\.(com|cz|de)$')\n\n for commit in pr.get_commits():\n sha = commit.sha\n author = commit.author\n # Not sure why we need to use the nested commit for the email\n email = commit.commit.author.email\n user_id = f'{author.login}({email})'\n\n if email_pattern.fullmatch(email):\n print(f'Commit {sha} is from SUSE employee {user_id}. Moving on...')\n continue\n\n print(f'Checking if {user_id} is part of the SUSE organization...')\n\n if self.org.has_in_members(commit.author):\n print(f'{user_id} is part of SUSE organization but a SUSE e-mail address was not used for commit: {sha}')\n sys.exit(1)\n\n print(f'PR-{pr_number} commit email(s) verified.')", "def email_exists(form, field):\n if User.select().where(User.email == field.data).exists():\n raise ValidationError('A user with that E-mail already exists.')", "def notify_already_registered(email):\n notifications_client = NotificationsAPIClient(settings.GOV_NOTIFY_API_KEY)\n\n notifications_client.send_email_notification(\n email_address=email,\n template_id=settings.GOV_NOTIFY_ALREADY_REGISTERED_TEMPLATE_ID,\n personalisation={\n 'login_url': (settings.SSO_BASE_URL + reverse('account_login')),\n 'password_reset_url': (settings.SSO_BASE_URL + reverse('account_reset_password')),\n 'contact_us_url': urls.domestic.CONTACT_US,\n },\n )", "def test_existing_email(self):\n response = self.client.post(\n self.reset_password_url, {\"email\": \"joel@gmail.com\"}, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertEqual(response.data['detail'], \"Not found.\")", "def test_add_user_duplicate_email(self):\n with self.client:\n auth_headers = login_test_user(self.client)\n payload = json.dumps(dict(email=\"neilb14@mailinator.com\",username=\"neilb14\",password=\"password123\"))\n self.client.post('/users',\n data = payload,\n content_type='application/json',\n headers = auth_headers\n )\n response = self.client.post('/users',\n data = payload,\n content_type='application/json',\n headers = auth_headers\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertIn('User already exists', data['message'])\n self.assertIn('fail', data['status'])", "def change_email(self, token):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n if data.get(\"user_id\") != self.id:\n return False\n new_email = data.get(\"new_email\")\n if new_email is None:\n return False\n # check to see if another user has this email\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = data.get(\"new_email\")\n db.session.add(self)\n return True", "def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)", "def test_nonexist_email_password_reset(self):\r\n\r\n bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + \"makeItFail\"})\r\n bad_email_resp = password_reset(bad_email_req)\r\n # Note: even if the email is bad, we return a 
successful response code\r\n # This prevents someone potentially trying to \"brute-force\" find out which\r\n # emails are and aren't registered with edX\r\n self.assertEquals(bad_email_resp.status_code, 200)\r\n obj = json.loads(bad_email_resp.content)\r\n self.assertEquals(obj, {\r\n 'success': True,\r\n 'value': \"('registration/password_reset_done.html', [])\",\r\n })", "def test_register_already_exist(self):\n response_decoded_json = requests.post(URL_AUTH['url_register'],\n data=json.dumps(AUTH_PAYLOADS['payload_user']),\n headers=HEADER['header'])\n mes = response_decoded_json.json()\n assert \"Email already exists in database\" == mes, \"There is no verification of existing email on register\"\n assert 400 == response_decoded_json.status_code, \"You have BAD REQUEST\"", "def test_manage_user_without_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg", "def validate_email(request):\r\n # get submitted email.\r\n email = request.GET.get('email', None)\r\n try:\r\n # check if an account with this email already exists, in case of editing user's profile.\r\n is_email_taken = User.objects.filter(email__iexact=email).exclude(email__iexact=request.user.email).exists()\r\n except: \r\n # check if an account with this email already exists, in case of registering new user.\r\n is_email_taken = User.objects.filter(email__iexact=email).exists()\r\n data = {'is_email_taken':is_email_taken}\r\n if data['is_email_taken']:\r\n data['error_message'] = 'An account with this Email already exists.'\r\n return JsonResponse(data)", "def validate_email(request):\r\n # get submitted email.\r\n email = request.GET.get('email', None)\r\n try:\r\n # check if an account with this email already exists, in case of editing user's profile.\r\n is_email_taken = User.objects.filter(email__iexact=email).exclude(email__iexact=request.user.email).exists()\r\n except: \r\n # check if an account with this email already exists, in case of registering new user.\r\n is_email_taken = User.objects.filter(email__iexact=email).exists()\r\n data = {'is_email_taken':is_email_taken}\r\n if data['is_email_taken']:\r\n data['error_message'] = 'An account with this Email already exists.'\r\n return JsonResponse(data)", "def clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email=email):\n raise forms.ValidationError('Ya existe un email igual al registrado.')\n return email", "def is_valid_email_address(self, addr):\n\t\t# the call is blocking, so only syntactic analysis performed\n\t\t# To check if the SMTP server exists change check_mx to True\n\t\t# to check if email 
address exists change verify to true\n\t\treturn addr is not None and validate_email(addr, verify=False, check_mx=False)", "def check_duplicate(self, state):\n pass", "def user_exists(email):\n data = [i['email'] for i in Data.users if email == i['email']]\n return \"\".join(data) == email", "def mark_email(args):\n cache.get_default().set_email(args.address, args.is_valid)\n print('{!r:} marked as {:s}valid.'.format(args.address, '' if args.is_valid else 'in'))", "def user_exists(self, email):\n user = UserModels.fetch_user_by_email(email)\n if user:\n return {\n \"status\": 400,\n \"error\": \"That email already exists\"\n }", "def test_user_cannot_register_twice(self):\n self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n response2 = self.client.post(SIGNUP_URL,\n data=json.dumps(self.user_data), content_type='application/json')\n self.assertEqual(response2.status_code, 203)\n result = json.loads(response2.data.decode())\n self.assertEqual(result[\"message\"], \"User already exists\")", "def test_manage_user_with_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook',\r\n email='f@f.com', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg" ]
[ "0.7703849", "0.69526696", "0.6862401", "0.6811217", "0.6774223", "0.67012995", "0.6641258", "0.6557139", "0.65535825", "0.6535207", "0.6535207", "0.6481211", "0.64502555", "0.63732386", "0.63573235", "0.63533545", "0.633444", "0.63318545", "0.63180923", "0.6309124", "0.6306436", "0.6282732", "0.62824756", "0.6279812", "0.62739724", "0.62636673", "0.625805", "0.62515044", "0.62441653", "0.62290955", "0.6226677", "0.6221131", "0.62125987", "0.62088954", "0.61918867", "0.61673534", "0.614815", "0.61322063", "0.6111506", "0.6101157", "0.60860854", "0.60777754", "0.6064864", "0.6045973", "0.6043651", "0.6034711", "0.59911066", "0.59777313", "0.597424", "0.5951642", "0.5943892", "0.59431183", "0.59383607", "0.5910907", "0.5906561", "0.59026325", "0.59019905", "0.5896201", "0.5885157", "0.58556277", "0.58235234", "0.5817198", "0.5816955", "0.5807606", "0.5805399", "0.5793238", "0.5785018", "0.57810044", "0.57711625", "0.57495177", "0.57350606", "0.5734991", "0.57261795", "0.5697986", "0.56963664", "0.5695616", "0.5693217", "0.56906676", "0.5687015", "0.5684032", "0.568099", "0.567996", "0.566248", "0.5630092", "0.5629195", "0.562533", "0.5620912", "0.56164724", "0.5610619", "0.5593719", "0.5584622", "0.5584622", "0.55794317", "0.557854", "0.5571808", "0.55716705", "0.5569154", "0.5565178", "0.55579716", "0.55572283" ]
0.6098911
40
Checks that the passwords are the same
def clean_password2(self):
    password1 = self.cleaned_data.get('password1', '')
    password2 = self.cleaned_data['password2']
    if password1 != password2:
        raise forms.ValidationError('The passwords did not match.')
    return password2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_passwords_match(self, password1, password2):\n return password1 == password2", "def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)", "def check_pass(self):\n if self.validated_data['new_password'] != self.validated_data['confirm_password']:\n raise serializers.ValidationError({\"error\":\"Please enter matching passwords\"})\n return True", "def check_password(password1: str, password2: str) -> bool:\n if password1 == password2:\n return True\n else:\n raise ValueError('Пароли не совпадают')", "def PasswordMatch(self, *args):\n pass1 = self.password.get().lstrip().rstrip()\n pass2 = self.confirm_pass.get().lstrip().rstrip()\n \n if (pass1 and pass1 == pass2):\n self.pass_match_label['text'] = 'Passwords match'\n self.pass_match_label['fg'] = 'green'\n return True\n else:\n self.pass_match_label['text'] = 'Password don\\'t match'\n self.pass_match_label['fg'] = 'red'\n return False", "def test_if_pwd_equals_confirmed(self):\n msg = self.user.registration(\"Githeri\", \"githeri.man@yahoo.com\",\n \"iwantgitheri\",\n \"iwantsgitheri\")\n self.assertEqual(msg, \"Your passwords should match\")", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def clean_password(self):\n password1 = self.cleaned_data.get('password1')\n password2 = self.cleaned_data.get('password2')\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError('Passwords do not match')\n return password2", "def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contrasenas no coinciden.')\n\t\treturn repetir_password", "def check_password(self, password):\n return self.password == password", "def change_password_tests(new_password1, new_password2):\n error = None\n if not new_password1 == new_password2:\n error = \"New passwords do no match\"\n elif new_password1 in common_passwords:\n error = \"New password is frequently used. 
Please use another password.\"\n elif not is_complex(new_password1):\n error = \"New password not complex enough\"\n\n return error", "def clean_password2(self):\n if self.clean_data.get('password1', None) and self.clean_data.get('password2', None) and \\\n self.clean_data['password1'] == self.clean_data['password2']:\n return self.clean_data['password2']\n raise forms.ValidationError(u'You must type the same password each time')", "def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contraseñas no coinciden.')\n\t\treturn repetir_password", "def clean_password_repeat(self):\n if 'password' in self.cleaned_data and 'password_repeat' in self.cleaned_data:\n if self.cleaned_data['password'] != self.cleaned_data['password_repeat']:\n raise forms.ValidationError('The password fields didn\\'t match: Password confirmation failed.')\n return self.cleaned_data['password_repeat']", "def clean_password_repeat(self):\n password = self.cleaned_data.get(\"password\")\n password_repeat = self.cleaned_data.get(\"password_repeat\")\n if password != password_repeat:\n raise ValidationError(_(\"Passwords don't match\"))\n return self.cleaned_data.get(\"password_repeat\")", "def password_is_valid_task_2(row):\n # XOR the two positions in the password\n return (row['letter'] == row['password'][row['policy'][0] - 1]) != \\\n (row['letter'] == row['password'][row['policy'][1] - 1])", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def clean(self):\r\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\r\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\r\n raise forms.ValidationError(_(u'You must type the same password each time'))\r\n return self.cleaned_data", "def test_user_logged_in_new_passwords_match_but_too_short(self):\n form_data = {\n \"old_password\": self.password,\n \"new_password\": \"pw\",\n \"new_password2\": \"pw\"\n }\n login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(login)\n post_response = self.client.post(self.change_password_url, form_data)\n form = post_response.context.get('form')\n self.assertEqual(post_response.status_code, 200)\n self.assertContains(post_response, \"Change Your Password\")\n self.assertIsInstance(form, ChangePasswordForm)\n self.client.logout()\n re_login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(re_login)", "def match(self,pwdmarked,password):\n pwd1 = self.cleanPassword(pwdmarked)\n pwd2 = self.cleanPassword(password)\n if not (pwdmarked or '').startswith('plain:{'):\n pwd2 = crypt(password,self.settings.authenSalt,10000)\n return pwd1==pwd2", "def clean_password1(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data['password1']", "def validate_password_repeat(form, field) -> None:\n if field.data != form.password.data:\n raise ValidationError('The password needs to match the new password')", "def passwords_match_2(cls, v: Any, values: Any, **kwargs: Any) -> Any:\n if not 
values.get(\"password_new_1\") or v != values[\"password_new_1\"]:\n raise ValueError(\"passwords do not match\")\n return v", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(u'You must type the same password each time')\n return self.cleaned_data", "def test_password_match(self):\r\n\r\n tst = User()\r\n tst._password = self.test_hash\r\n\r\n self.assertTrue(\r\n tst._password == self.test_hash, \"Setting should have hash\")\r\n self.assertTrue(\r\n tst.password == self.test_hash, \"Getting should have hash\")\r\n self.assertTrue(\r\n tst.validate_password(self.test_password),\r\n \"The password should pass against the given hash: \" + tst.password)", "def clean_paswword(self):\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Los password no coinciden.')\n return password", "def verify_match(password, verify):\n return password == verify", "def correct_password(username, password, db):\n\tquery = db((db.User.username == username) & (db.User.password == password))\n\treturn query.count() > 0", "def clean(self):\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data", "def clean(self):\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data", "def clean(self):\n if 'password' in self.cleaned_data and 'password_repeat' in self.cleaned_data:\n if self.cleaned_data['password'] != self.cleaned_data['password_repeat']:\n raise forms.ValidationError('The two password fields didn\\'t match.')\n return self.cleaned_data", "def test_check_password(self):\n user = User.query.filter_by(username='eschoppik').first()\n self.assertTrue(bcrypt.check_password_hash(user.password, 'secret'))\n self.assertFalse(bcrypt.check_password_hash(user.password, 'notsecret'))", "def password_validation(pass1,pass2):\n errors = []\n if(pass1 != pass2):\n errors.append(\"Lösenorden matchade inte.\")\n if(len(pass1) < 3):\n errors.append(\"Lösenordet måste vara längre än 3 bokstöver.\")\n \n return errors", "def clean_password2(self):\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Las Claves no coinciden.')\n return password2", "def check_password(self, password):\n\n\t\twith self.lock:\n\t\t\tassert ltrace(TRACE_USERS, 'comparing 2 crypted passwords:\\n%s\\n%s' % (\n\t\t\t\tself.__userPassword,\n\t\t\t\tself.backend.compute_password(password, self.__userPassword)))\n\n\t\t\treturn self.__userPassword == self.backend.compute_password(\n\t\t\t\t\t\t\t\t\t\t\t\tpassword, self.__userPassword)", "def validate_password_repeat(form: CompleteSignupForm, field: PasswordField) -> None:\n if field.data != form.password.data:\n raise ValidationError('The password needs to match the new password')", "def verify_password(self, username, password):\n\n try:\n self.c.execute('SELECT password FROM profiles WHERE name=(?)', (username,))\n\n db_pw = self.c.fetchone()[0]\n print(password)\n\n return db_pw == 
password\n\n except TypeError:\n return False", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields are different. Please enter the same password \"\n \"in both fields.\"))\n return self.cleaned_data", "def test_reset_password_fails_for_similar_passwords(self):\n self.test_client.post(\n \"/api/v1/auth/register\", data=self.user_data)\n\n resp = self.test_client.post(\n \"/api/v1/auth/login\",\n data=self.user_data)\n data = json.loads(resp.data)\n\n # reset-password should pass provided the new password\n # is not similar to the old saved password\n token = data['token']\n resp = self.test_client.post(\n \"/api/v1/auth/reset-password\",\n headers=dict(Authorization=f\"Bearer {token}\"),\n data={'password': '!0ctoPus', 'confirm password': '!0ctoPus'}\n )\n\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(data[\"status\"], \"failure\")\n self.assertEqual(\n data[\"message\"],\n \"Your new password should not be similar to your old password\")", "def validatePwd(self,pwd2):\n\n if self.pwd==pwd2:\n self.__encryptPwd = bcrypt.hashpw(self.pwd.encode('utf-8'),\n bcrypt.gensalt())\n return True\n else:\n return False", "def clean_password_again(self):\n if 'password' in self.cleaned_data:\n password = self.cleaned_data['password']\n password_again = self.cleaned_data['password_again']\n\n if password == password_again:\n return password\n else:\n return None\n\n raise forms.ValidationError('Passwords do not match.')", "def pass_check(user_found):\n password = ''\n while password != user_found[1]:\n password = stdiomask.getpass(prompt=\"Please enter your password: \", mask='*')\n pass1 = encrypter.encrypt_password(password)\n if user_found[1] == pass1:\n return \"\\nPassword match\\n\"\n else:\n print(\"\\nPassword do not match\\n\")", "def clean_password2(self):\n password1 = self.cleaned_data.get('password1', '')\n password2 = self.cleaned_data['password2']\n if not password1 == password2:\n raise forms.ValidationError(\"The two passwords didn't match.\")\n return password2", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] == self.cleaned_data['password2']:\n return self.cleaned_data['password2']\n raise forms.ValidationError(_(u'You must type the same password each time'))", "def test_uniqueness(self):\n passwords = tuple(generate_password(8) for i in range(100))\n self.assertEqual(len(passwords), len(set(passwords)))", "def __is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n return hmac.compare_digest(\n pw_hash, hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n )", "def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n return password2", "def check_password(self, username, password): # tested\r\n conn = self.get_db()\r\n with conn:\r\n c = conn.cursor()\r\n sql = ('select password from gameuser where username=%s')\r\n c.execute(sql,(username,))\r\n hashedpass = md5.new(password).hexdigest()\r\n u = c.fetchone()\r\n if u == None:\r\n raise NoUserExistsException(username)\r\n # print 'database contains {}, entered 
password was {}'.format(u[0],hashedpass)\r\n return u[0] == hashedpass", "def test_pbkdf2_sha256_password_reuse(self):\r\n user = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # students need to user at least one different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"different\"))\r\n self._change_password(user, \"different\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n\r\n # staff needs to use at least two different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self._change_password(staff, \"different\")\r\n\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"third\"))\r\n self._change_password(staff, \"third\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))", "def passwords_match_1(cls, v: Any, values: Any, **kwargs: Any) -> Any:\n if not values.get(\"password_old\"):\n raise ValueError(\"old password is required\")\n return v", "def test_user_logged_in_new_passwords_match_but_no_number(self):\n form_data = {\n \"old_password\": self.password,\n \"new_password\": \"testpassword\",\n \"new_password2\": \"testpassword\"\n }\n login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(login)\n post_response = self.client.post(self.change_password_url, form_data)\n form = post_response.context.get('form')\n self.assertEqual(post_response.status_code, 200)\n self.assertContains(post_response, \"Change Your Password\")\n self.assertIsInstance(form, ChangePasswordForm)\n self.client.logout()\n re_login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(re_login)", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] and self.cleaned_data['password2']:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError('The password fields didn\\'t match: Password confirmation failed.')\n return self.cleaned_data['password2']", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test06_password_mixture(self):\n self.set_complexity(length=14, numeric=1, upper=1, lower=1, special=1)\n\n invalid = (\n \"A\",\n \"ACBDEabcde!!!!\",\n \"Tr0ub4dor&3\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n \"1234;.,/]1234\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"Sixteenchars12@_\",\n \"thisis4reallybadPassword!\",\n \"C0rrecthorsebatteryst@ple\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'Password1234\\'\"\"\"\"\"',\n )\n self.set_passwords(valid)", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] and self.cleaned_data['password2']:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError('The two password fields didn\\'t match.')\n return 
self.cleaned_data", "def old_password_check(form, field):\n old_password = field.data\n password = current_user.password\n r = pwd_context.verify(old_password, current_user.password)\n if not r:\n raise validators.ValidationError('old password is wrong')", "def test_accounts_password_reuse(self):\r\n user = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # students need to user at least one different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"different\"))\r\n self._change_password(user, \"different\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n\r\n # staff needs to use at least two different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self._change_password(staff, \"different\")\r\n\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"third\"))\r\n self._change_password(staff, \"third\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))", "def test_invalid_password(self):\n pass", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data", "def verify_password(self, password):\n return self.PASSWORD == password", "def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )", "def check_entry_password(username, password, entry_password):\n if entry_password.startswith('$apr1$'):\n salt = entry_password[6:].split('$')[0][:8]\n expected = apache_md5crypt(password, salt)\n elif entry_password.startswith('{SHA}'):\n import sha\n expected = '{SHA}' + sha.new(password).digest().encode('base64').strip()\n else:\n import crypt\n expected = crypt.crypt(password, entry_password)\n return entry_password == expected", "def verify_password(self, password):\n return self.PASS == password", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data['password2']", "async def 
check_password(self, login, password):", "def test_password_and_confirm_not_match(self):\n user = {\n 'email': 'not_a_user@example.com',\n 'password': 'nope',\n 'confirm_password': 'nope2'\n }\n # register user with the bad password match\n result = self.client().post(AuthTestCase.registration, data=user)\n # returns a bad request with appropriate message\n self.assertEqual(result.status_code, 400)\n self.assertIn(\"password and confirm_password have to match\", str(result.data))", "def verify_password(stored_password, provided_password):\n #print(provided_password)\n salt = stored_password[:64]\n stored_password = stored_password[64:]\n pwdhash = hashlib.pbkdf2_hmac('sha512', \n provided_password.encode('utf-8'), \n salt.encode('ascii'), \n 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n #print(pwdhash)\n return pwdhash == stored_password", "def test_old_password_login_check(self):\n old_password = self.user['password1']\n self.change_password()\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': old_password})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def clean(self):\n cleaned_data = super(RegistroForm, self).clean()\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(\"Passwords don't match. Please enter both fields again.\")\n return self.cleaned_data", "def check_password(self, author, password):\n return author.hashed_password == generate_hashed_passwd(password, author.hashed_password)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n pw_hash = get_pw_hash(cur, session['username'])\n if not check_password(self.old_password.data, pw_hash):\n self.old_password.errors.append('Did not find a match.')\n return False\n\n return True", "def valid_pwd(name, password, h):\n salt = h.split(',')[0]\n return h == make_pwd_hash(name, password, salt)", "def test_equality(cleartextpw, cryptedpw=\"\"):\n if not cryptedpw:\n return crypt.crypt(cleartextpw, '$6${}$'.format(salt(83)))\n else:\n if cryptedpw == 'x' or cryptedpw == '*':\n raise NotImplementedError(\n \"Sorry, currently no support for shadow passwords\")\n\n return crypt.crypt(cleartextpw, cryptedpw) == cryptedpw", "def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))", "def checkPassword(self, password):\n if password is None:\n return False\n if self.hashed:\n (salt, _) = self.password.split('|')\n return (self.password == utils.saltHash(password, salt=salt))\n else:\n return (self.password == password)", "def correct_password(name, password):\n if not User.created(name):\n return False\n user = User.get_user(name)\n return user.info['password'] == password", "def matches_password_verify(password, verify):\n if password and not password == verify:\n return \"Your passwords didn't match.\"\n else:\n return \"\"", "def password_is_correct(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def check_password(self, password: str) -> bool:\n\n hash_of_given_password = hashlib.new('md5', bytes(password, encoding='utf8'))\n return hash_of_given_password.hexdigest() == self.hashed_password", "def clean(self):\n cleaned_data = super(RegisterForm, self).clean()\n password = 
cleaned_data.get(\"password\")\n confirm_password = cleaned_data.get(\"confirm_password\")\n if password != confirm_password:\n raise forms.ValidationError(\n \"The passwords entered do not match.\"\n )", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def clean(self):\n cleaned_data = super(MyRegistrationForm, self).clean()\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(\"Passwords don't match. Please enter both fields again.\")\n return self.cleaned_data", "def test_valid_password(self):\n newpass = 'Just Complex Enough'\n m = hashlib.sha512()\n m.update(newpass.encode('utf-8'))\n m.update(self.request.user.salt)\n hashed = m.digest()\n self.request.json_body = deepcopy(self.good_dict)\n self.assertNotEqual(self.request.user.password, hashed)\n self.request.json_body['password'] = newpass\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, dict_from_row(self.request.user, remove_fields=removals))\n self.assertEqual(self.request.user.password, hashed)", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def is_correct_user(self, login, password):\n pass", "def clean_password_new_again(self):\n if 'password_new' in self.cleaned_data:\n password_new = self.cleaned_data['password_new']\n password_new_again = self.cleaned_data['password_new_again']\n\n if password_new == password_new_again:\n return password_new\n else:\n return None\n\n raise forms.ValidationError('Passwords do not match.')", "def is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n return hmac.compare_digest(\n pw_hash, hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n )", "def validate_password(self, password):\n return self._password == encrypt_password(password,\n b64decode(str(self._salt)))", "def _check_password(self, body):\n if not self.config.security_initialize:\n raise RuntimeError(\"First set a password\")\n\n password = hash_password(body[ATTR_PASSWORD])\n if password != self.config.security_password:\n raise RuntimeError(\"Wrong password\")", "def clean_new_password2(self):\n password1 = self.cleaned_data.get('new_password1')\n password2 = self.cleaned_data.get('new_password2')\n if password1 and password2:\n validate_password(password1, self.instance)\n if password1 != password2:\n self.add_error('new_password2',\n _(\"The two password fields didn't match.\"))\n else:\n self.change_password = True\n return password2", "def verify_password(stored_passwd, provided_passwd):\n salt = stored_passwd[:64]\n stored_password = stored_passwd[64:]\n pwdhash = hashlib.pbkdf2_hmac(\n 'sha512', provided_passwd.encode('utf-8'), salt.encode('ascii'), 100000\n )\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def test_41_password_change(self):\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n 
data={'current_password': password,\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n assert \"Yay, you changed your password succesfully!\" in res.data, res.data\r\n\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': \"wrongpassword\",\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Your current password doesn't match the one in our records\"\r\n assert msg in res.data\r\n\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': '',\r\n 'new_password':'',\r\n 'confirm': '',\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Please correct the errors\"\r\n assert msg in res.data", "def passwd_check(request, passphrase):\n import hashlib\n hashed_passphrase = request.registry.settings.get('phoenix.password', u'')\n \n try:\n algorithm, salt, pw_digest = hashed_passphrase.split(':', 2)\n except (ValueError, TypeError):\n return False\n\n try:\n h = hashlib.new(algorithm)\n except ValueError:\n return False\n\n if len(pw_digest) == 0:\n return False\n\n try:\n h.update(passphrase.encode('utf-8') + salt.encode('ascii'))\n except:\n return False\n\n return h.hexdigest() == pw_digest", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def check_password(self, password):\n check = False\n if self.password is not None:\n logger.critical('Old style password exists.')\n if check_password_hash(self.password, password):\n self.passwd = password\n self.password = None\n db.session.add(self)\n db.session.commit()\n logger.critical('Old style password replaced.')\n else:\n return check\n try:\n check = bcrypt.check_password_hash(self._password.encode('utf8'), password.encode('utf8'))\n except:\n logger.critical('Error in password check.')\n finally:\n return check", "def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))" ]
[ "0.77910745", "0.7613175", "0.7546211", "0.7338358", "0.7300746", "0.7270298", "0.7236492", "0.7210182", "0.7177891", "0.716393", "0.71617705", "0.715105", "0.7121701", "0.7119943", "0.70778507", "0.70771366", "0.7053473", "0.704067", "0.70307827", "0.70246506", "0.7013039", "0.7005679", "0.6986126", "0.6985254", "0.6982743", "0.6977606", "0.697296", "0.6959909", "0.6945861", "0.6945861", "0.694555", "0.6942421", "0.6935249", "0.69277793", "0.6919702", "0.69102407", "0.69059867", "0.6902924", "0.69002694", "0.6894756", "0.6890408", "0.68853015", "0.68849874", "0.687659", "0.68739295", "0.6858424", "0.6857962", "0.68505114", "0.684667", "0.68370324", "0.68130904", "0.6799669", "0.6763958", "0.6760015", "0.6758928", "0.67426425", "0.673988", "0.6735125", "0.6735007", "0.6735007", "0.6728473", "0.67241323", "0.6717947", "0.6706123", "0.67027634", "0.6698876", "0.6687866", "0.6680093", "0.66782576", "0.66770697", "0.6670912", "0.6665151", "0.66635", "0.6661842", "0.66340345", "0.6632052", "0.6631455", "0.662822", "0.6614664", "0.6608855", "0.6602591", "0.66006523", "0.65950763", "0.6590053", "0.6582553", "0.65778524", "0.65739775", "0.65612376", "0.6552006", "0.65473217", "0.65408677", "0.6533973", "0.6532722", "0.6532722", "0.6530178", "0.6528909", "0.6522996", "0.6518685", "0.6512728" ]
0.6925312
34
Creates a User object (it will be inactive)
def create_user(self):
    if not self.is_valid():
        return None

    # generate a username
    ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]
    if len(ids) > 0:
        # ids[0] will be the maximum value (due to order_by: '-id')
        idnum = ids[0] + 1
    else:
        idnum = 1

    # create User object
    username = "user%s" % idnum
    # NOTE: store email in lower case
    email = self.clean_email().lower()
    password = self.clean_password2()
    user = User(username=username, email=email, password='tmp')
    user.save()
    # set the real password
    user.set_password(password)

    # make user inactive (until user has confirmed account)
    user.is_active = False

    # update
    user.save()

    return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def create_user(self):\n return User.objects.create_user(**self.user_data)", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_new_user(self):\n username = 'pseudo'\n email = 'carole@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='test_email@example.com', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u", "def create_user(self, **kwargs):\n\n user = self.user_model(**self._prepare_create_user_args(**kwargs))\n return self.put(user)", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def create_user(self, **kwargs):\n kwargs = self._prepare_create_user_args(**kwargs)\n user = self.user_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(user)", "def create_user(uname,password):\n new_user = User(uname,password)\n return new_user", "def create_user(first_name,last_name,email,password):\n\n\tnew_user = User(first_name,last_name,email,password)\n\treturn new_user", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(self):\n return UserFactory.create()", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def create_user():\n record = request.get_json()\n if record is None:\n return 
{\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def user():\n\n user = User.objects.create(name='Janek', surname='Kowalski',\n internal_id='PUHgjdJ', is_administrator=True,\n is_payment_creator=True, is_payment_approver=False,\n can_delete_payment=True)\n return user", "def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)", "def create_user(self, password=None, **data):\n\n user = self.model(**data)\n user.set_password(password)\n user.is_active = False\n user.save()\n\n return user", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def create_user():\n body = request.get_json(silent=True)\n if body is None:\n abort(400, jsonify(error=\"Not a JSON\"))\n if 'email' not in body:\n abort(400, jsonify(error=\"Missing email\"))\n if 'password' not in body:\n abort(400, jsonify(error=\"Missing password\"))\n user = models.user.User(**body)\n models.storage.new(user)\n models.storage.save()\n return make_response(jsonify(user.to_dict()), 201)", "def create_user(user_name: str):\n user = User()\n user.username = user_name\n user.save()\n return user", "def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def create_user(email, password, fname, lname):\n\n user = User(email=email, password=password, fname=fname, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def _create_user(self, email, password, first_name, last_name, **extra_fields):\n now = timezone.now()\n email = self.normalize_email(email)\n user = self.model(email=email,\n first_name=first_name,\n last_name=last_name,\n is_active=True,\n last_login=now,\n date_joined=now, **extra_fields)\n\n user.set_password(password)\n user.save(using=self._db)\n return user", "def 
create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()", "def new_user(first_name, sur_name, user_name, email, password):\n new_user = User(first_name, sur_name, user_name, email, password)\n return new_user", "def create_a_user(self, username='fry', email='fry@futur.ama', password='Qwerty!234'):\n user = User.objects.create_user(username, email, password)\n user.save()\n return user", "def _create_user(self,email,password,**extra_fields):\n\t\tif not email:\n\t\t\traise ValueError('The given email must be set')\n\n\t\ttry:\n\t\t\twith transaction.atomic():\n\t\t\t\tuser = self.model(email=email,**extra_fields)\n\t\t\t\tuser.set_password(password)\n\t\t\t\tuser.save(using=self._db)\n\t\t\t\treturn user\n\t\texcept:\n\t\t\traise", "def create_user(self, username, password, email, name):\n\n duplicate_check = User.query.filter_by(username=username).first()\n if duplicate_check is not None:\n return\n user = User(username=username, password=password, email=email, name=name)\n db.session.add(user)\n db.session.commit()", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('Username is required.')\n if not email:\n raise ValueError('Email is required.')\n if not password:\n raise ValueError('Password is required.')\n try:\n with transaction.atomic():\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n print(\"create user\")\n return user", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def create_user(email, password):\n email_used = AuthUser.query.filter_by(email=email).first()\n 
if email_used:\n return False, \"Email address has already been used\"\n account = Account(email)\n account.plan_key = 'BASIC'\n account.is_active = True\n account.created = datetime.datetime.now()\n db.session.add(account)\n user = AuthUser(email, password, account)\n user.created = datetime.datetime.now()\n db.session.add(user)\n db.session.commit()\n return user.id, None", "def create_user(\n *,\n user_in: schemas.UserCreate,\n) -> schemas.User:\n next_user_id = users[-1].id + 1 # type: ignore\n user = schemas.User(\n id=next_user_id,\n email=user_in.email,\n is_active=user_in.is_active,\n is_superuser=user_in.is_superuser,\n full_name=user_in.full_name,\n )\n users.append(user)\n return user", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def _create_user(self, password, is_active, is_staff, is_superuser, **extra_fields):\n now = timezone.now()\n if not self.model.USERNAME_FIELD:\n raise ValueError('User model must have set USERNAME_FIELD')\n identifier = extra_fields.get(self.model.USERNAME_FIELD)\n if not identifier:\n raise ValueError((\"User's %s must be set\", self.model.USERNAME_FIELD))\n user = self.model(is_active=is_active, is_staff=is_staff, is_superuser=is_superuser,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user():\n try:\n\n user = User(username=request.json.get(\"username\"), score=0,)\n\n user.insert()\n\n response = jsonify({\"success\": True, \"created_user_id\": user.id})\n\n except AttributeError:\n abort(400)\n\n return response", "def create_user(email, password):\n\n user = User(email=email, password=password)\n \n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user():\n body = request.json\n username = body.get('username')\n password = body.get('password')\n validation = validate_user(username, password)\n password = md5(password.encode('utf-8')).hexdigest()\n if validation != \"OK\":\n return HTTPResponse(status=500, body={\"message\":validation})\n try:\n with db.atomic():\n user = User.create(username=username, password=password)\n user.save()\n ret = json.dumps({'message':'user created'})\n return HTTPResponse(status=200, body=ret)\n except IntegrityError:\n ret = json.dumps({'message':'user already exists'})\n return HTTPResponse(status=500, body=ret)", "def create_user(self, instance, **attrs):\n instance = self._get_resource(_instance.Instance, instance)\n return self._create(_user.User, instance_id=instance.id, **attrs)", "def create_user(fname, lname, email, password):\n\n user = User(fname=fname, lname=lname, email=email, password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(username,password):\n return User.objects.create_user(username=username,password=password)", "def create_user():\n try:\n payload = _validatePayload(request)\n timestamp = int(time.time() * 1000)\n user = {\n 'name': payload.get('name'),\n 'email': payload.get('email'),\n 'password': _encodePassword(payload.get('password')),\n 'createdAt': timestamp,\n 'updatedAt': timestamp,\n }\n\n resp = table.put_item(\n Item=user,\n Expected={'email': {'Exists': False}}\n )\n return jsonify(user), 200\n except Exception as e:\n logger.info('ERROR {}'.format(str(e)))\n return _customizeErrorMessage(e)", "def create_user(fname, lname, email, password):\n\n user = User(fname=fname, \n lname=lname, \n email=email, \n 
password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def new_user(cls, user):\r\n pass", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_user():\n first_name = request.form['first_name'].capitalize()\n last_name = request.form['last_name'].capitalize()\n image_url = request.form['image_url']\n\n new_user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_user(username, password):\n if User.query.filter_by(username=username).first():\n raise RuntimeError(f\"{username} ja esta cadastrado\")\n user = User(username=username, password=password)\n db.session.add(user)\n db.session.commit()\n return user", "def create_user(self, username, password):\n return self.User.objects.create_user(username, password=password)", "def make_new_user():\n\n new_user = User(\n first_name=request.form['first_name'],\n last_name=request.form['last_name'],\n image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_inactive_user(self, username, email, password, first_name=None, last_name=None):\n\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save()\n\n registration_profile = self.create_registration_profile(new_user)\n registration_profile.send_activation_email()\n\n if not registration_profile:\n return None\n\n return new_user", "def sample_user(email='john7ric@mail.com', password='open@123'):\n return get_user_model().objects.create_user(email, password)", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def sample_user(email=\"student@test.com\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)", "def _create_user(self, password, **extra_fields):\n try:\n user = self.model(**extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n except:\n raise ValueError('ValueError: Cannot create new user')", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, username, email, password):\n\t\tnow = datetime.now()\n\t\tif username is None:\n\t\t\traise ValueError('Must include username')\n\t\tif email is None:\n\t\t\traise ValueError('Must include email')\n\t\temail = self.normalize_email(email)\n\t\tuser = 
self.model(\n\t\t\temail=self.normalize_email(email),\n\t\t\tusername=username,\n\t\t\tdate_joined=now\n\t\t)\n\t\tuser.set_password(password)\n\t\tuser.save(using=self._db)\n\t\treturn user", "def create_user(email, password, home_zipcode):\n\n user = User(email=email, password=password, home_zipcode=home_zipcode)\n db.session.add(user)\n db.session.commit()\n return user", "def create_user(username, email, password):\n return User.objects.create_user(username=username, email=email, password=password)", "def create_user():\r\n data = request.get_json() or {}\r\n print(data)\r\n # some data checks\r\n if 'username' not in data or 'password' not in data:\r\n return bad_request('must include username and password fields')\r\n if User.query.filter_by(username=data['username']).first():\r\n return bad_request('please use a different username')\r\n user = User()\r\n # add user to database\r\n user.add_user(data)\r\n # check that the transaction was successful\r\n res = User.query.filter_by(username=data['username']).one_or_none()\r\n # return added user as query response\r\n if res:\r\n response = jsonify(res.to_dict())\r\n response.status_code = 201\r\n # else return error\r\n else:\r\n response.status_code = 403\r\n response.headers['Location'] = url_for('api.get_user', id=user.id)\r\n return response", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(_('The Email must be set'))\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n extra_fields.setdefault('is_active', True)\n user.save()\n return user", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email='user@example.com', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)", "def create_user(self, email, password=None, full_name=None, active=None, is_hospital=False):\n if not email:\n raise ValueError('Users must have an email address')\n if not password:\n raise ValueError('Users must have password')\n\n user_obj = self.model(\n email=self.normalize_email(email),\n full_name=full_name,\n is_hospital=is_hospital,\n )\n\n user_obj.set_password(password)\n user_obj.save(using=self._db)\n return user_obj", "def _create_user(self, username, email, password, is_staff, is_superuser, first_name, last_name):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email,username=username,\n first_name=first_name, last_name=last_name,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now)\n user.uuid = generate_uuid()\n user.uniqueid = user.uuid[:4]\n user.set_password(password)\n user.save(using=self._db)\n return user", "def sample_user(email='sajal@gmail.com', password='password'):\n return get_user_model().objects.create_user(email, password)", "def 
create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def sample_user(email='test@gmail.com', password='testpass'):\n return get_user_model().objects.create_user(email, password)", "def _create_user(self, username, password, is_staff, **extra_fields):\n now = timezone.now()\n if not username:\n raise ValueError('The given username must be set')\n user = self.model(username=username,\n is_staff=is_staff, is_active=True,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user_service(username: str, email: str, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user = User(username=username, email=email, password=hashed_password)\n db.session.add(user)\n db.session.commit()", "def create(self, validated_data: dict):\n return User.objects.create_user(**validated_data)", "def createUser(name, email, picture):\n u = User(name=name, email=email, picture=picture)\n session.add(u)\n session.commit()\n print 'User \"' + name + '\" created.'\n return u", "def create_user(self, email, name, phone1, password=None, signed_up=timezone.localtime(),):\n if not email:\n raise ValueError(_('Users must have an email address'))\n\n user = self.model(\n email=self.normalize_email(email),\n name=name,\n phone1=phone1,\n signed_up=signed_up,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n MyUserProfile.objects.create(myuser=user) \n NotifClick.objects.create(myuser=user) \n\n return user", "def create(self,validated_data):\n user_obj = User.objects.create(**validated_data)\n return user_obj", "def new_user(cls, user):\n pass", "def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError(\"The given email must be set\")\n try:\n with transaction.atomic():\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.generate_activation_code()\n user.save(using=self._db)\n return user\n except:\n raise", "def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")", "def create_user(fname, lname, email, username, password, category, country):\n try:\n user = User(fname=fname,\n lname=lname,\n email=email,\n username=username,\n password=password,\n preferred_category_id=category,\n preferred_country_id=country)\n\n db.session.add(user)\n db.session.commit()\n return user\n\n except IntegrityError:\n db.session.rollback()\n return None", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n if not email:\n raise 
ValueError('The given email must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.password = make_password(password)\n user.save(using=self._db)\n return user", "def create(self, validated_data):\n\n # Here we actually create a new user.\n user = models.UserProfile(\n email = validated_data['email'],\n name = validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n\n # Here we save the object to the database.\n user.save()\n\n return user", "def make_user(username=None):\r\n user = User()\r\n\r\n if not username:\r\n username = random_string(10)\r\n\r\n user.username = username\r\n\r\n DBSession.add(user)\r\n DBSession.flush()\r\n return user", "def create_user(self, username=None, email=None, password=None):\n\t\treturn self._create_user(username, email, password)", "def create_user(self, username=\"foo\", email=\"foo@foo.com\", pwd=\"password\"):\n with app.app_context():\n user = User(username=username,\n email=email,\n pwd=bcrypt.generate_password_hash(pwd))\n db.session.add(user)\n db.session.commit()", "async def create_user(user_request: UserRequestModel):\n\n user = User.create(\n username=user_request.username,\n email=user_request.email\n )\n\n return user", "def _create_user(self, email, password,username, **extra_fields):\r\n if not email:\r\n raise ValueError('The given email must be set')\r\n if not username:\r\n raise ValueError('The given username must be set')\r\n email = self.normalize_email(email)\r\n user = self.model(email=email,username=str.strip(username), **extra_fields)\r\n user.set_password(password)\r\n user.save(using=self._db)", "def create_user(fname, lname, email, password, phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(username, password):\n\n user = User(username=username, password=password)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def save(self):\n new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],\n password=self.cleaned_data['password1'],\n email=self.cleaned_data['email'],\n firstname=self.cleaned_data['first_name'],\n lastname=self.cleaned_data['last_name'],\n agree=self.cleaned_data['tos'])\n return new_user", "def _create_user(self, username, email, password, **extra_fields):\n if not username:\n raise ValueError('The given username must be set')\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user" ]
[ "0.8247268", "0.82456094", "0.8141204", "0.81273985", "0.80339557", "0.8026633", "0.80089855", "0.7969125", "0.7943305", "0.792896", "0.7926916", "0.79243636", "0.78791255", "0.7877891", "0.7829092", "0.7828892", "0.77510595", "0.77483684", "0.77474916", "0.7718734", "0.77142614", "0.7701713", "0.7690464", "0.7687068", "0.7686724", "0.7675724", "0.7672091", "0.7666917", "0.7658336", "0.7655974", "0.7653033", "0.76287854", "0.7624617", "0.7624099", "0.76199853", "0.7615702", "0.7611978", "0.7603581", "0.7602983", "0.76012516", "0.75900054", "0.7587608", "0.7587566", "0.75819683", "0.75787276", "0.75565314", "0.7549831", "0.75361556", "0.7534567", "0.75253487", "0.7520665", "0.75203824", "0.7517934", "0.7512978", "0.75129557", "0.7506569", "0.75064105", "0.74971336", "0.7496371", "0.7493428", "0.7490707", "0.7488371", "0.7487614", "0.74873996", "0.7485844", "0.7484657", "0.7480773", "0.7479791", "0.7479161", "0.7477435", "0.74762756", "0.747352", "0.7471522", "0.746885", "0.74676096", "0.7465693", "0.7465693", "0.7465693", "0.7454219", "0.7453042", "0.74516547", "0.7449682", "0.7446876", "0.74451077", "0.74448216", "0.74402356", "0.7436764", "0.7427027", "0.74263513", "0.7424754", "0.742328", "0.74230504", "0.74199694", "0.74188256", "0.74153507", "0.7414565", "0.74135864", "0.7409426", "0.74068856", "0.74001133" ]
0.8240706
2
Checks that the passwords are the same
def clean_password2(self):
    password1 = self.cleaned_data.get('password1', '')
    password2 = self.cleaned_data['password2']
    if password1 != password2:
        raise forms.ValidationError('The passwords did not match.')
    return password2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_passwords_match(self, password1, password2):\n return password1 == password2", "def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)", "def check_pass(self):\n if self.validated_data['new_password'] != self.validated_data['confirm_password']:\n raise serializers.ValidationError({\"error\":\"Please enter matching passwords\"})\n return True", "def check_password(password1: str, password2: str) -> bool:\n if password1 == password2:\n return True\n else:\n raise ValueError('Пароли не совпадают')", "def PasswordMatch(self, *args):\n pass1 = self.password.get().lstrip().rstrip()\n pass2 = self.confirm_pass.get().lstrip().rstrip()\n \n if (pass1 and pass1 == pass2):\n self.pass_match_label['text'] = 'Passwords match'\n self.pass_match_label['fg'] = 'green'\n return True\n else:\n self.pass_match_label['text'] = 'Password don\\'t match'\n self.pass_match_label['fg'] = 'red'\n return False", "def test_if_pwd_equals_confirmed(self):\n msg = self.user.registration(\"Githeri\", \"githeri.man@yahoo.com\",\n \"iwantgitheri\",\n \"iwantsgitheri\")\n self.assertEqual(msg, \"Your passwords should match\")", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def clean_password(self):\n password1 = self.cleaned_data.get('password1')\n password2 = self.cleaned_data.get('password2')\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError('Passwords do not match')\n return password2", "def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contrasenas no coinciden.')\n\t\treturn repetir_password", "def check_password(self, password):\n return self.password == password", "def change_password_tests(new_password1, new_password2):\n error = None\n if not new_password1 == new_password2:\n error = \"New passwords do no match\"\n elif new_password1 in common_passwords:\n error = \"New password is frequently used. 
Please use another password.\"\n elif not is_complex(new_password1):\n error = \"New password not complex enough\"\n\n return error", "def clean_password2(self):\n if self.clean_data.get('password1', None) and self.clean_data.get('password2', None) and \\\n self.clean_data['password1'] == self.clean_data['password2']:\n return self.clean_data['password2']\n raise forms.ValidationError(u'You must type the same password each time')", "def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contraseñas no coinciden.')\n\t\treturn repetir_password", "def clean_password_repeat(self):\n if 'password' in self.cleaned_data and 'password_repeat' in self.cleaned_data:\n if self.cleaned_data['password'] != self.cleaned_data['password_repeat']:\n raise forms.ValidationError('The password fields didn\\'t match: Password confirmation failed.')\n return self.cleaned_data['password_repeat']", "def clean_password_repeat(self):\n password = self.cleaned_data.get(\"password\")\n password_repeat = self.cleaned_data.get(\"password_repeat\")\n if password != password_repeat:\n raise ValidationError(_(\"Passwords don't match\"))\n return self.cleaned_data.get(\"password_repeat\")", "def password_is_valid_task_2(row):\n # XOR the two positions in the password\n return (row['letter'] == row['password'][row['policy'][0] - 1]) != \\\n (row['letter'] == row['password'][row['policy'][1] - 1])", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def clean(self):\r\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\r\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\r\n raise forms.ValidationError(_(u'You must type the same password each time'))\r\n return self.cleaned_data", "def test_user_logged_in_new_passwords_match_but_too_short(self):\n form_data = {\n \"old_password\": self.password,\n \"new_password\": \"pw\",\n \"new_password2\": \"pw\"\n }\n login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(login)\n post_response = self.client.post(self.change_password_url, form_data)\n form = post_response.context.get('form')\n self.assertEqual(post_response.status_code, 200)\n self.assertContains(post_response, \"Change Your Password\")\n self.assertIsInstance(form, ChangePasswordForm)\n self.client.logout()\n re_login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(re_login)", "def match(self,pwdmarked,password):\n pwd1 = self.cleanPassword(pwdmarked)\n pwd2 = self.cleanPassword(password)\n if not (pwdmarked or '').startswith('plain:{'):\n pwd2 = crypt(password,self.settings.authenSalt,10000)\n return pwd1==pwd2", "def clean_password1(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data['password1']", "def validate_password_repeat(form, field) -> None:\n if field.data != form.password.data:\n raise ValidationError('The password needs to match the new password')", "def passwords_match_2(cls, v: Any, values: Any, **kwargs: Any) -> Any:\n if not 
values.get(\"password_new_1\") or v != values[\"password_new_1\"]:\n raise ValueError(\"passwords do not match\")\n return v", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(u'You must type the same password each time')\n return self.cleaned_data", "def test_password_match(self):\r\n\r\n tst = User()\r\n tst._password = self.test_hash\r\n\r\n self.assertTrue(\r\n tst._password == self.test_hash, \"Setting should have hash\")\r\n self.assertTrue(\r\n tst.password == self.test_hash, \"Getting should have hash\")\r\n self.assertTrue(\r\n tst.validate_password(self.test_password),\r\n \"The password should pass against the given hash: \" + tst.password)", "def clean_paswword(self):\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Los password no coinciden.')\n return password", "def verify_match(password, verify):\n return password == verify", "def correct_password(username, password, db):\n\tquery = db((db.User.username == username) & (db.User.password == password))\n\treturn query.count() > 0", "def clean(self):\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data", "def clean(self):\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data", "def clean(self):\n if 'password' in self.cleaned_data and 'password_repeat' in self.cleaned_data:\n if self.cleaned_data['password'] != self.cleaned_data['password_repeat']:\n raise forms.ValidationError('The two password fields didn\\'t match.')\n return self.cleaned_data", "def test_check_password(self):\n user = User.query.filter_by(username='eschoppik').first()\n self.assertTrue(bcrypt.check_password_hash(user.password, 'secret'))\n self.assertFalse(bcrypt.check_password_hash(user.password, 'notsecret'))", "def password_validation(pass1,pass2):\n errors = []\n if(pass1 != pass2):\n errors.append(\"Lösenorden matchade inte.\")\n if(len(pass1) < 3):\n errors.append(\"Lösenordet måste vara längre än 3 bokstöver.\")\n \n return errors", "def clean_password2(self):\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Las Claves no coinciden.')\n return password2", "def check_password(self, password):\n\n\t\twith self.lock:\n\t\t\tassert ltrace(TRACE_USERS, 'comparing 2 crypted passwords:\\n%s\\n%s' % (\n\t\t\t\tself.__userPassword,\n\t\t\t\tself.backend.compute_password(password, self.__userPassword)))\n\n\t\t\treturn self.__userPassword == self.backend.compute_password(\n\t\t\t\t\t\t\t\t\t\t\t\tpassword, self.__userPassword)", "def validate_password_repeat(form: CompleteSignupForm, field: PasswordField) -> None:\n if field.data != form.password.data:\n raise ValidationError('The password needs to match the new password')", "def verify_password(self, username, password):\n\n try:\n self.c.execute('SELECT password FROM profiles WHERE name=(?)', (username,))\n\n db_pw = self.c.fetchone()[0]\n print(password)\n\n return db_pw == 
password\n\n except TypeError:\n return False", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields are different. Please enter the same password \"\n \"in both fields.\"))\n return self.cleaned_data", "def test_reset_password_fails_for_similar_passwords(self):\n self.test_client.post(\n \"/api/v1/auth/register\", data=self.user_data)\n\n resp = self.test_client.post(\n \"/api/v1/auth/login\",\n data=self.user_data)\n data = json.loads(resp.data)\n\n # reset-password should pass provided the new password\n # is not similar to the old saved password\n token = data['token']\n resp = self.test_client.post(\n \"/api/v1/auth/reset-password\",\n headers=dict(Authorization=f\"Bearer {token}\"),\n data={'password': '!0ctoPus', 'confirm password': '!0ctoPus'}\n )\n\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(data[\"status\"], \"failure\")\n self.assertEqual(\n data[\"message\"],\n \"Your new password should not be similar to your old password\")", "def validatePwd(self,pwd2):\n\n if self.pwd==pwd2:\n self.__encryptPwd = bcrypt.hashpw(self.pwd.encode('utf-8'),\n bcrypt.gensalt())\n return True\n else:\n return False", "def clean_password_again(self):\n if 'password' in self.cleaned_data:\n password = self.cleaned_data['password']\n password_again = self.cleaned_data['password_again']\n\n if password == password_again:\n return password\n else:\n return None\n\n raise forms.ValidationError('Passwords do not match.')", "def pass_check(user_found):\n password = ''\n while password != user_found[1]:\n password = stdiomask.getpass(prompt=\"Please enter your password: \", mask='*')\n pass1 = encrypter.encrypt_password(password)\n if user_found[1] == pass1:\n return \"\\nPassword match\\n\"\n else:\n print(\"\\nPassword do not match\\n\")", "def clean_password2(self):\n password1 = self.cleaned_data.get('password1', '')\n password2 = self.cleaned_data['password2']\n if not password1 == password2:\n raise forms.ValidationError(\"The two passwords didn't match.\")\n return password2", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] == self.cleaned_data['password2']:\n return self.cleaned_data['password2']\n raise forms.ValidationError(_(u'You must type the same password each time'))", "def test_uniqueness(self):\n passwords = tuple(generate_password(8) for i in range(100))\n self.assertEqual(len(passwords), len(set(passwords)))", "def __is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n return hmac.compare_digest(\n pw_hash, hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n )", "def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n return password2", "def check_password(self, username, password): # tested\r\n conn = self.get_db()\r\n with conn:\r\n c = conn.cursor()\r\n sql = ('select password from gameuser where username=%s')\r\n c.execute(sql,(username,))\r\n hashedpass = md5.new(password).hexdigest()\r\n u = c.fetchone()\r\n if u == None:\r\n raise NoUserExistsException(username)\r\n # print 'database contains {}, entered 
password was {}'.format(u[0],hashedpass)\r\n return u[0] == hashedpass", "def test_pbkdf2_sha256_password_reuse(self):\r\n user = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # students need to user at least one different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"different\"))\r\n self._change_password(user, \"different\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n\r\n # staff needs to use at least two different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self._change_password(staff, \"different\")\r\n\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"third\"))\r\n self._change_password(staff, \"third\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))", "def passwords_match_1(cls, v: Any, values: Any, **kwargs: Any) -> Any:\n if not values.get(\"password_old\"):\n raise ValueError(\"old password is required\")\n return v", "def test_user_logged_in_new_passwords_match_but_no_number(self):\n form_data = {\n \"old_password\": self.password,\n \"new_password\": \"testpassword\",\n \"new_password2\": \"testpassword\"\n }\n login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(login)\n post_response = self.client.post(self.change_password_url, form_data)\n form = post_response.context.get('form')\n self.assertEqual(post_response.status_code, 200)\n self.assertContains(post_response, \"Change Your Password\")\n self.assertIsInstance(form, ChangePasswordForm)\n self.client.logout()\n re_login = self.client.login(username=self.username, password=self.password)\n self.assertTrue(re_login)", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] and self.cleaned_data['password2']:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError('The password fields didn\\'t match: Password confirmation failed.')\n return self.cleaned_data['password2']", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test06_password_mixture(self):\n self.set_complexity(length=14, numeric=1, upper=1, lower=1, special=1)\n\n invalid = (\n \"A\",\n \"ACBDEabcde!!!!\",\n \"Tr0ub4dor&3\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n \"1234;.,/]1234\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"Sixteenchars12@_\",\n \"thisis4reallybadPassword!\",\n \"C0rrecthorsebatteryst@ple\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'Password1234\\'\"\"\"\"\"',\n )\n self.set_passwords(valid)", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] and self.cleaned_data['password2']:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError('The two password fields didn\\'t match.')\n return 
self.cleaned_data", "def old_password_check(form, field):\n old_password = field.data\n password = current_user.password\n r = pwd_context.verify(old_password, current_user.password)\n if not r:\n raise validators.ValidationError('old password is wrong')", "def test_accounts_password_reuse(self):\r\n user = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # students need to user at least one different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"different\"))\r\n self._change_password(user, \"different\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n\r\n # staff needs to use at least two different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self._change_password(staff, \"different\")\r\n\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"third\"))\r\n self._change_password(staff, \"third\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))", "def test_invalid_password(self):\n pass", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data", "def clean(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data", "def verify_password(self, password):\n return self.PASSWORD == password", "def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )", "def check_entry_password(username, password, entry_password):\n if entry_password.startswith('$apr1$'):\n salt = entry_password[6:].split('$')[0][:8]\n expected = apache_md5crypt(password, salt)\n elif entry_password.startswith('{SHA}'):\n import sha\n expected = '{SHA}' + sha.new(password).digest().encode('base64').strip()\n else:\n import crypt\n expected = crypt.crypt(password, entry_password)\n return entry_password == expected", "def verify_password(self, password):\n return self.PASS == password", "def clean_password2(self):\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(_(\"The two password fields didn't match.\"))\n return self.cleaned_data['password2']", "async def 
check_password(self, login, password):", "def test_password_and_confirm_not_match(self):\n user = {\n 'email': 'not_a_user@example.com',\n 'password': 'nope',\n 'confirm_password': 'nope2'\n }\n # register user with the bad password match\n result = self.client().post(AuthTestCase.registration, data=user)\n # returns a bad request with appropriate message\n self.assertEqual(result.status_code, 400)\n self.assertIn(\"password and confirm_password have to match\", str(result.data))", "def verify_password(stored_password, provided_password):\n #print(provided_password)\n salt = stored_password[:64]\n stored_password = stored_password[64:]\n pwdhash = hashlib.pbkdf2_hmac('sha512', \n provided_password.encode('utf-8'), \n salt.encode('ascii'), \n 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n #print(pwdhash)\n return pwdhash == stored_password", "def test_old_password_login_check(self):\n old_password = self.user['password1']\n self.change_password()\n response = self.client.post(reverse('users:login'), {'username': self.user['username'], 'password': old_password})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', None, ERROR_MSG)", "def clean(self):\n cleaned_data = super(RegistroForm, self).clean()\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(\"Passwords don't match. Please enter both fields again.\")\n return self.cleaned_data", "def check_password(self, author, password):\n return author.hashed_password == generate_hashed_passwd(password, author.hashed_password)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n pw_hash = get_pw_hash(cur, session['username'])\n if not check_password(self.old_password.data, pw_hash):\n self.old_password.errors.append('Did not find a match.')\n return False\n\n return True", "def valid_pwd(name, password, h):\n salt = h.split(',')[0]\n return h == make_pwd_hash(name, password, salt)", "def test_equality(cleartextpw, cryptedpw=\"\"):\n if not cryptedpw:\n return crypt.crypt(cleartextpw, '$6${}$'.format(salt(83)))\n else:\n if cryptedpw == 'x' or cryptedpw == '*':\n raise NotImplementedError(\n \"Sorry, currently no support for shadow passwords\")\n\n return crypt.crypt(cleartextpw, cryptedpw) == cryptedpw", "def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))", "def checkPassword(self, password):\n if password is None:\n return False\n if self.hashed:\n (salt, _) = self.password.split('|')\n return (self.password == utils.saltHash(password, salt=salt))\n else:\n return (self.password == password)", "def correct_password(name, password):\n if not User.created(name):\n return False\n user = User.get_user(name)\n return user.info['password'] == password", "def matches_password_verify(password, verify):\n if password and not password == verify:\n return \"Your passwords didn't match.\"\n else:\n return \"\"", "def password_is_correct(self, password):\n return Bcrypt().check_password_hash(self.password, password)", "def check_password(self, password: str) -> bool:\n\n hash_of_given_password = hashlib.new('md5', bytes(password, encoding='utf8'))\n return hash_of_given_password.hexdigest() == self.hashed_password", "def clean(self):\n cleaned_data = super(RegisterForm, self).clean()\n password = 
cleaned_data.get(\"password\")\n confirm_password = cleaned_data.get(\"confirm_password\")\n if password != confirm_password:\n raise forms.ValidationError(\n \"The passwords entered do not match.\"\n )", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def clean(self):\n cleaned_data = super(MyRegistrationForm, self).clean()\n if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n raise forms.ValidationError(\"Passwords don't match. Please enter both fields again.\")\n return self.cleaned_data", "def test_valid_password(self):\n newpass = 'Just Complex Enough'\n m = hashlib.sha512()\n m.update(newpass.encode('utf-8'))\n m.update(self.request.user.salt)\n hashed = m.digest()\n self.request.json_body = deepcopy(self.good_dict)\n self.assertNotEqual(self.request.user.password, hashed)\n self.request.json_body['password'] = newpass\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, dict_from_row(self.request.user, remove_fields=removals))\n self.assertEqual(self.request.user.password, hashed)", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def is_correct_user(self, login, password):\n pass", "def clean_password_new_again(self):\n if 'password_new' in self.cleaned_data:\n password_new = self.cleaned_data['password_new']\n password_new_again = self.cleaned_data['password_new_again']\n\n if password_new == password_new_again:\n return password_new\n else:\n return None\n\n raise forms.ValidationError('Passwords do not match.')", "def is_correct_password(salt: bytes, pw_hash: bytes, password: str) -> bool:\n return hmac.compare_digest(\n pw_hash, hashlib.pbkdf2_hmac(\"sha256\", password.encode(), salt, 100000)\n )", "def validate_password(self, password):\n return self._password == encrypt_password(password,\n b64decode(str(self._salt)))", "def _check_password(self, body):\n if not self.config.security_initialize:\n raise RuntimeError(\"First set a password\")\n\n password = hash_password(body[ATTR_PASSWORD])\n if password != self.config.security_password:\n raise RuntimeError(\"Wrong password\")", "def clean_new_password2(self):\n password1 = self.cleaned_data.get('new_password1')\n password2 = self.cleaned_data.get('new_password2')\n if password1 and password2:\n validate_password(password1, self.instance)\n if password1 != password2:\n self.add_error('new_password2',\n _(\"The two password fields didn't match.\"))\n else:\n self.change_password = True\n return password2", "def verify_password(stored_passwd, provided_passwd):\n salt = stored_passwd[:64]\n stored_password = stored_passwd[64:]\n pwdhash = hashlib.pbkdf2_hmac(\n 'sha512', provided_passwd.encode('utf-8'), salt.encode('ascii'), 100000\n )\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n return pwdhash == stored_password", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def test_41_password_change(self):\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n 
data={'current_password': password,\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n assert \"Yay, you changed your password succesfully!\" in res.data, res.data\r\n\r\n password = \"mehpassword\"\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': \"wrongpassword\",\r\n 'new_password': \"p4ssw0rd\",\r\n 'confirm': \"p4ssw0rd\",\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Your current password doesn't match the one in our records\"\r\n assert msg in res.data\r\n\r\n self.register(password=password)\r\n res = self.app.post('/account/johndoe/update',\r\n data={'current_password': '',\r\n 'new_password':'',\r\n 'confirm': '',\r\n 'btn': 'Password'},\r\n follow_redirects=True)\r\n msg = \"Please correct the errors\"\r\n assert msg in res.data", "def passwd_check(request, passphrase):\n import hashlib\n hashed_passphrase = request.registry.settings.get('phoenix.password', u'')\n \n try:\n algorithm, salt, pw_digest = hashed_passphrase.split(':', 2)\n except (ValueError, TypeError):\n return False\n\n try:\n h = hashlib.new(algorithm)\n except ValueError:\n return False\n\n if len(pw_digest) == 0:\n return False\n\n try:\n h.update(passphrase.encode('utf-8') + salt.encode('ascii'))\n except:\n return False\n\n return h.hexdigest() == pw_digest", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def check_password(self, password):\n check = False\n if self.password is not None:\n logger.critical('Old style password exists.')\n if check_password_hash(self.password, password):\n self.passwd = password\n self.password = None\n db.session.add(self)\n db.session.commit()\n logger.critical('Old style password replaced.')\n else:\n return check\n try:\n check = bcrypt.check_password_hash(self._password.encode('utf8'), password.encode('utf8'))\n except:\n logger.critical('Error in password check.')\n finally:\n return check", "def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))" ]
[ "0.77910745", "0.7613175", "0.7546211", "0.7338358", "0.7300746", "0.7270298", "0.7236492", "0.7210182", "0.7177891", "0.716393", "0.71617705", "0.715105", "0.7121701", "0.7119943", "0.70778507", "0.70771366", "0.7053473", "0.704067", "0.70307827", "0.70246506", "0.7013039", "0.7005679", "0.6986126", "0.6985254", "0.6982743", "0.6977606", "0.697296", "0.6959909", "0.6945861", "0.6945861", "0.694555", "0.6942421", "0.6935249", "0.69277793", "0.6919702", "0.69102407", "0.69059867", "0.6902924", "0.69002694", "0.6894756", "0.6890408", "0.68853015", "0.68849874", "0.687659", "0.68739295", "0.6858424", "0.6857962", "0.68505114", "0.684667", "0.68370324", "0.68130904", "0.6799669", "0.6763958", "0.6760015", "0.6758928", "0.67426425", "0.673988", "0.6735125", "0.6735007", "0.6735007", "0.6728473", "0.67241323", "0.6717947", "0.6706123", "0.67027634", "0.6698876", "0.6687866", "0.6680093", "0.66782576", "0.66770697", "0.6670912", "0.6665151", "0.66635", "0.6661842", "0.66340345", "0.6632052", "0.6631455", "0.662822", "0.6614664", "0.6608855", "0.6602591", "0.66006523", "0.65950763", "0.6590053", "0.6582553", "0.65778524", "0.65739775", "0.65612376", "0.6552006", "0.65473217", "0.65408677", "0.6533973", "0.6532722", "0.6532722", "0.6530178", "0.6528909", "0.6522996", "0.6518685", "0.6512728" ]
0.6925312
35
Changes the password for the given user
def change_password(self, user): if not self.is_valid(): return None password = self.clean_password2() user.set_password(password) user.save() return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_password(self, user, password):\n user.password = hashers.make_password(password)", "def change_user_password(self, user, new_pass):\n return self.update(user, password=new_pass)", "def change_user_password(self, instance, user, new_pass):\n return instance.change_user_password(user, new_pass)", "def _change_password(self, user, password):\r\n user.set_password(password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()", "def doChangeUser(self, login, password, **kwargs):\n IUserChanger(self.context).setPassword(password)", "def view_update_user(self, user, new_pw, old_pw):\r\n user.realm._checker.passwd(user.userID, new_pw, old_pw)", "def changeUserPassword(self, name, password):\n self._client.changeUserPassword(name, password)", "def change_password(self, new_pass):\n self.manager.change_user_password(self, new_pass)", "def change_user_password(self, user, new_pass):\n return self._user_manager.change_user_password(user, new_pass)", "def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1", "def change_password(self, username=None, user_data=None):\n if not username:\n raise ValueError(\"Please provide a username.\")\n\n if not user_data:\n raise ValueError(\"Please provide correct user information.\")\n\n user_data = self._to_string(data=user_data)\n uri = 'json/users/' + username + '?_action=changePassword'\n data = self._post(uri=uri, data=user_data, headers=self.headers)\n if data.status_code == 200:\n return True\n else:\n return False", "def set_password(user_id):\n user = _get_user_or_404(user_id)\n\n form = SetPasswordForm(request.form)\n if not form.validate():\n return set_password_form(user.id, form)\n\n new_password = form.password.data\n initiator_id = g.user.id\n\n password_service.update_password_hash(user.id, new_password, initiator_id)\n\n flash_success(\n gettext(\n \"New password has been set for user '%(screen_name)s'.\",\n screen_name=user.screen_name,\n )\n )\n\n return redirect_to('.view', user_id=user.id)", "def change_user():\n _ = db.change_password(auth.username(), generate_password_hash(request.json['password']))\n return str(_)", "def change_password(self, user, current_password, password):\n\n if not password:\n raise DoorstepError('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise DoorstepError('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def view_update_user(self, user, username, password):\r\n user.realm._checker.passwd(username, password, True)", "def set_password(self, user, password):\n hashed_password = self.hash_password(password)\n server_name = self.get_server_name()\n hookenv.log(\"Storing hash: {}\".format(hashed_password), hookenv.DEBUG)\n result = self.pgsql_query(\n \"UPDATE users SET password_hash = '{}' WHERE name = '@{}:{}';\".format(\n hashed_password, user, server_name\n )\n )\n return result", "def update_password(self, username, password):\n self.update(('Password', password), username)", "def updatePassword(con, options, dbName, userName, userInfo):\n if 
checkUsername(userName):\n trace(\"For dbName='%s', alter user '%s' password\" % (dbName, userName))\n userPassword = userInfo[\"password\"]\n optionalDbExecute(con, options, \"alter user %s with password '%s'\" % (userName, userPassword))", "def change_password(change_account):\n change_data(change_account, changed_data='password')", "def setpassword(self, pwd):\n pass", "def change_password(self, user, current_password, password):\n\n if not password:\n raise Exception('New password can\\'t be blank.')\n\n # Changing user's password if old password verifies\n user = self.get(id=user.id)\n\n if not user.check_password(current_password):\n raise Exception('Your current password is wrong.')\n\n user.set_password(password)\n user.save()", "def change_user(self, username, password):\n self.creds['username'] = username\n self.creds['password'] = password", "def change_db_user_password(self, username, password):\n\n self.sql(\"ALTER USER %s WITH PASSWORD '%s'\" % (username, password))", "def update_user_password(context, params):\n\n user = User.objects.filter(id=params.get('id')).first()\n if not user:\n raise ValueError(\"user not found\")\n # user.edited_by = context.user\n if params.get('password'):\n user.set_password(params.get('password'))\n\n user.save()\n return user", "def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))", "def setUserPassword(self,value):\n self.PDFreactorConfiguration.in1[\"userPassword\"] = value", "def update_user_password(self, user_id, password, original_password):\n update_user = {\n 'password': password,\n 'original_password': original_password\n }\n update_user = json.dumps({'user': update_user})\n resp, _ = self.post('users/%s/password' % user_id, update_user)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp)", "def change_password(self, password, newpassword):\n cred = {\"newpasswd\": newpassword, \"passwd\": password}\n return self.put(\"passwd\", cred)", "def update_user_password(self, username):\n parser_password.add_argument('password',\n type=validate_password, required=True,\n nullable=False,\n help=\"Password must be at least 6 characters\"\n )\n args = parser_password.parse_args()\n password = self.set_password(request.json.get('password'))\n\n query = \"\"\"UPDATE users SET password=%s WHERE username=%s\"\"\"\n values = password, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True", "def userPassword(self, password=None):\n\n\t\tdisplay = False\n\n\t\tif password is None:\n\t\t\tdisplay = True\n\t\t\tpassword = hlstr.generate_password(\n\t\t\t\t\t\t\t\tLMC.configuration.users.min_passwd_size)\n\t\telif password == '':\n\t\t\tlogging.warning(_(u'Setting an empty password for user {0}. '\n\t\t\t\t'This is dangerous and totally insecure!').format(\n\t\t\t\t\tstylize(ST_LOGIN, self.__login)))\n\n\t\twith self.lock:\n\t\t\tif self.__already_created:\n\t\t\t\tLicornEvent('user_pre_change_password', user=self.proxy, password=password).emit(synchronous=True)\n\n\t\t\tprefix = '!' 
if self.__locked else ''\n\n\t\t\tif password == '':\n\t\t\t\tself.__userPassword = prefix\n\t\t\telse:\n\t\t\t\tself.__userPassword = '%s%s' % (prefix,\n\t\t\t\t\t\t\t\t\tself.backend.compute_password(password))\n\n\t\t\t# 3600*24 get us to the number of days since epoch.\n\t\t\tself.__shadowLastChange = int(time.time() / 86400)\n\n\t\t\tif self.__already_created:\n\t\t\t\tself.serialize()\n\t\t\t\tLicornEvent('user_post_change_password', user=self.proxy, password=password).emit(synchronous=True)\n\n\t\t\t\tif self.__already_created:\n\t\t\t\t\t# don't forward this event on user creation, because we\n\t\t\t\t\t# already have the \"user_added\" for this case.\n\t\t\t\t\tLicornEvent('user_userPassword_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tif display:\n\t\t\t\tlogging.notice(_(u'Set password for user {0} to {1}.').format(\n\t\t\t\t\tstylize(ST_NAME, self.__login),\n\t\t\t\t\tstylize(ST_IMPORTANT, password)),\n\t\t\t\t\t# don't display the clear-text password in the daemon's log.\n\t\t\t\t\tto_local=False)\n\t\t\telse:\n\t\t\t\tif self.__already_created:\n\t\t\t\t\tlogging.notice(_(u'Changed password for user {0}.').format(\n\t\t\t\t\t\t\t\t\t\t\tstylize(ST_NAME, self.__login)))", "def update_password(self, username, password): #WORKS\n password_hash = generate_password_hash(password)\n try:\n self.cur.execute(\"UPDATE users SET password = \\\"{}\\\" WHERE username = \\\"{}\\\"\".format(password_hash, username))\n self.db.commit()\n except:\n self.db.rollback()", "def set_password(self, password):\n self.cloudserver.change_password(password)", "def set_password(username, new_password):\n if not validate_password(new_password):\n return \"salasana on väärää muotoa\"\n new_password_hash = generate_password_hash(new_password)\n sql = \"UPDATE users \" \\\n \"SET password=:new_pw \" \\\n \"WHERE username=:username\"\n db.session.execute(sql, {\"new_pw\": new_password_hash, \"username\": username})\n db.session.commit()\n return \"ok\"", "def reset_user_password_service(user: User, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user.password = hashed_password\n db.session.commit()", "def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))", "def change_password(self, user, old_password, new_password):\n\n if not user.check_password(old_password):\n raise InvalidPassword('The provided old password is incorrect.')\n\n user.set_password(new_password)\n user.save()\n\n return user", "def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)", "def set_password(self, request, pk=None):\n user = User.objects.get(id=pk)\n serializer = PasswordSerializer(data=request.data)\n\n if serializer.is_valid():\n if not user.check_password(serializer.data.get('old_password')):\n return Response({'old_password': ['Wrong password.']},\n status=status.HTTP_400_BAD_REQUEST)\n # set_password also hashes the password that the user will get\n user.set_password(serializer.data.get('new_password'))\n user.save()\n return Response({'status': 'password set'}, status=status.HTTP_200_OK)\n\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)", "def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password", "def change_password(self, 
new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")", "def set_password(self, password):\n self.password = password", "def set_password(self, password):\n self.password_hash = generate_password_hash(f\"{password}{self.user_salt}\")", "def set_password(self, service, username, password):\n segments = range(0, len(password), self._max_password_size)\n password_parts = [password[i : i + self._max_password_size] for i in segments]\n for i, password_part in enumerate(password_parts):\n curr_username = username\n if i > 0:\n curr_username += '{{part_%d}}' % i\n self._keyring.set_password(service, curr_username, password_part)", "async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}", "def sipserver_user_update(self, user: str, password: str) -> None:\n self.update_endpoint_in_sipserver(endpoint=user, password=password)", "def test_set_user_password(self):\n pass", "def set_new_password(self, new_password):\n self.password = new_password", "def change_user_password():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n user = get_user_by_id(user_id)\n if request.method == 'POST':\n old_password = request.form['old-password']\n new_password = request.form['new-password']\n confirm_password = request.form['confirm-password']\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(user_id)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if check_authentication(session_id, user_id):\n is_password_updated = update_user_password(user_id, old_password, new_password, confirm_password)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)\n if is_password_updated == \"OK\":\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=\"Password successfully updated!\", today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=False,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=is_password_updated, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)", "def updateWebAppUserPwd( self, username, password ):\n try:\n crypt_pass = crypt(password, username)\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n con.cursor().callproc('update_web_app_user_password', [username, crypt_pass])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def set_password(self, password):\n self.password = self.hash_password(password)", "def _update_password(self, email, new_password):\r\n user = 
User.objects.get(email=email)\r\n user.set_password(new_password)\r\n user.save()\r\n history = PasswordHistory()\r\n history.create(user)", "def set_password(ctx, new_password, remember):\n ensure_validated(ctx, prompt='Enter your current password')\n if not new_password:\n new_password = click.prompt(\n 'Enter your new password',\n hide_input=True,\n confirmation_prompt=True,\n err=True)\n\n controller = ctx.obj['controller']\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n key = controller.set_password(new_password)\n click.echo('Password updated.')\n if remember:\n keys[controller.id] = b2a_hex(key).decode()\n settings.write()\n click.echo('Password remembered')\n elif controller.id in keys:\n del keys[controller.id]\n settings.write()", "def set_password(self, value):\n # Salt need to be generated before set password\n m = hashlib.sha256()\n m.update('-'.join([\n str(datetime.now()),\n config.get('security.password_salt')\n ]))\n self.salt = m.hexdigest()\n self.password_pending = False\n self.password = self.__encrypt(value)", "def passwd(self, uname, pw):\n username = self.prefixed(uname)\n if not pw:\n logger.info(\n f\"sftp_user {uname} disabled.\"\n )\n return\n\n logger.debug(\n \"Changing password for local user={}\".format(username)\n )\n proc = Popen(\n ['/usr/bin/passwd', username],\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE\n\n )\n proc.stdin.write(\n bytes(pw + \"\\n\", encoding='utf-8')\n )\n proc.stdin.write(\n bytes(pw, encoding='utf-8')\n )\n proc.stdin.flush()\n stdout, stderr = proc.communicate()\n\n logger.debug(f\"stdout={stdout} stderr={stderr}\")", "def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)", "def _set_user_password(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..40']}), is_leaf=True, yang_name=\"user-password\", rest_name=\"password\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Password of the user', u'alt-name': u'password'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='user-passwd', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"user_password must be of a type compatible with user-passwd\"\"\",\n 'defined-type': \"brocade-aaa:user-passwd\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..40']}), is_leaf=True, yang_name=\"user-password\", rest_name=\"password\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Password of the user', u'alt-name': u'password'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='user-passwd', is_config=True)\"\"\",\n })\n\n self.__user_password = t\n if hasattr(self, '_set'):\n self._set()", "def set_password(self, password):\n self.authentication.password = password", "def _set_password(self, cr, uid, id, password, context=None):\n encrypted = self._crypt_context(\n cr, uid, id, context=context).encrypt(password)\n print(password)\n print(encrypted)\n self._set_encrypted_password(cr, uid, id, encrypted, context=context)\n self._set_password_again(cr, uid, id, password, context=context)", "def set_password(self, password):\n self.password = 
generate_password_hash(password)", "def change_password(username, current_password, new_password):\n\n if current_password == \"\": # nosec (not a hardcoded password)\n current_password = getpass.getpass()\n\n is_password_ok = authenticate_user(username, current_password)\n if not is_password_ok:\n return False\n\n if new_password == \"\": # nosec (not a hardcoded password)\n new_password = getpass.getpass()\n\n global db\n if db is None:\n init_db()\n user_model = Query()\n user = db.search(user_model.username == username)[0]\n\n salt = user['salt']\n password = hash_password(new_password, salt)\n api_key = gen_api_key(username)\n\n user_id = db.update({'password': password, 'api_key': api_key}, doc_ids=[user.doc_id])\n\n return {\n 'result': 'success',\n 'eid': user_id,\n 'user_created': user,\n 'api_key': api_key\n }", "def set_password(self, password):\n self.PASS = password", "def update_password(self, user_id, password):\n user = self.get(user_id, raise_error=True)\n if user is None:\n raise KeyError\n salt_hashedpassword = ''.join(self.get_salt_hashedpassword(password))\n user.password = salt_hashedpassword\n self.session.add(user)", "def set_password(self, password):\n self.PASSWORD = password", "def _set_password(self, password):\n self._password = generate_password_hash(password)", "def update_password(self, pwd):\n self.password = bcrypt.generate_password_hash(pwd).decode('utf8')", "def change_password(self, old_password, new_password):\n data = dict(password = new_password)\n data['old-password'] = old_password\n return self.app.post('/_changePassword', data = data, follow_redirects = True)", "def change_password():\n\n from .forms import ChangeCredentialsForm\n\n username = current_user.get_id()\n form = ChangeCredentialsForm(request.form)\n\n if form.validate_on_submit():\n logger.info(username + \" wants to change something.\")\n if request.form['username'] != username:\n logger.info(\"User \" + username + \" wants to change the username.\")\n app.rename_user(username, request.form['username'],\n request.form['newPassword1'])\n else:\n logger.info(\"Changing password of user \" + username + \".\")\n app.add_user_and_password(request.form['username'],\n request.form['newPassword1'])\n\n logger.info(\"Successfully changed credentials of \"\n + username + '.')\n return redirect(url_for('home'))\n\n else:\n return render_template('change-credentials.html',\n form=form,\n username=username)", "def test_update_password(self):\n\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = UsersAndGroups()\n auag.add_user(\n User(name=\"userx\", mail=\"userx@email.addr\", display_name=\"User X\", password=\"password1\")\n )\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n sync.update_user_password(\n userid=\"userx\", currentpassword=TS_PASSWORD, password=\"password2\"\n )", "def set_password(self, password):\n self.password_hash = generate_password_hash(str(password))", "def testEditPassword(self):\n self._login_user('eschoppik','secret')\n response = self.client.post('/users/1/edit_password?_method=PATCH',\n data=dict(new_password='newpass', confirm_password='newpass',\n old_password='secret'), follow_redirects=True)\n user = User.query.filter_by(username='eschoppik').first()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bcrypt.check_password_hash(user.password, 'newpass'),True)", "def set_password(self, password):\n self.password_hash = generate_password_hash(password)", "def 
set_password(self, password):\n self.password_hash = generate_password_hash(password)", "def set_password(self, raw_password: str):\n self.new_password = raw_password", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def set_password(self, value):\n hashed = bcrypt.encode(value)\n self._password = unicode(hashed)", "def password(self, password):\n self.password_hash = generate_password_hash(password)\n self.password_set = True", "def set_admin_password(self, instance, new_pass):\n pass", "def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)", "def set_password(self, user, password, create_user=True):\n\n if not self.check_prereqs():\n return False\n\n hash = self.hash_method.generate_hash(user,password)\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_update_password_query,{'username_field':self.sql_username_field,'password_field':self.sql_password_field,'username':user,'password':hash})\n self.log.debug(\"sqlflexibleauthstore: set_password: %s\" % (query,))\n cursor.execute(query)\n\n if cursor.rowcount > 0:\n self.log.debug('sqlflexibleauthstore: set_password: an existing user was updated')\n db.commit()\n if create_user:\n '''only return False when a user was updated, and create_user is true, because a user was not created'''\n return False\n else:\n '''the user was succesfully updated and no user should be created'''\n return True\n elif not create_user:\n self.log.debug('sqlflexibleauthstore: set_password: user doesnt exist, and none should be created')\n '''no existing user was updated, an none should be created either'''\n return False\n query=self.create_query(self.sql_create_user_query,{'username_field':self.sql_username_field,'password_field':self.sql_password_field,'username':user,'password':hash})\n self.log.debug(\"sqlflexibleauthstore: set_password: %s\" % (query,))\n cursor.execute(query)\n\n db.commit()\n return True", "def set_user_passwd(self, sUserName, sUserPasswd, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_SetUserPasswd(self.handle, sUserName, sUserPasswd, nFlags)[0])", "def set_password(self, password):\n from kalon.auth import encrypt_password\n self.document.password = encrypt_password(password)", "def update_user():", "def ChangePassword(self):\n \n username = self.username.get().lstrip().rstrip()\n if not username:\n messagebox.showerror('Error', 'No username entered.')\n return False\n \n if not self.PasswordMatch():\n messagebox.showerror('Error', 'Password fields do not match.')\n return False\n password = self.password.get().lstrip().rstrip()\n \n for user in self.user_db:\n if user['User'] == username:\n if user['Password'] == password:\n messagebox.showerror('Error',\n 'New password unchanged from the ' \\\n 'old password.')\n return False\n user['Password'] = password\n messagebox.showinfo('Success!', 'Password updated!')\n return True\n \n messagebox.showerror('Error', f'{username} not found in database.')\n return False", "def set_password(self, password):\n\n self.password = bcrypt.generate_password_hash(password)", "def set_password(self, password):\n self.password = md5crypt(password, gen_salt())", "def set_password(self, password):\n self.password = generate_password_hash(password, method='pbkdf2:sha256')", "def change(username, password):\n\tco = None\n\ttry:\n\t\tlo, pos = univention.admin.uldap.getAdminConnection()\n\texcept:\n\t\tlo, pos = 
univention.admin.uldap.getMachineConnection()\n\n\tmodule=univention.admin.modules.get('users/user')\n\n\tunivention.admin.modules.init(lo,pos,module)\n\n\tif username.find('@') > 0: #krb5Principal\n\t\tfilter='krb5PrincipalName=%s' % username\n\telse:\n\t\tfilter='uid=%s' % username\n\tobjects = module.lookup(co, lo, filter, superordinate=None, unique=1, required=1, timeout=-1, sizelimit=0)\n\n\t# search was unique and required\n\tobject = objects[0]\n\n\tobject.open()\n\tobject['password']=unicode(password)\n\tdn=object.modify()", "def setpassword(self, search_filter, password,\n force_change_at_next_login=False, username=None):\n self.transaction_start()\n try:\n res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,\n expression=search_filter, attrs=[])\n if len(res) == 0:\n raise Exception('Unable to find user \"%s\"' % (username or search_filter))\n if len(res) > 1:\n raise Exception('Matched %u multiple users with filter \"%s\"' % (len(res), search_filter))\n user_dn = res[0].dn\n if not isinstance(password, text_type):\n pw = password.decode('utf-8')\n else:\n pw = password\n pw = ('\"' + pw + '\"').encode('utf-16-le')\n setpw = \"\"\"\ndn: %s\nchangetype: modify\nreplace: unicodePwd\nunicodePwd:: %s\n\"\"\" % (user_dn, base64.b64encode(pw).decode('utf-8'))\n\n self.modify_ldif(setpw)\n\n if force_change_at_next_login:\n self.force_password_change_at_next_login(\n \"(distinguishedName=\" + str(user_dn) + \")\")\n\n # modify the userAccountControl to remove the disabled bit\n self.enable_account(search_filter)\n except:\n self.transaction_cancel()\n raise\n else:\n self.transaction_commit()", "def set_credentials(self, user, password):\n self.user = user\n self._set_opener(user, password)", "def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()", "def update_user_and_pw(cls, userid, user_email, user_password, user_phone):\n\n user_to_edit = User.query.filter_by(user_id=userid).one()\n\n user_to_edit.email = user_email\n user_to_edit.password = user_password\n user_to_edit.mobile_phone = user_phone\n\n db.session.commit()\n return user_to_edit", "def update_password(self, new_password=None):\n\n self.password = generate_password_hash(new_password)\n\n if self.save(verbose=False):\n self.logger.warn('Updated password! %s' % self)\n else:\n raise AttributeError('Password update failed!')", "def update_password(self, user, old_password, new_password):\n try:\n cur = self.conn.cursor()\n author_dict = self.get_author_by_name(user)\n author_id = author_dict['author_id']\n condition = self.password_check(user, old_password)\n\n if condition is not False:\n hashed_password = hash_string(new_password)\n query = ('UPDATE password '\n 'SET password = ? '\n 'WHERE password.author_id = ? 
')\n cur.execute(query, (hashed_password, author_id))\n self.conn.commit()\n return True\n else:\n return condition\n\n except TypeError:\n return False", "def put(self, request):\n try:\n user = request.user\n if user.check_password(request.data[\"current_password\"]):\n user.set_password(request.data[\"password\"])\n user.save()\n return Response(status=status.HTTP_200_OK)\n else:\n return Response(\"Current password is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except KeyError:\n return Response(\"The data format is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def post(self):\n # userId is retrieved from jwt identity\n userId = get_jwt_identity()\n data = ChangePasswordInputSchema().load(request.json)\n UserLoginService.change_password(userId,\n existing_password=data[\"existingPassword\"],\n new_password=data[\"newPassword\"])\n return {}, 200", "def fusion_api_change_administrator_password(self, host, body, api=None, headers=None):\n return self.user.change_administrator_password(host, body, api, headers)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def change_my_password():\n form = ChangePassword()\n if request.method == 'GET':\n return render_template('changemypassword.html', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n old_password = form.password.data\n new_password_hash = generate_password_hash(form.password1.data)\n account = db.check_item(\"username\", username)\n if account is not None:\n if check_password_hash(str(account['password_hash']), old_password):\n db.update_password_username(username, new_password_hash)\n flash('Your password has been changed')\n return redirect(url_for('login'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n flash('Invalid username or password')\n return redirect(url_for('change_my_password'))\n else:\n return render_template('changemypassword.html', form=form)", "def put(self, request):\n try:\n user = request.user\n if user.check_password(request.data[\"current_password\"]):\n user.set_password(request.data[\"password\"])\n user.save()\n return Response(status=status.HTTP_200_OK)\n else:\n return Response(\"Current password is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except KeyError:\n return Response(\"The data format is incorrect\", status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)" ]
[ "0.8684988", "0.8561367", "0.83150464", "0.8308839", "0.81240356", "0.8089846", "0.7987024", "0.79581326", "0.79197747", "0.79159844", "0.79039943", "0.78411984", "0.78052634", "0.7772818", "0.7770018", "0.77554065", "0.774026", "0.77302045", "0.77212405", "0.77205837", "0.77172655", "0.7661487", "0.76608", "0.764069", "0.7610624", "0.75219446", "0.7491108", "0.74878067", "0.7485031", "0.746163", "0.7452426", "0.7429094", "0.7395562", "0.73675656", "0.73568743", "0.7356784", "0.73453104", "0.7319287", "0.7315409", "0.73090273", "0.7298011", "0.72502166", "0.72296864", "0.72235066", "0.7218753", "0.72164625", "0.71934086", "0.71833897", "0.71812093", "0.7178191", "0.71713173", "0.714764", "0.71446276", "0.7140387", "0.711279", "0.7100975", "0.7099245", "0.70931435", "0.7092939", "0.708702", "0.7068737", "0.7045875", "0.70446795", "0.7036946", "0.70311385", "0.70287645", "0.70272464", "0.7019197", "0.7009139", "0.7003094", "0.7000983", "0.7000331", "0.7000331", "0.6995843", "0.6984191", "0.6981478", "0.6980485", "0.6973125", "0.696521", "0.6962409", "0.6959943", "0.6933673", "0.69214386", "0.6921435", "0.6886885", "0.6886082", "0.6878559", "0.68726426", "0.6862911", "0.6852726", "0.6840036", "0.68361247", "0.6810273", "0.68014497", "0.6797867", "0.6795536", "0.6791179", "0.67885995", "0.67853487", "0.678015" ]
0.82297784
4
Checks that the email is valid
def clean_email(self): # NOTE: all emails are stored in lower-case e = self.cleaned_data['email'].lower() try: user = User.objects.get(email=e) if not user.is_active: msg = 'This user account has not been confirmed yet' raise forms.ValidationError(msg) except User.DoesNotExist: msg = 'This email is not associated with an account' raise forms.ValidationError(msg) return e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_email(self, email):\n rex = \"^[\\w]+[\\d]?@[\\w]+\\.[\\w]+$\"\n return re.match(rex, email)", "def validate_email(self):\n # source: https://docs.python.org/2/howto/regex.html\n if not re.match(r\"[^@.]+@[A-Za-z]+\\.[a-z]+\", self.email):\n return 'Invalid email address!'\n return self.email", "def valid_email(self, email):\n # uses regular expressions\n reg_email = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n return re.match(reg_email, email)", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if not email_exists(cur, self.email.data):\n self.email.errors.append('Please check your email address.')\n return False\n\n return True", "def _email_is_valid(email):\n return VALID_EMAIL_REGEXP.match(email) is not None", "def IsEmailValid(email):\n return email and EMAIL_RE.search(email)", "def is_valid_email_address (email):\n return valid_email.search(email)", "def test_is_valid_email(self):\n self.assertTrue(is_valid_email('abc@example.com'))", "def is_email_valid(e_mail):\n pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n result = False\n if pattern.match(e_mail):\n result = True\n return result", "def verify_email(entered_email):\n return EMAIL_RE.match(entered_email)", "def is_valid_email(email):\n assert email is not None\n return validate_email(str(email))", "def __validate_email(email):\n pattern = r\"\\\"?([-a-zA-Z0-9.`?{}]+@\\w+\\.\\w+)\\\"?\"\n pattern = re.compile(pattern)\n if not re.match(pattern, email):\n logging.critical(\"Incorrect email entered, email entered is -->{}\"\n .format(email))\n raise ValueError(\"You failed to match %s\" % email)\n return True", "def valid_email(line):\n email = line.o_email\n is_valid = validate_email(email)\n if not is_valid:\n rule = 'Email validation'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def isvalid(email):\n pattern = re.compile(r\"^([a-zA-Z0-9_\\-]+)@([a-zA-Z0-9]+)\\.\\w{,3}$\")\n return bool(pattern.match(email))", "def check_email_validity(email):\n if email.count('@') != 1:\n return False\n if len(email.split('@')[0]) == 0:\n return False\n if '.' not in email.split('@')[1]:\n return False\n return True", "def is_valid_email(email):\n if re.search(EMAIL_REGEX, email):\n return True\n else:\n return False", "def is_email_address_valid(email):\n if not re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\.[a-zA-Z0-9-]+)*$\", email):\n return False\n return True", "def IsValidEmail(s):\n return RE_EMAIL_ONLY.match(s)", "def validateEmail(email): \n \n # Make a regular expression \n regex = '^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$'\n \n if(re.search(regex,email)): \n return True \n else: \n print(\"\\n**The specified email \\\"\"+str(email)+\"\\\" is invalid. 
Please try again by re-entering the email \") \n return False", "def validate_email(email):\r\n\r\n\t\tstatus = 'valid'\r\n\r\n\t\ttry:\r\n\t\t\tvalidate_email(email)\r\n\t\t\tuser = AuthTools.get_user_by_email(email)\r\n\r\n\t\t\tif user is not None:\r\n\t\t\t\tstatus = 'taken'\r\n\r\n\t\texcept:\r\n\t\t\tstatus = 'invalid'\r\n\r\n\t\treturn status", "def is_email_valid(email):\n\n result = requests.get(\n f'https://api.hunter.io/v2/email-verifier?email={email}&api_key={settings.HUNTER_IO_API_KEY}'\n ).json()\n\n return True if result.get('data').get('status') == 'valid' else False", "def email_validator(email):\n if len(email) > 6:\n if re.match(REGEX_EXP, email) != None:\n return True\n return False", "def is_valid_email(email):\n return \"@\" in email and \".\" in email", "def validate_email(self, field):\n print(\"Validating email...\")\n if User.query.filter_by(email=field.data.lower()).first():\n print(\"Invalid\")\n raise ValidationError(\"Email already registered.\")\n print(\"VAlid email\")", "def validate_email(email:str) -> bool:\r\n return email.count(\"@\") == 1 and email.count(\".\") >= 1 and len(email) > 6", "def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False", "def validate_email(self, email):\n data = {\n \"address\": email\n }\n resp = self.get(_u.build_uri(\"address.validate\"), data)\n return utils.handle_response(resp)", "def emailValidate(form, field):\n\n if ' ' in field.data:\n raise ValidationError(message='Invalid e-mail address')\n\n if field.data.count('.') < 1:\n raise ValidationError(message='Invalid e-mail address')\n\n if field.data.count('@') < 1:\n raise ValidationError(message='Invalid e-mail address')", "def is_valid_email(form, value):\n if '@' not in value or len(value) > 200:\n raise forms.ValidationError(_('Invalid email address'))", "def is_valid(self, email=None):\n if not email:\n return False\n\n # RFC 3696\n # In addition to restrictions on syntax, there is a length limit on email addresses.\n # That limit is a maximum of 64 characters (octets) in the \"local part\" (before the \"@\")\n # and a maximum of 255 characters (octets) in the domain part (after the \"@\") for a total\n # length of 320 characters. However, there is a restriction in RFC 2821 on the length of\n # an address in MAIL and RCPT commands of 254 characters. 
Since addresses that do not fit\n # in those fields are not normally useful, the upper limit on address lengths should\n # normally be considered to be 254.\n\n if len(email) > 254:\n return False\n\n parts = email.split('@')\n if len(parts) > 2 or len(parts[0]) > 64 or len(parts[1]) > 255:\n return False\n\n if not re.match('[a-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~\\-]+(?:\\.[a-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~\\-]+)*', email.lower()):\n return False\n # A valid mail exchange server is configured!\n return self.valid_mx(parts[1])", "def is_email(address):\n try:\n validate_email(address)\n return True\n except:\n return False", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')", "def validate_email(form, field):\n if not User.query.filter_by(email = field.data).first():\n raise ValidationError(\"Email is incorrect.\")", "def validateEmail(email):\r\n # Email address must be at least 6 characters in total.\r\n if len(email) < 6:\r\n\t\treturn False # Address too short.\r\n # Split up email address into parts.\r\n try:\r\n\t\tlocalpart, domainname = email.rsplit('@', 1)\r\n\t\thost, toplevel = domainname.rsplit('.', 1)\r\n except ValueError:\r\n\t\treturn False # Address does not have enough parts.\r\n if localpart[:1] == \".\" or localpart[-1] == \".\":\r\n\t\treturn False # Dots at the beginning or end of the localpart\r\n # Check for Country code length.\r\n if len(toplevel)<2 or len(toplevel)>6:\r\n\t\treturn False # Not a domain name.\r\n # Check for allowed characters\r\n for i in '-_.%+':\r\n\t\tlocalpart = localpart.replace(i, \"\")\r\n for i in '-_.':\r\n\t\thost = host.replace(i, \"\")\r\n if localpart.isalnum() and host.isalnum():\r\n\t\treturn True # Email address is fine.\r\n else:\r\n\t\treturn False # Email address has funny characters.\r", "def validate_email(input_email: str) -> bool:\n regex = r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b\"\n if re.fullmatch(regex, input_email):\n return True\n return False", "def validate_email_address(self, address):\n if self._re_email.search(address.lower()) is None:\n return False\n return True", "def verify_email(email):\n email_reg_exp = re.compile(r\"^[\\S]+@[\\S]+.[\\S]+$\")\n return not email or email_reg_exp.match(email)", "def is_valid_email(email):\n return re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\").fullmatch(email)", "def is_valid_email(email):\n pattern = '[\\w\\.-]+@[\\w\\.-]+[.]\\w+'\n return re.match(pattern, email)", "def is_valid_email(variable):\n if re.match(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+[a-zA-Z0-9-.]+$)\",\n variable):\n return True\n return False", "def validate_email(self, email):\n return models.Email.normalize_address(email)", "def is_valid_email(email):\n try:\n split = email.split('@')\n assert len(split) == 2\n domain = split[1]\n assert '.' 
in domain\n except AssertionError:\n return False\n return True", "def test_is_invalid_email(self):\n self.assertFalse(is_valid_email('helloworld'))", "def valid_email(email):\n email_regex = re.compile(r\"^[\\S]+@[\\S]+.[\\S]+$\")\n return email and email_regex.match(email)", "def validate_email_address(self, email_address):\n return re.match(r\"[a-zA-Z0-9]+(\\.?[a-zA-Z0-9]+)*@[a-zA-Z0-9]+(\\.?[a-zA-Z0-9]+)*\\.[a-zA-Z0-9]{2,}\", email_address)", "def validate_email(email):\n\t\ttemp = email.split(\"@\")\n\n\t\tif len(temp) != 2:\n\t\t\treturn False\n\n\t\tif not temp[0] or not temp[1]:\n\t\t\treturn False\n\n\t\tif \".\" not in temp[1]:\n\t\t\treturn False\n\n\t\ttemp = temp[1].split(\".\")\n\n\t\tfor domain_part in temp:\n\t\t\tif not domain_part:\n\t\t\t\treturn False\n\n\t\treturn True", "def validate_email( email ):\n message = ''\n if not( VALID_EMAIL_RE.match( email ) ):\n message = \"Please enter a real email address.\"\n elif len( email ) > 255:\n message = \"Email address exceeds maximum allowable length.\"\n return message", "def valid_email(email):\n # Ensure email is a string\n if not type(email) == str:\n return False\n\n # Find @ and . in the email address\n if re.match(\"[^@]+@[^@]+.[^@]+\", email):\n return True\n\n else:\n return False", "def invalid_email(email):\n email_pattern = re.compile(r\"[^@]+@[^@]+\\.[^@]+\")\n if email_pattern.match(email):\n return False\n return True", "def clean_email(self):\n email = self.cleaned_data['email']\n email_taken = User.objects.filter(email=email).exists()\n if email_taken:\n raise forms.ValidationError(\n 'El email ya se encuentra en uso. Prueba otro!')\n return email", "def email_checker(email):\n regex = '^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$'\n if re.search(regex, email):\n return True\n else:\n return False", "def email_is_valid(email: Optional[str]) -> bool:\n if email is None:\n return True\n\n if re.match(r\"^[a-zA-Z0-9]+[\\.]?[a-zA-Z0-9]+[@]\\w+[.]\\w{2,3}$\", email):\n return True\n\n return False", "def validate_email(value):\n if not EMAIL_REGEX.match(value):\n raise ValidationError('Invalid email address')\n return value", "def is_valid_email(email):\n regex = r'[\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+'\n if re.search(regex, email):\n return True\n return False", "def is_valid_email(email):\n regex = r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b\"\n if re.fullmatch(regex, email):\n return True\n else:\n return False", "def is_valid_email_address(self, addr):\n\t\t# the call is blocking, so only syntactic analysis performed\n\t\t# To check if the SMTP server exists change check_mx to True\n\t\t# to check if email address exists change verify to true\n\t\treturn addr is not None and validate_email(addr, verify=False, check_mx=False)", "def test_valid_email(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_3)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Invalid email address.\")", "def check_email_address_validity(email_address):\n\n try:\n validate_email(email_address)\n valid_email = True\n\n except ValidationError:\n valid_email = False\n\n return valid_email", "def test_invalid_email_when_logging_in(self):\n pass", "def validate_email(email: str) -> str:\n\n # from https://stackoverflow.com/a/43937713/12580727\n email_regex = r'^[\\w\\.\\+\\-]+\\@[\\w]+\\.[a-z]{2,3}$'\n\n if email is None:\n raise ValueError(('An email address is required in 
order'\n ' to work with the Unpaywall API'))\n\n if not re.match(email_regex, email):\n raise ValueError(('No valid email address entered. Enter a'\n ' valid email address'))\n\n if 'example.com' in email:\n raise ValueError('Do not use example.com')\n\n return email", "def is_allowed_email(email):\n if email and not is_regex_email(email):\n return \"That is not a valid email.\"\n else:\n return \"\"", "def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def validate(self):\n rv = Form.validate(self)\n if not rv:\n return False\n\n cur = get_cursor()\n if email_exists(cur, self.email.data):\n self.email.errors.append('This email already exists!')\n return False\n\n return True", "def validate_email(email):\n try:\n user, domain = str(email).split(\"@\")\n if not re.match(\"^[A-Za-z]*$\", user):\n abort(make_response(jsonify({\n \"status\": 400, \"Error\": \"Email is Invalid\"}), 400))\n except ValueError:\n abort(make_response(jsonify(\n status=400,\n error=\"Email is Invalid\"), 400))\n if not user or not domain:\n abort(make_response(jsonify(error=\"Email is Invalid\"), 400))\n\n # Check that domain is valid\n try:\n domain_1, domain_2 = domain.split(\".\")\n if not re.match(\"^[A-Za-z]*$\", domain_1):\n abort(make_response(jsonify({\n \"status\": 400, \"Error\": \"Email is Invalid\"}), 400))\n if not re.match(\"^[A-Za-z]*$\", domain_2):\n abort(make_response(jsonify({\n \"status\": 400, \"Error\": \"Email is Invalid\"}), 400))\n except ValueError:\n abort(make_response(jsonify(\n status=400,\n error=\"Email is Invalid\"), 400))\n if not domain_1 or not domain_2:\n abort(make_response(jsonify(\n status=400,\n error=\"Email is Invalid\"), 400))\n\n return email", "def is_valid_email(email):\n subdomain = HunterService._get_domain_for_email(email)\n try:\n result = hunter.domain_search(subdomain)\n except HTTPError as e:\n logging.info('Skipping hunter.io services. REASON: %s', str(e))\n\n return True\n\n return result['webmail'] or bool(result['emails'])", "def validate_email(form, field):\n if User.query.filter_by(email = field.data).first():\n raise ValidationError(\"Email already registed.\")", "def clean_email(self):\r\n email_domain = self.cleaned_data['email'].split('@')[1]\r\n if email_domain in self.bad_domains:\r\n raise forms.ValidationError(_(u'Registration using free email addresses is prohibited. 
Please supply a different email address.'))\r\n return self.cleaned_data['email']", "def validate_email(self, field):\n if User.query.filter_by(email=field.data).first():\n raise ValidationError(\"Email already registered.\")", "def validate_email(email_address):\n email_regex = re.compile(\"[a-z0-9!#$%&'*+/=?^_`{|}~-]+\"\n \"(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*\"\n \"@\"\n \"(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+\"\n \"[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\")\n\n if email_regex.match(email_address) is None:\n return False\n else:\n return True", "def clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email=email):\n raise forms.ValidationError('Ya existe un email igual al registrado.')\n return email", "def is_valid_email_address(email_address):\n\t# requirements = re\n\treturn EMAIL_REGEX.match(email_address) != None", "def clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email=email):\n raise forms.ValidationError('Ya existe un email igual registado.')\n return email", "def validate_email(self, value):\n verifier = EmailVerifier(value)\n if not verifier.is_valid():\n if verifier.errors:\n raise serializers.ValidationError(verifier.errors)\n error_code = verifier.data.get('result', 'unknown_error')\n if verifier.status_code == status.HTTP_200_OK:\n raise serializers.ValidationError({error_code:\n VALIDATION_ERRORS[error_code]})\n else:\n # This errors are 'Payment required' or 'Rate limit' errors etc, they\n # logged in by the EmailVerifier and should not be exposed to a user.\n raise serializers.ValidationError({'unknown_error':\n VALIDATION_ERRORS['unknown_error']})\n return value", "def clean_email(self):\n\t\temail = self.cleaned_data['email']\n\t\tif User.objects.filter(email=email):\n\t\t\traise forms.ValidationError('Ya existe un email igual en la db.')\n\t\treturn email", "def validate_email(self, email):\n \n # check for missing email\n if not email:\n raise serializers.ValidationError([\"This field may not be blank.\"])\n return email", "def _validate_email(email, api_version='v1'):\n try:\n _validate_unicode(email)\n _validate_type(email, str, accounts.EMAIL_BAD_TYPE_MSG)\n _validate_length(email, accounts.EMAIL_MIN_LENGTH, accounts.EMAIL_MAX_LENGTH, accounts.EMAIL_BAD_LENGTH_MSG)\n validate_email.message = (\n accounts.EMAIL_INVALID_MSG.format(email=email) if api_version == 'v1' else accounts.AUTHN_EMAIL_INVALID_MSG\n )\n validate_email(email)\n except (UnicodeError, errors.AccountDataBadType, errors.AccountDataBadLength) as invalid_email_err:\n raise errors.AccountEmailInvalid(str(invalid_email_err))\n except ValidationError as validation_err:\n raise errors.AccountEmailInvalid(validation_err.message)", "def email_valid(email_string):\n if not email_string:\n raise WrongInput(\"Input cannot be blank\")\n if not isinstance(email_string, str):\n raise WrongInput(\"Invalid email address\")\n\n if '@' not in email_string or '.' not in email_string:\n raise WrongInput('Invalid email address. 
Example of a valid address: johndoe@example.com.')\n else:\n return email_string", "def validate_email(self, email_field):\n if User.query.filter_by(email=email_field.data).first():\n raise ValidationError('An account with this email address already exists')\n return True", "def test_already_registered_email_validation(self):\n\n main_page = pages.mainpage.MainPage(self.driver)\n main_page.click_sign_in_button()\n \n sign_in_page = pages.signinpage.SignInPage(self.driver)\n sign_in_page.enter_create_account_email_addres('test@test.test')\n sign_in_page.click_create_account_button() \n\n self.assertTrue(sign_in_page.check_if_account_create_error_is_visible(), 'Email validation failed')", "def validate_email(self, email):\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists. Please choose another email.')", "def clean_email(self):\n email = self.cleaned_data.get('email')\n email_exists = User.objects.filter(email=email).exists()\n\n if email_exists:\n raise forms.ValidationError(\n 'El correo ya existe en nuestros registros.'\n )\n\n return email", "def test_invalid_email(self):\n rv = self.login('Bo_wrong@example.com', 'Bo1995')\n self.assertIn(b'Invalid email! Please try again', rv.data)", "def clean_email(self):\n email = self.cleaned_data['email']\n if Dueno.objects.filter(email=email):\n raise forms.ValidationError('Ya existe un email igual registado.')\n return email", "def validate_email(self, email):\n email = email.lower()\n if User.objects.filter(email=email).exists():\n raise serializers.ValidationError('Email already registered.')\n return email", "def validate_email(form, field):\n\n user = User.query.filter_by(email=form.email.data).first()\n\n if user and not user == g.user:\n form.email.errors = [\n \"Email already associated with account!\",\n *form.email.errors\n ]\n raise ValidationError", "def validate_email_address(email_address: str):\n email_regex = \"^[a-z0-9]+[\\\\._]?[a-z0-9]+[@]\\\\w+[.]\\\\w{2,3}$\"\n if re.search(email_regex, email_address):\n return True\n return False", "def email_validate(self,\r\n email,\r\n fix_typos=False):\r\n\r\n # Prepare query URL\r\n _url_path = '/email-validate'\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += _url_path\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare form parameters\r\n _form_parameters = {\r\n 'output-case': 'camel',\r\n 'email': email,\r\n 'fix-typos': fix_typos\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.post(_query_url, headers=_headers, parameters=_form_parameters)\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, EmailValidateResponse.from_dictionary)", "def validate_email(self, email):\n if email and email_address_exists(email):\n raise serializers.ValidationError(\n \"A user is already registered with this e-mail address.\")\n\n return email", "def is_valid_email(email):\n if 7 < len(email) < 257:\n if re_valid_email.match(email) is None:\n return False\n # TODO: If/when we permit *quoted* local-parts, account for\n # legal additional @'s, e.g., \"foo@bar\"@bar.foo\n if len(email.split('@')[0]) > 64:\n return False\n return True\n return False", "def clean_email(self):\n e = 
self.cleaned_data['email']\n try:\n user = User.objects.get(email=e)\n if not user.is_active:\n msg = 'This user account has not been confirmed yet'\n raise forms.ValidationError(msg)\n except User.DoesNotExist:\n pass\n # msg = 'This email is not associated with an account'\n # raise forms.ValidationError(msg)\n return e", "def has_validated_email(self):\n return self.user.email_user is not None", "def test_valid_email(self):\n self.valid_email = {'user': {\n \"username\": \"remmy\",\n \"email\": \"remmytest.com\",\n \"password\": \"Password123\"\n }\n }\n response = self.client.post(\n self.reg_url,\n self.valid_email,\n format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"must be in the format xxxx@xxxx.xx\", response.content)", "def clean_email(self):\n email = self.cleaned_data['email'].lower()\n if not email.endswith('@pearson.com'):\n raise forms.ValidationError('Your email address must end with @pearson.com')\n return email", "def validate_email(request):\r\n # get submitted email.\r\n email = request.GET.get('email', None)\r\n try:\r\n # check if an account with this email already exists, in case of editing user's profile.\r\n is_email_taken = User.objects.filter(email__iexact=email).exclude(email__iexact=request.user.email).exists()\r\n except: \r\n # check if an account with this email already exists, in case of registering new user.\r\n is_email_taken = User.objects.filter(email__iexact=email).exists()\r\n data = {'is_email_taken':is_email_taken}\r\n if data['is_email_taken']:\r\n data['error_message'] = 'An account with this Email already exists.'\r\n return JsonResponse(data)", "def validate_email(request):\r\n # get submitted email.\r\n email = request.GET.get('email', None)\r\n try:\r\n # check if an account with this email already exists, in case of editing user's profile.\r\n is_email_taken = User.objects.filter(email__iexact=email).exclude(email__iexact=request.user.email).exists()\r\n except: \r\n # check if an account with this email already exists, in case of registering new user.\r\n is_email_taken = User.objects.filter(email__iexact=email).exists()\r\n data = {'is_email_taken':is_email_taken}\r\n if data['is_email_taken']:\r\n data['error_message'] = 'An account with this Email already exists.'\r\n return JsonResponse(data)", "def checkMailAddress(obj, someAddr):\n # #5353 use checkEmailAddress from CMFDefault utils instead of\n # validateSingleEmailAddress from plone_utils as the plone email validator \n # is better at detecting invalid addreses\n try:\n checkEmailAddress(someAddr)\n except EmailAddressInvalid:\n return False\n return True", "def validate_element(self, value):\n if super(EmailField, self).validate_element(value):\n valid_uname, valid_domain = validation_util.valid_email(value)\n if not (valid_uname and valid_domain):\n if isinstance(valid_domain, int):\n val_error = ValidationError(\n 'Field encountered improperly formatted email address: %s' % value)\n else:\n if '@' not in value:\n val_error = ValidationError(\n 'Field encountered email address with missing @ '\n 'character: %s' % value)\n else:\n val_error = ValidationError(\n 'Field encountered email address with illegal '\n 'characters: %s' % value)\n\n raise val_error\n else:\n return value", "def validate_email(self):\n email = input(\"Email: \").strip()\n if email is None or email == \"\":\n print(self.INVALID_INPUT)\n self.display_cust_unlock()\n else:\n return email", "def validate_email(form, field):\n if 
User.query.filter_by(email=form.email.data).first():\n form.email.errors.append(\n \"Email already associated with account!\")\n raise ValidationError", "def validate_email(self, email):\n if email is None:\n raise ValueError(\"Missing email value\")\n elif type(email) is not str:\n raise ValueError(\"Invalid email value, expect str\")\n\n normalized_email = self.normalize_email(email)\n\n existing_email = \\\n self.model.objects.filter(email=normalized_email).first()\n\n if existing_email:\n raise Exception(\"This email is already assigned to another User\")\n\n return normalized_email", "def isEmail(email):\n if not re.match(r\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\", email):\n return(0)\n return(1)" ]
[ "0.800048", "0.799266", "0.78625476", "0.78314734", "0.77968144", "0.77801484", "0.7779725", "0.7750616", "0.7714883", "0.7691427", "0.7665794", "0.76622987", "0.7633775", "0.762817", "0.7609659", "0.76070136", "0.75767475", "0.7563735", "0.7555903", "0.75257874", "0.7525405", "0.75217175", "0.7521635", "0.7507308", "0.74756914", "0.74277073", "0.741946", "0.74104244", "0.7409805", "0.74080163", "0.7399391", "0.7395284", "0.7384462", "0.7377802", "0.73654395", "0.7360897", "0.7349016", "0.7344122", "0.7343781", "0.73372906", "0.73255336", "0.73231083", "0.732011", "0.73084134", "0.7279133", "0.7278293", "0.72541857", "0.7245998", "0.72392744", "0.72381365", "0.72255486", "0.7222203", "0.7220228", "0.72173434", "0.72128856", "0.7208745", "0.72002393", "0.7193715", "0.71708786", "0.7166148", "0.7163786", "0.71531945", "0.7151708", "0.71398467", "0.7129946", "0.7128283", "0.705989", "0.7055075", "0.7042373", "0.70349026", "0.703465", "0.7028746", "0.7012412", "0.6999526", "0.69940937", "0.69847554", "0.69845736", "0.6958354", "0.6946199", "0.69457346", "0.6943079", "0.6940733", "0.69314593", "0.6922057", "0.6920595", "0.69088775", "0.6895022", "0.6894736", "0.6892879", "0.68874276", "0.6883182", "0.6882128", "0.68719757", "0.6870049", "0.6870049", "0.68676805", "0.6855489", "0.6854509", "0.68311036", "0.6817434", "0.6817064" ]
0.0
-1
Returns the User object for the email address
def get_user(self):
    if not self.is_valid():
        return None  # error checking done in: clean_email
    # NOTE: all emails are stored in lower-case
    e = self.clean_email().lower()
    return User.objects.get(email=e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user(email):\r\n return User.objects.get(email=email)", "def lookup_email(email):\n user = User.objects(email=email).first()\n return user", "def find_by_email(cls, email):\n return User.objects.filter(email=email).first()", "def get_by_email(self, email):\n user = (\n self.session\n .query(tables.User)\n .filter_by(email=email)\n .first()\n )\n return user", "def get_user_by_email(cls, user_email):\n\n try:\n user_login_info = User.query.filter_by(email=user_email).one()\n\n return user_login_info\n\n except Exception, error:\n print error", "def _find_user_by_email_address(self, email_address):\n # XXX: Maybe model is more appropriate place for such a method\n self.user = meta.session.query(User).filter_by(email=email_address).filter_by(verification=None).first()\n return self.user", "def helper_get_by_email(user_email):\n user = heart_rate_databases_starter.models.User.objects.raw({\"_id\": user_email}).first() # Get the first user where _id=email\n return user", "def get_user_by_email(self, strategy, email):\r\n return strategy.storage.user.user_model().objects.get(email=email)", "def get_user_by_email(email):\n\n return User.query.filter(User.email == email).first()", "def get_user_by_email(email):\n\n return User.query.filter(User.email == email).first()", "def get_user_by_email(email):\n\n user = User.query.filter(User.email == email).first()\n \n return user", "def get_user(self, email):\n try:\n return RegisterUser.objects.get(email=email)\n except:\n return None", "def get_user(email, queryset=None):\n if queryset is None:\n queryset = User.objects\n return queryset.get(username=_email_to_username(email))", "def get_user_by_email(email):\r\n\t\tif email:\r\n\t\t\ttry:\r\n\t\t\t\tuser = User.objects.filter(email=email, is_active=True)[0]\r\n\t\t\t\treturn user\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\treturn None", "def get_user_by_email(self, email: str):\n try:\n return model_to_dict(\n User.select().where(User.email == email).get())\n except DoesNotExist:\n raise ValueError(HTTPStatus.NOT_FOUND,\n 'User with email {} does not exist'.format(email))\n except Exception:\n raise BaseException(HTTPStatus.INTERNAL_SERVER_ERROR,\n 'Internal server error')", "def user_by_email(email):\n user = User.query.filter(User.email == email).one_or_none()\n return user", "def load_user(user_email):\n return User.query.get(user_email)", "def get_by_email(self, email):\n return self.session.query(User).filter_by(email=email).first()", "def get_user_by_username_or_email(username_or_email):\r\n if '@' in username_or_email:\r\n return User.objects.get(email=username_or_email)\r\n else:\r\n return User.objects.get(username=username_or_email)", "def get_one_user_by_email(email):\n return UserModel.query.filter_by(email=email, deleted_at=None).first()", "def get_info(email):\n # Get the first user where _id=email\n user = models.User.objects.raw({\"_id\": email}).first()\n return user", "def get_user(self, email):\n\n try:\n return self.client.admin_get_user(\n Username=email,\n UserPoolId=self.user_pool_id\n )\n except self.client.exceptions.UserNotFoundException:\n raise Exception('An account with the given email does not exist.')", "def _user_from_name_or_email(username_or_email):\r\n username_or_email = strip_if_string(username_or_email)\r\n\r\n if '@' in username_or_email:\r\n return User.objects.get(email=username_or_email)\r\n else:\r\n return User.objects.get(username=username_or_email)", "def get_user(self, email):\n return run_transaction(\n self.sessionfactory,\n lambda session: 
get_user_txn(session, email))", "def retrieve_user(self, email):\n if self.database is None:\n raise Exception(\"No database.\")\n if email is None or len(email) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_user(email)", "def _resolve_user(self, data: dict):\n user_email = data.get('eml')\n if not user_email:\n raise OBDControllerError('User email not found')\n\n user: User = self.db_session.query(User).filter(User.email == user_email).first()\n if not user:\n raise OBDControllerError('User does not exist')\n\n return user", "def find_user_by_email_address(email_address: str) -> Optional[DbUser]:\n return DbUser.query \\\n .filter(\n db.func.lower(DbUser.email_address) == email_address.lower()\n ) \\\n .one_or_none()", "def search_user_by_email(email: str, session=Depends(transaction)):\n user = get_user_by_email(session, email)\n if user is None:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND,\n detail=\"User was not found.\")\n return UserResponse(id=user.id, email=user.email, name=user.name,\n address=user.address, latitude=user.latitude,\n longitude=user.longitude)", "async def get_by_email(self, email: str) -> Optional[UD]:\n user = await looped_fetch(\n self.async_deta_base.fetch, query={\"email\": email.lower()}\n )\n\n return self.user_db_model(**user) if user else None", "def get_user_by_email(email):\n user = User.query.filter(User.email == email).first()\n result = userSchema.dump(user)\n return jsonify(result)", "def get_user(self, user_id=None, email=None):\n\n if not user_id and not email:\n raise ValueError('Either a user id or email must be provided.')\n\n query = User.objects.all()\n\n if user_id:\n query = query.filter(id=user_id)\n\n if email:\n email = normalize_email_address(email)\n query = query.filter(username=email)\n\n try:\n return query.get()\n except User.DoesNotExist:\n raise UserNotFound('The user with the provided parameters is not found.')", "def get_account_for_email(cls, email):\n assert email\n key = '<%s>' % email\n return cls.get_by_key_name(key)", "def get_user_account_from_email(email, default='raise', active_only=True):\n email = email.strip()\n try:\n return auth.models.User.objects.get(email__iexact=email,\n is_active=active_only)\n except auth.models.User.DoesNotExist:\n # User does not exist\n if default == 'raise':\n raise\n else:\n return default\n except auth.models.User.MultipleObjectsReturned:\n # The system expects to only have one user record per email,\n # so let's reraise the error to have it fixed in the database.\n raise auth.models.User.MultipleObjectsReturned(\n 'Found multiple records for user with email %r' % email)", "def find_user_by_mail(mail: str) -> User:\n\n # Find user with this username, or None if there isn't any\n return User.query.filter_by(mail=mail).first()", "def user_from_str(identifier):\r\n try:\r\n user_id = int(identifier)\r\n except ValueError:\r\n return User.objects.get(email=identifier)\r\n else:\r\n return User.objects.get(id=user_id)", "def show(self, email):\n\n return User.query.filter_by(email=email).first()", "def getUserbyEmail(self, email):\n\n cursor = self.conn.cursor()\n query = \"SELECT uid, cid, ufirstname, ulastname, udescription, urole, uclassification, email, pin \" \\\n \"FROM Users natural inner join Credential \" \\\n \"WHERE email= %s;\"\n cursor.execute(query, (email,))\n result = cursor.fetchone()\n return result", "def get_user_by_email(self, emailid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': 
self._lr_object._get_api_secret(),\n 'emailid': emailid}\n url = SECURE_API_URL + \"raas/v1/user\"\n return self._lr_object._get_json(url, payload)", "def get(self, email: str):\r\n args = authParser.parse_args()\r\n\r\n if not validators.email(email):\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Input a valid email address\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n user = UserModel.query.filter(UserModel.email == email).first()\r\n\r\n if not user:\r\n # The email doesnt exist\r\n return {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Not Found\"\r\n }\r\n }, http.client.NOT_FOUND\r\n user = admin_namespace.marshal(user, user_model)\r\n return {\r\n \"status\": \"success\",\r\n \"details\": {\r\n \"result\": user\r\n }\r\n }, http.client.OK", "def find_invited_user(email, default=None):\n\n User = apps.get_model(settings.AUTH_USER_MODEL)\n\n try:\n return User.objects.get(email=email)\n except User.DoesNotExist:\n return default", "def lookup_user(email):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.users_lookupByEmail(email=email)\n assert response['ok'] is True\n return response['user']['id']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def get(self, username):\n return User.find_by_username_or_email(username)", "def authUser(self, email='user@mail.com'):\n user = self._createUser(email=email, role=UserType.USER)\n return user, self._authenticate(user)", "def lookup_user_by_email(email):\n try:\n slack_client = get_client()\n result = slack_client.users_lookupByEmail(email=email)\n id = result.data['user']['id'] # Looks like: UJ0JNCX19, tag the user in a message like <@UJ0JNCX19>\n return '<@' + id + '>'\n except:\n return email", "def get_user_from_token(token):\n try:\n jwt_decode = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])\n except:\n return None\n email = jwt_decode['user']\n try:\n user = User.get(User.email == email)\n except:\n return None\n return user", "def get_user(user_ref):\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(username=user_ref)\n except UserModel.DoesNotExist:\n return UserModel.objects.get(email=user_ref)", "def find_user(cls, user_unique_identifier):\n # Select from the table users where email_id = email_id limit 1 .\n # return a UserModel Object .\n return cls.query.filter_by(uuid=user_unique_identifier).first( )", "def find_by_mail(cls, mail):\n return UsersModel.query.filter_by(mail=mail).first()", "def current_user(email):\n for user in Data.users:\n if email == user['email']:\n return user", "def get_user(self, user_id):\n _email = self._email_for_user_id(user_id)\n response = self._get('/users?{0}'.format(urllib.urlencode({'search': _email})))\n for _user in response:\n if _user['email'] == _email:\n return _user\n return None", "def get_user(cls, email=None, user_id=None):\n\n params = {'email': email, 'user_id': user_id}\n user_dict = cls._do_call(\n 'GET', cls.api_endpoint + 'users', params=params)\n return user_dict", "def find_user(self, username=None, email=None):\n if username:\n try:\n if self.auth_username_ci:\n return (\n self.get_session.query(self.user_model)\n .filter(func.lower(self.user_model.username) == func.lower(username))\n .one_or_none()\n )\n else:\n return (\n self.get_session.query(self.user_model)\n .filter(func.lower(self.user_model.username) == func.lower(username))\n .one_or_none()\n )\n except 
MultipleResultsFound:\n log.error(\"Multiple results found for user %s\", username)\n return None\n elif email:\n try:\n return self.get_session.query(self.user_model).filter_by(email=email).one_or_none()\n except MultipleResultsFound:\n log.error(\"Multiple results found for user with email %s\", email)\n return None", "def getLoginUser():\n return session.query(User).filter_by(\n email=login_session['email']).one()", "def GetAppEngineUser(user_id):\n email_address = GetEmailAddress(user_id)\n if email_address:\n return users.User(email_address)\n else:\n return None", "def get_email(obj):\r\n return obj.user.email", "def _authenticate_and_get_user(self, email, password, user=None):\n # Authenticate the user\n emailBackend = EmailBackend()\n user = emailBackend.authenticate(username=email, password=password, user=user)\n return user", "def get_user_by_email(tx: Transaction, user_email: str) -> BoltStatementResult:\n\n # NOTE: tag labels are hardcoded here. If they change in tags csv, must be changed here.\n query = f\"\"\"\n MATCH (user:Person {{email: '{user_email}'}})\n OPTIONAL MATCH (user)-->(skill_tag:Tag:CanHelpWithTag)\n OPTIONAL MATCH (user)-->(passion_tag:Tag:PassionsTag)\n RETURN user, COLLECT(DISTINCT skill_tag.name) AS help_others, COLLECT(DISTINCT passion_tag.name) AS passions\"\"\"\n return tx.run(query)", "def get_by_name_or_email(self, name_or_email):\n User = tables.User\n user = (\n self.session\n .query(User)\n .filter(or_(\n func.lower(User.user_name) == name_or_email.lower(),\n func.lower(User.email) == name_or_email.lower()\n ))\n )\n return user.first()", "def test_resource_user_resource_get_user_by_email_address_get(self):\n pass", "def _create_user(self, email, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.save(using=self._db)\n return user", "def user_loader(self, email):\n if not self.um.check_file_exists(\"users.json\"):\n return\n\n if email not in self.users:\n return\n\n user = User()\n user.id = email\n return user", "def find_user():\n\tpost_json = request.get_json()\n\tif not post_json:\n\t\tabort(400)\n\temail = post_json['email']\n\tif not email:\n\t\tabort(400)\n\n\tuser = models.User.query.filter_by(email=email).first()\n\n\tif not user:\n\t\tusername = email.split(\"@\")[0]\n\t\tsame_username = models.User.query.filter_by(username=username).all()\n\t\tif len(same_username) > 0:\n\t\t\tusername = username + str(len(same_username))\n\t\tuser = models.User(\n\t\t\tusername = username,\n\t\t\temail = email,\n\t\t\tpassword = \"\"\n\t\t)\n\t\tdb.session.add(user)\n\t\tdb.session.commit()\n\treturn jsonify({'user_id':user.id}), 201", "def email_exist(email):\n return User.objects.filter(email=email).first()", "def test_get_user_by_emailuser_email_get(self):\n pass", "def test_gets_user_by_email(self):\r\n User.objects.create(email='another_user@example.com')\r\n desired_user = User.objects.create(email='edith@example.com')\r\n found_user = PasswordlessAuthenticationBackend().get_user('edith@example.com')\r\n self.assertEqual(desired_user, found_user)", "def clean_email(self):\n try:\n user = User.objects.get(email__iexact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(\"A user with that email already exists.\"))", "def fetch_user(query: str) -> User:\n user_filter = models.Q(username=query) | models.Q(email=query)\n if query.isdigit():\n user_filter |= models.Q(pk=query)\n\n user_model = get_user_model()\n try:\n 
return user_model.objects.get(user_filter)\n except user_model.DoesNotExist:\n raise exceptions.ParseError(\"Unknown user: {}\".format(query))\n except user_model.MultipleObjectsReturned:\n raise exceptions.ParseError(\"Cannot uniquely determine user: {}\".format(query))", "def lookup(cls, email):\n return cls.query.filter_by(email=email).one_or_none()", "def get_user(login):\n if isinstance(login,str) or isinstance(login,unicode):\n user = Session.query(User).filter(or_(User.email==login,\n User.username==login.lower())).first()\n return user\n else:\n raise Exception(login)", "def existing_email(cls, email):\n user_db = User.get_by('email', email)\n if not user_db:\n raise ValueError('This email is not in our database.')\n return email", "def create_user(self, email):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(email=self.normalize_email(email))\n #user.set_password('python_utah_north')\n user.save(using=self._db)\n return user", "def find_by_email(self, email):\n return self._repository.find_by_email(email)", "def find_by_identity(cls, identity):\n return User.query.filter(\n (User.email == identity) | (User.username == identity)).first()", "def get_auth0_user_id_by_email(email):\n\n get_token = GetToken(auth0_domain)\n token = get_token.client_credentials(\n auth0_client_id,\n auth0_client_secret,\n 'https://{}/api/v2/'.format(auth0_domain)\n )\n mgmt_api_token = token['access_token']\n auth0_users = Auth0Users(auth0_domain, mgmt_api_token)\n query = 'email:%s' % email\n results = auth0_users.list(q=query, search_engine='v3')\n if results['users']:\n auth0_user_id = results['users'][0]['user_id']\n else:\n auth0_user_id = None\n\n return auth0_user_id", "def get_fname_by_email(email):\n\n return db.session.query(User.fname).filter(User.email == email).first()", "def find_by_email(self, email):\n return self.mapper.find_by_email(email)", "def get_user(current_user):\n for user in user_db:\n if user['email'] == current_user:\n return user", "def verification_email_and_return_username(value: str) -> str:\n\n user = User.objects.filter(email=value)\n if len(user) != 0:\n return user[0].username\n else:\n raise serializers.ValidationError('User with given credentials are not found')", "def get_user_by_email(\n payload: dict,\n raiseonfail: bool = False,\n override_authdb_path: str = None,\n config: SimpleNamespace = None,\n) -> dict:\n\n engine, meta, permjson, dbpath = get_procdb_permjson(\n override_authdb_path=override_authdb_path,\n override_permissions_json=None,\n raiseonfail=raiseonfail,\n )\n\n for key in (\"reqid\", \"pii_salt\"):\n if key not in payload:\n LOGGER.error(\n \"Missing %s in payload dict. 
Can't process this request.\" % key\n )\n return {\n \"success\": False,\n \"user_info\": None,\n \"failure_reason\": (\n \"invalid request: missing '%s' in request\" % key\n ),\n \"messages\": [\"Invalid user info request.\"],\n }\n\n if \"email\" not in payload:\n\n LOGGER.error(\n \"[%s] Invalid user lookup request, missing %s.\"\n % (payload[\"reqid\"], \"email\")\n )\n\n return {\n \"success\": False,\n \"user_info\": None,\n \"failure_reason\": \"invalid request: missing 'email' in request\",\n \"messages\": [\"email provided.\"],\n }\n\n email = payload[\"email\"]\n\n try:\n\n users = meta.tables[\"users\"]\n\n s = (\n select(*user_info_columns(users))\n .order_by(asc(users.c.user_id))\n .select_from(users)\n .where(users.c.email == email)\n )\n\n with engine.begin() as conn:\n result = conn.execute(s)\n rows = result.first()\n\n try:\n\n serialized_result = dict(rows._mapping)\n\n LOGGER.info(\n \"[%s] User lookup request succeeded. \"\n \"email provided: %s.\"\n % (\n payload[\"reqid\"],\n pii_hash(payload[\"email\"], payload[\"pii_salt\"]),\n )\n )\n return {\n \"success\": True,\n \"user_info\": serialized_result,\n \"messages\": [\"User look up successful.\"],\n }\n\n except Exception as e:\n\n LOGGER.error(\n \"[%s] User lookup request failed. \"\n \"email provided: %s. Exception: %r\"\n % (\n payload[\"reqid\"],\n pii_hash(payload[\"email\"], payload[\"pii_salt\"]),\n e,\n )\n )\n\n if raiseonfail:\n raise\n\n return {\n \"success\": False,\n \"user_info\": None,\n \"failure_reason\": \"user email not found in DB\",\n \"messages\": [\"User look up failed.\"],\n }\n\n except Exception as e:\n\n LOGGER.error(\n \"[%s] User lookup request failed. \"\n \"email provided: %s. Exception: %r\"\n % (\n payload[\"reqid\"],\n pii_hash(payload[\"email\"], payload[\"pii_salt\"]),\n e,\n )\n )\n\n if raiseonfail:\n raise\n\n return {\n \"success\": False,\n \"user_info\": None,\n \"failure_reason\": \"exception when checking the DB\",\n \"messages\": [\"User look up failed.\"],\n }", "def get_by_email(cls, email):\n try:\n cursor.execute(\"select * from users where email = %s\", (email,))\n user = cursor.fetchone()\n return list(user)\n except Exception:\n return False", "def get_user_by_email(self, email):\n query = \"SELECT * FROM users WHERE email = %s\"\n self.cursor.execute(query,[email])\n result = self.cursor.rowcount\n return result", "def get_user(self):\n session_key = request.get_cookie(\n self.conf['auth.cookie_key'],\n secret=self.conf['auth.cookie_secret']\n )\n if session_key:\n with atomic(self.conf['auth.dbfile']) as cursor:\n try:\n username, email = next(cursor.execute(\"\"\"\n SELECT username, email\n FROM sessions\n INNER JOIN users ON users.userid = sessions.userid\n WHERE sessions.key = ?\n AND sessions.started <= (SELECT\n datetime('now', '+3 hour'))\n \"\"\", (session_key,)))\n except StopIteration:\n return\n else:\n return User(username, email, get_usergroups(cursor, \n username))", "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def find_user(database: Database) -> User:\n session_id = cast(SessionId, bottle.request.get_cookie(\"session_id\"))\n session = find_session(database, session_id)\n return User(session.get(\"user\", \"\"), session.get(\"email\", \"\"), session.get(\"common_name\", \"\"))", "def sample_user_dynamic_email(email):\n return get_user_model().objects.create_user(email=email,\n password=\"password123\",\n name=\"some name\")", "def get_user(name):\n try:\n return 
User.objects.get(name=name)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\"There is no user '{}'.\".format(name))", "def authenticate(self, username=None, password=None):\n\n if '@' in username:\n kwargs = {'email': username}\n else:\n kwargs = {'username': username}\n try:\n user = User.objects.get(**kwargs)\n if True:\n return user\n except User.DoesNotExist:\n return None", "def search_user_by_email(self,useremail, cursor):\n sql = \"SELECT * FROM users WHERE useremail=%s\"\n cursor.execute(sql,(useremail,))\n return cursor", "def get_user(self, instance, name):\n return instance.get_user(name)", "def get_user_name_from_email(email):\n u = db(db.auth_user.email == email).select().first()\n if u is None:\n return 'None'\n else:\n return ' '.join([u.first_name, u.last_name])", "def get_reference_user(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return ref\n except ObjectDoesNotExist:\n return None", "def get_user(request: Request) -> User:\n\n return _check_and_extract_user(request)", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def validate_email(self, value):\n if not User.objects.filter(email=value).exists():\n raise serializers.ValidationError('User with this email does not exist.')\n return value", "def lookup_user_name(self, user_email):\n\n url = 'users'\n options = '?query={}'.format(user_email.lower())\n response = self._pagerduty_session_get(url, options).json()\n\n if len(response['users'][0]) == 0:\n return None, None\n\n return response['users'][0]['id'], response['users'][0]['name']", "def get_user_name_from_email(email):\n\tu = db(db.auth_user.email == email).select().first()\n\tif u is None:\n\t\treturn 'None'\n\telse:\n\t\treturn ' '.join([u.first_name, u.last_name])", "def find_by_email(cls, email):\n\t\tif email:\n\t\t\treturn cls.query.filter_by(email=email).first()\n\t\treturn {\n\t\t\t\t'message': 'email field is required',\n\t\t\t\t'status': 'Failed'\n\t\t\t\t}, 400", "def _get_user_by_criteria(id_, email):\n criteria = dict()\n try:\n if id_:\n criteria[\"id_\"] = id_\n elif email:\n criteria[\"email\"] = email\n return User.query.filter_by(**criteria).one_or_none()\n except StatementError as e:\n print(e)\n return None", "def login(cls, email, password):\n user = cls(email, password)\n\n if not User.valid_auth(email, password):\n user.register()\n return user", "def retrieve_user_details(self, email):\n if self.database is None:\n raise Exception(\"No database.\")\n if email is None or len(email) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_user_details(email)" ]
[ "0.84499913", "0.83574003", "0.82821196", "0.818987", "0.81825906", "0.81438047", "0.8139404", "0.81299245", "0.81272453", "0.81272453", "0.8071784", "0.8008092", "0.8000772", "0.79462504", "0.78996384", "0.7887486", "0.7843716", "0.77918035", "0.77474326", "0.7740562", "0.772918", "0.77148354", "0.7672294", "0.76508826", "0.75336796", "0.7533041", "0.7497959", "0.74014187", "0.73671955", "0.73515534", "0.73463076", "0.7337273", "0.73194104", "0.72748953", "0.72511464", "0.72331", "0.7232614", "0.71952206", "0.71360755", "0.7122227", "0.7119355", "0.71107155", "0.7108522", "0.70690966", "0.70634896", "0.7032376", "0.7021429", "0.7011558", "0.6999539", "0.69829255", "0.697522", "0.69697154", "0.6967916", "0.6955151", "0.6949017", "0.69135374", "0.69103265", "0.68636465", "0.6859555", "0.68547994", "0.68505424", "0.68434834", "0.68362314", "0.68357867", "0.68172926", "0.681363", "0.679204", "0.67806524", "0.67664224", "0.6763789", "0.67436284", "0.66774267", "0.6673222", "0.66691065", "0.66560066", "0.6569586", "0.6560527", "0.6554992", "0.6553955", "0.6524139", "0.65190333", "0.65162474", "0.6503728", "0.6501567", "0.6499412", "0.64949775", "0.6476023", "0.64754206", "0.64702815", "0.6465711", "0.6460338", "0.64483976", "0.64276206", "0.64276206", "0.642364", "0.6420306", "0.6418727", "0.6414919", "0.64122903", "0.63901114" ]
0.8178796
5
Determines if a filename is a valid Python module. Assumes it is just the end of a path (i.e. does not contain ``os.path.sep``).
def is_valid_module(filename):
    if not filename.endswith('.py'):
        return False
    if filename == '__init__.py':
        return True
    for prefix in IGNORED_PREFIXES:
        if filename.startswith(prefix):
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_module(path: str) -> bool:\n return os.path.isfile(path) and path.endswith(\".py\")", "def is_module(path):\n\n fname, ext = os.path.splitext(path)\n if ext == \".py\":\n return True\n elif os.path.exists(os.path.join(path, \"__init__.py\")):\n return True\n else:\n return False", "def is_module(filename):\n if not os.path.exists(filename):\n return None\n\n if filename.endswith('.py'):\n # Assume the file is a module file\n return PY_MODULEFILE\n regex = re.compile(re.escape(r'#%Module'))\n if regex.search(open(filename).readline()):\n return TCL_MODULEFILE\n return None", "def is_python_file(path):\n valid = False\n\n if os.path.isfile(path) and path.endswith('.py'):\n valid = True\n\n return valid", "def is_module_or_package(path):\r\n is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw')\r\n is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))\r\n return is_module or is_package", "def isModule(self, name):\n return os.path.isfile(self.modulePath(name))", "def is_valid_filename_py(filename):\n return _is_valid_filename(filename, 'py')", "def check_module(name):\n return importlib.util.find_spec(name) is not None", "def is_valid_package_module_name(name):\n if \".\" in name:\n for part in name.split(\".\"):\n if not is_valid_package_module_name(part):\n return False\n elif len(name):\n if name[0] not in _first_letter_for_valid_name:\n return False\n\n if len(set(name).difference(_char_set_for_valid_name)):\n return False\n else:\n return False\n return True", "def is_python_module(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n if (\n os.path.isfile(file_path)\n and file_path.endswith(\".py\")\n and file_name != \"__init__.py\"\n ):\n # Single file module (e.g. six.py)\n module_name = file_name.replace(\".py\", \"\")\n return (True, module_name)\n\n return (False, None)", "def _is_file_valid(name: str) -> bool:\n return not name.startswith(\".\")", "def is_python_module(module_path, name):\n\n try:\n imp.load_source(name, module_path)\n except (NameError, SyntaxError):\n return False\n except ImportError:\n log.w(TAG, \"This is a python module, but has non-existent imports!\")\n return False\n\n return True", "def IsPackage(path):\n init_base_path = os.path.join(path, '__init__.py')\n return (os.path.isfile(init_base_path) or\n os.path.isfile(init_base_path + 'c') or\n os.path.isfile(init_base_path + 'o'))", "def is_valid_file(file):\n return file.endswith('.py')", "def _check_if_pyc(fname):\n from imp import find_module\n from os.path import realpath, dirname, basename, splitext\n\n # Normalize the file-path for the find_module()\n filepath = realpath(fname)\n dirpath = dirname(filepath)\n module_name = splitext(basename(filepath))[0]\n\n # Validate and fetch\n try:\n fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])\n except ImportError:\n raise IOError(\"Cannot find config file. \"\n \"Path maybe incorrect! 
: {0}\".format(filepath))\n return pytype, fileobj, fullpath", "def is_file(path_name):\n if re.search(\"\\.[a-zA-Z]+$\", os.path.basename(path_name)):\n return True\n else:\n return False", "def valid_filename(filename):\n if filename in IGNORED_FILES:\n return False\n if not os.path.exists(filename):\n return False\n _, ext = os.path.splitext(filename)\n return ext == '.py'", "def _has_extension(self, path):\r\n if re.match(r'.*\\\\.*\\..*$', path):\r\n return True", "def is_valid_filename_pyh(filename):\n return _is_valid_filename(filename, 'pyh')", "def _is_package(path):\n if not os.path.isdir(path):\n return False\n return os.path.isfile(os.path.join(path, '__init__.py'))", "def is_package(path: str) -> bool:\n return os.path.isdir(path) and \"__init__.py\" in os.listdir(path)", "def is_pyi_directory_init(filename):\n if filename is None:\n return False\n return path_utils.splitext(path_utils.basename(filename))[0] == \"__init__\"", "def path_is_nuspec_file(path):\n return os.path.isfile(path) and path.endswith('.nuspec')", "def _is_package(path):\n return (\n os.path.isdir(path)\n and os.path.exists(os.path.join(path, '__init__.py'))\n )", "def _is_package(path):\n return (\n os.path.isdir(path)\n and os.path.exists(os.path.join(path, '__init__.py'))\n )", "def is_bash_module(module_path):\n\n with open(module_path, 'r') as file_f:\n\n shebang = file_f.readline().rstrip('\\n')\n\n if re.match(\"^#!/.*sh$\", shebang):\n return 1\n else:\n return 0", "def isJsFile(path):\n return os.path.splitext(path)[1] == '.js'", "def is_python_log(log):\n found_file_extensions = []\n for file_extension in file_extensions:\n if re.search(r\"\\.%s(?!\\.)\\b\" % file_extension, log):\n found_file_extensions.append(file_extension)\n if len(found_file_extensions) == 1 and found_file_extensions[0] == \"py\":\n return True\n return False", "def is_package(self, fullname):\n return hasattr(self.__get_module(fullname), \"__path__\")", "def is_module(obj):\n return type(obj) is types.ModuleType", "def identifyIfExternalModuleExists(moduleIn, workingDir):\n if moduleIn.endswith('.py'):\n moduleToLoadString = moduleIn[:-3]\n else:\n moduleToLoadString = moduleIn\n workingDirModule = os.path.abspath(os.path.join(workingDir,moduleToLoadString))\n if os.path.exists(workingDirModule + \".py\"):\n moduleToLoadString = workingDirModule\n path, filename = os.path.split(workingDirModule)\n os.sys.path.append(os.path.abspath(path))\n else:\n path, filename = os.path.split(moduleToLoadString)\n if (path != ''):\n abspath = os.path.abspath(path)\n if '~' in abspath:\n abspath = os.path.expanduser(abspath)\n if os.path.exists(abspath):\n os.sys.path.append(abspath)\n else:\n raise IOError('The file \"{}\" provided does not exist!'.format(moduleIn))\n return moduleToLoadString, filename", "def is_header(file):\n return not file.is_dir() and not file.name == 'module.modulemap.in' and file.name != 'libcxx.imp'", "def is_valid(self, qstr=None):\r\n if qstr is None:\r\n qstr = self.currentText()\r\n return is_module_or_package(unicode(qstr))", "def ismodule(object):\r\n return isinstance(object, types.ModuleType)", "def is_func_module(self, ea, mod_name):\n if ea in self.rt_import_table:\n (module, ea, name, ord) = self.rt_import_table[ea]\n if module == mod_name:\n return True\n\n return False", "def is_py_script(item: str):\n is_it_py_script : bool = False\n ext : str = \".py\"\n if ext in item:\n is_it_py_script = True\n ...\n\n return is_it_py_script\n ...", "def is_pkg(cls, path):\n return exists(join(path, 
'__init__.py'))", "def _is_valid_filename(filename, ext):\n if not isinstance(filename, str):\n return False\n\n if not(ext == filename.split('.')[-1]):\n return False\n fname = os.path.abspath(filename)\n return os.path.isfile(fname)", "def check_extension(f):\n parts = f.split('.')\n last = parts[len(parts) - 1]\n return last in allowed_extensions", "def _isvalid_file(filename):\r\n thisisavalidfile = True\r\n if (filename[0] == \".\") or (filename[0] == \"_\") or not ((filename.split(\".\")[-1] == \"txt\") or (filename.split(\".\")[-1] == \"csv\")):\r\n thisisavalidfile = False\r\n\r\n return thisisavalidfile", "def _IsWellFormattedFilePath(path):\n return path.startswith(SRC) and path.endswith(_OWNERS)", "def is_valid_test_file(test_file):\n return '.pyc' not in test_file and '__pycache__' not in test_file", "def is_file_o(value):\n if not (type(value) is str and os.path.split(value)[0]):\n return False\n else:\n return True", "def module_name_from_filepath(path: str) -> str:\n name = osp.splitext(osp.basename(path))[0]\n if name == '__init__':\n name = osp.basename(osp.dirname(path))\n return name", "def looks_like_a_filename(kernel_source):\n result = False\n if isinstance(kernel_source, str):\n result = True\n #test if not too long\n if len(kernel_source) > 250:\n result = False\n #test if not contains special characters\n for c in \"();{}\\\\\":\n if c in kernel_source:\n result = False\n #just a safeguard for stuff that looks like code\n for s in [\"__global__ \", \"__kernel \", \"void \", \"float \"]:\n if s in kernel_source:\n result = False\n #string must contain substring \".c\", \".opencl\", or \".F\"\n result = result and any([s in kernel_source for s in (\".c\", \".opencl\", \".F\")])\n return result", "def checkName(name):\n currentpath = os.path.dirname(os.path.realpath(__file__))\n fullpath = os.path.join(currentpath, name)\n return os.path.isfile(fullpath)", "def test_get_module_qualname_from_path_invalid_path(self):\n\n name = b_utils.get_module_qualname_from_path(\"/a/b/c/d/e.py\")\n self.assertEqual(\"e\", name)", "def is_cpp(filename: Path) -> bool:\n from fnmatch import fnmatch\n\n return any(fnmatch(os.path.basename(filename), p) for p in CPP_PATTERNS)", "def check_path(filename):\n return not bool(checkPath(filename))", "def module_imported(module_name):\n return sys.modules.get(module_name) is not None", "def is_test_filename(filename):\n return 'test' in filename", "def is_package(self, fullmodname):\n submodname, is_package, relpath = self._get_info(fullmodname)\n return is_package", "def check_file_type(fname):\n ext = path.splitext(fname)[1]\n return ext in allowed_extensions", "def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0", "def is_header(file_name):\n return str(file_name).endswith('.h')", "def check_file_ext(f_name):\n global im_ext_\n for ext_ in im_ext_:\n if f_name.lower().endswith(ext_):\n return True\n return False", "def is_file(path: str) -> bool:\n return os.path.isfile(path)", "def checkIfPythonModuleIsInstalled(testConfig):\n try:\n exec(\"import \"+testConfig[\"pyModule\"])\n assert True\n except Exception as e:\n assert False, testConfig[\"name\"]+\": \"+testConfig[\"pyModule\"]+\" could not successfully be loaded in Python.\"", "def has_module(self, name: str) -> bool:\n return name in self.modules", "def is_good_file(filename):\n for e in 
extensions:\n if filename.endswith(e):\n return True\n return False", "def is_directory(self):\n return all(isinstance(child, PyModule) for child in self._children())", "def isCfile(path:str) -> bool:\n if not isexist(path):\n return False\n \n name, ext = path.split(\".\")\n \n return ext == \"c\"", "def is_path(cls, path_or_content: str):\n return (\n len(str(path_or_content).strip().splitlines()) == 1 and\n (os.path.splitext(path_or_content)[1] in cls.valid_file_extensions)\n )", "def check_standard_dir(module_path):\n if 'site-packages' in module_path:\n return True\n for stdlib_path in _STDLIB_PATHS:\n if fnmatch.fnmatchcase(module_path, stdlib_path + '*'):\n return True\n return False", "def _filter(self, path):\n return path.endswith('.py')", "def module_exists(module_name):\r\n\r\n try:\r\n __import__(module_name)\r\n except ImportError:\r\n return False\r\n else:\r\n return True", "def is_valid_package_name(package_name):\n return (\n package_name and\n bool(PACKAGE_NAME_RE.match(package_name)) and\n all(c.replace('.', '') != '' for c in package_name.split('/')))", "def is_allowed_file(filename):\n allowed_ext = filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n return '.' in filename and allowed_ext", "def is_python_package(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n file_name = os.path.basename(file_path)\n init_file_path = os.path.join(file_path, \"__init__.py\")\n\n if os.path.isdir(file_path) and os.path.isfile(init_file_path):\n # Package\n return (True, file_name)\n\n return (False, None)", "def is_filename(name):\n test = re.search(\"[A-Za-z0-9_-]+\\.xml$\", name)\n if test:\n return True\n else:\n return False", "def is_ida_module(module_name):\n return (\n module_name.startswith((\"_ida_\", \"ida\", \"idc\"))\n or module_name == \"sark\"\n or module_name == \"__main__\"\n )", "def is_js_file(fname):\r\n return REJS.search(fname) and \\\r\n TEST_INDICATOR not in fname", "def check_allowed_extension(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def _assert_perl_script(path):\n if not os.path.isfile(path):\n raise NotImplementedError('\"%s\" is not a file' % path)\n\n _stem, ext = os.path.splitext(path)\n if ext == \".pl\":\n return\n with open(path) as stream:\n if \"perl\" in stream.readline():\n return\n raise NotImplementedError(\"%s is not a perl script\" % path)", "def is_module_object(self, obj):\n if not isinstance(obj, BaseException):\n try:\n c = obj.__class__\n source_file = inspect.getsourcefile(c)\n except (TypeError, AttributeError):\n pass\n else:\n if source_file and source_file.startswith(self.path):\n return True\n\n return False", "def is_min(filename):\r\n return re.search(\"min.js$\", filename)", "def _import_module(self, name):\r\n try:\r\n __import__(name)\r\n return True\r\n except ImportError:\r\n return False", "def is_package_dir(path: Path) -> bool:\n if not path.is_dir():\n return False\n if path.name.endswith(\".egg-info\"):\n return False\n if (path / \"__init__.pyi\").exists():\n return True\n return False", "def allowed_code_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1] in app.config['ALLOWED_CODE_EXTENSIONS']", "def ModuleAvailability(module_name):\r\n\r\n try:\r\n imp.find_module(module_name)\r\n return True\r\n except ImportError:\r\n return False", "def __get_non_python_library_module_file(module_name, environment=sys.path):\n found = None\n\n # Use the longer paths first\n paths = reversed(sorted(environment))\n for path in paths:\n base_path = path.replace(\"\\\\\", \"/\")\n if stypy_parameters_copy.type_inference_file_directory_name in path:\n base_path = base_path.replace(\"/\" + stypy_parameters_copy.type_inference_file_directory_name, \"\")\n\n temp = base_path + \"/\" + module_name.replace('.', '/') + \".py\"\n if os.path.isfile(temp):\n found = temp\n # Module (__init__) names have precedence over file names\n temp = base_path + \"/\" + module_name.replace('.', '/') + \"/__init__.py\"\n if os.path.isfile(temp):\n found = temp\n break\n if found is None:\n pass\n\n return found", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def is_valid_filename(\n filename: PathType,\n platform: Optional[PlatformType] = None,\n min_len: int = DEFAULT_MIN_LEN,\n max_len: Optional[int] = None,\n fs_encoding: Optional[str] = None,\n check_reserved: bool = True,\n) -> bool:\n\n return FileNameValidator(\n platform=platform,\n min_len=min_len,\n max_len=-1 if max_len is None else max_len,\n fs_encoding=fs_encoding,\n check_reserved=check_reserved,\n ).is_valid(filename)", "def _is_python_version(s: str) -> bool:\n\n return s.startswith(\"2\") or s.startswith(\"3\")", "def defined_submodule(arr):\n return any([el.endswith('_module]') for el in arr])", "def is_file(self):\n\n url_path = self.url.split('/')\n if re.match(r\".+\\.\\w+\", url_path[-1]):\n # Find <file_name>.<extension>\n return True\n return False", "def validfilename(self, filename):\n return not (filename == \"CVS\" or filename.startswith(\".\"))", "def is_file(path: str) -> bool:\n return _fs().is_file(path)", "def is_valid(path):\n return (\n bool(path)\n and os.path.isabs(path)\n and os.path.exists(path)\n and (not is_apple() or path.endswith(\".dylib\"))\n )", "def is_file(path):\n return os.path.isfile(path)", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def lookupmodule(self, filename):\n if os.path.isabs(filename) and os.path.exists(filename):\n return filename\n f = os.path.join(sys.path[0], filename)\n if os.path.exists(f) and self.canonic(f) == self.mainpyfile:\n return f\n root, ext = os.path.splitext(filename)\n if ext == '':\n filename = filename + '.py'\n if os.path.isabs(filename):\n return filename\n for dirname in sys.path:\n while os.path.islink(dirname):\n dirname = os.readlink(dirname)\n fullname = os.path.join(dirname, filename)\n if os.path.exists(fullname):\n return fullname\n return None", "def __is_file(extension_p, all_extensions_p):\n return extension_p in all_extensions_p", "def allowed_file(filename: str) -> bool:\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def has_module(name):\n _refresh_cache()\n return name in _modules" ]
[ "0.78435075", "0.7644184", "0.7453708", "0.74041945", "0.72734565", "0.7177794", "0.7162914", "0.6941581", "0.688582", "0.68661207", "0.68447286", "0.68002564", "0.66656727", "0.6652094", "0.6552671", "0.6495056", "0.6463849", "0.6461724", "0.63578534", "0.62999207", "0.62551785", "0.6217696", "0.6203724", "0.6176236", "0.6176236", "0.6150014", "0.61214566", "0.6111245", "0.61051655", "0.61011684", "0.60957354", "0.60831666", "0.6071883", "0.60451126", "0.60444045", "0.6034979", "0.60295475", "0.5989289", "0.5983622", "0.5975235", "0.5944519", "0.59016395", "0.58816886", "0.5872924", "0.5847926", "0.58321947", "0.58028024", "0.5795045", "0.578878", "0.5782747", "0.5781089", "0.57674825", "0.5758032", "0.57399166", "0.57275873", "0.5727045", "0.5726962", "0.57242626", "0.57201976", "0.5711968", "0.5710703", "0.5710463", "0.57075775", "0.5705867", "0.5689545", "0.5688843", "0.5681484", "0.56807464", "0.5677746", "0.5675627", "0.5672789", "0.56679016", "0.56461126", "0.56432354", "0.56225324", "0.56137335", "0.56115717", "0.55939406", "0.55806386", "0.5578534", "0.5576882", "0.5575488", "0.5571822", "0.5570079", "0.55597776", "0.5559208", "0.5558894", "0.55332345", "0.5529639", "0.552781", "0.5526066", "0.5526066", "0.5526066", "0.5526066", "0.5526066", "0.5526066", "0.55230653", "0.55217725", "0.5520272", "0.5514534" ]
0.73747915
4
Get list of all public modules relative to a path.
def get_public_modules(path, base_package=None):
    result = []
    for subdir, _, files in os.walk(path):
        # Skip folders that start with _.
        if any([part.startswith('_') for part in subdir.split(os.path.sep)]):
            continue
        _, rel_dir = subdir.split(path)
        rel_dir = rel_dir.lstrip(os.path.sep)
        for filename in files:
            if is_valid_module(filename):
                mod_name, _ = os.path.splitext(filename)
                rel_path = os.path.join(rel_dir, mod_name)
                if base_package is not None:
                    rel_path = os.path.join(base_package, rel_path)
                # Turn into a Python module rather than a file path.
                result.append(rel_path.replace(os.path.sep, '.'))
    return result
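A minimal usage sketch for get_public_modules (the directory layout, package name, and resulting module names are illustrative assumptions; is_valid_module is assumed to accept ordinary .py files):

# Hypothetical call: get_public_modules('/src/gcloud', base_package='gcloud')
# might return ['gcloud.storage.bucket', 'gcloud.datastore.key', ...]
# The core path-to-module-name conversion the function performs:
import os
rel_path = os.path.join('storage', 'bucket')  # rel_dir joined with the module name
module_name = os.path.join('gcloud', rel_path).replace(os.path.sep, '.')
assert module_name == 'gcloud.storage.bucket'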
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moduleList(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [p for p in folder_list \\\n if os.path.exists(os.path.join(path, p,'__init__.py'))\\\n or p[-3:] in ('.py','.so')\\\n or p[-4:] in ('.pyc','.pyo','.pyd')]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list", "def list_modules(path):\n modules = []\n for root, dirs, files in os.walk(path): # pylint: disable=unused-variable\n for file in files:\n if file.endswith(\".js\"):\n with open(os.path.join(path, file), 'r') as modfile:\n content = modfile.readlines()\n module_re = r\"/\\*\\* @module +([\\w.]+) +\\*/\"\n m = re.search(module_re, content[0])\n # test if its supposed to be a module\n if m and m.group(1):\n # great its a module ! lets see its content\n logger.debug(\"Module detected %s\" % m.group(1))\n modules.append((m.group(1), content))\n return modules", "def walk_modules(path):\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods", "def getRootModules():\n modules = []\n if ip.db.has_key('rootmodules'):\n return ip.db['rootmodules']\n t = time()\n store = False\n for path in sys.path:\n modules += moduleList(path) \n if time() - t >= TIMEOUT_STORAGE and not store:\n store = True\n print \"\\nCaching the list of root modules, please wait!\" \n print \"(This will only be done once - type '%rehashx' to \" + \\\n \"reset cache!)\"\n print\n if time() - t > TIMEOUT_GIVEUP:\n print \"This is taking too long, we give up.\"\n print\n ip.db['rootmodules'] = []\n return []\n \n modules += sys.builtin_module_names\n \n modules = list(set(modules))\n if '__init__' in modules:\n modules.remove('__init__')\n modules = list(set(modules))\n if store:\n ip.db['rootmodules'] = modules\n return modules", "def find_modules(x):\n return Path(x).rglob('*.py')", "def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]", "def list_modules(lookup_paths: list = None):\n result = []\n\n if lookup_paths is None:\n lookup_paths = analyzer_paths()\n\n for path in lookup_paths:\n analyzer_module_root = resource_filename(path, \"modules\")\n # analyzer_root = os.path.join(anchore_module_root, \"modules\")\n for f in os.listdir(analyzer_module_root):\n thecmd = os.path.join(analyzer_module_root, f)\n if re.match(r\".*\\.py$\", thecmd):\n result.append(thecmd)\n\n result.sort(key=lambda x: analyzer_name_from_path(x))\n return result", "def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name", "def list_dir(self, path):", "def listdir(self, path):\n return os.listdir(path)", "def get_modules(self):\n return self._module_loader.filelist", "def list_path(self, path):\n return LocalResources(\"\").list_path(path)", "def lists(path):\r\n return os.listdir(path)", "def modules_from_path(path, module_name=None, relative='.'):\n if 
path.is_file():\n yield import_module(relative + path.stem, module_name)\n elif path.is_dir():\n for file in path.iterdir():\n if '__pycache__' in str(file):\n continue\n if file.suffix == '.py' and file.stem != '__init__':\n yield from modules_from_path(file, module_name, relative)", "def modules(self):\n return self._modules.keys()", "def list(self):\n for dir in subdirs('plugins'):\n print dir.replace('plugins/', '')", "def getAllPackages(metadataPaths):\n\n global index\n index = createModuleIndex(metadataPaths)\n allRpms = []\n for name in index.get_module_names():\n module = index.get_module(name)\n for stream in module.get_all_streams():\n allRpms.extend(stream.get_rpm_artifacts())\n return allRpms", "def modules(self):\n return self._modules", "def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)", "def get_modules(site_path):\n for dirpath, dirnames, filenames in os.walk(site_path):\n module = dirpath[len(site_path) + 1:].split(os.path.sep)\n if module[0].endswith('.egg-info'):\n continue\n if module[0].endswith('.egg') or not module[0]:\n module.pop(0)\n\n for filename in filenames:\n root, ext = os.path.splitext(filename)\n if ext == '.py':\n if not re.match(VALID_PYTHON_FILENAME, root):\n continue\n if root == '__init__':\n yield '.'.join(module)\n else:\n yield '.'.join(module + [root])\n elif ext == '.egg-link':\n with open(os.path.join(dirpath, filename)) as f:\n egglink_path = f.readline().strip()\n linked_module_path = os.path.join(egglink_path, root.replace('-', '_'))\n for modname in get_modules_to_import_from_directory(linked_module_path):\n yield modname\n elif ext == '.egg':\n yield filename.split('-', 1)[0]", "def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)", "def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)", "def get_modules(self):\n return self._modules.values()", "def __dir__():\n return __all__", "def modules_in_current_dir(path, module_name):\n yield from modules_from_path(Path(path).parent, module_name)", "def get_leaf_modules(package_path):\n assert os.path.isfile(os.path.join(package_path, '__init__.py'))\n res = []\n root = os.path.join(package_path, '..')\n for path, _, files in os.walk(package_path):\n for f in files:\n if f.endswith(\".py\") and not f == \"__init__.py\":\n full_file = os.path.relpath(os.path.join(path, f), start=root)\n module = full_file.replace(os.sep, \".\")[:-3]\n res.append(module)\n return res", "def plugin_list(self):\r\n return get_module_list()", "def get_all_sources(remit):\n if remit == 'panzer' or remit == 'pandoc':\n os.chdir('source-'+remit)\n sourcelist = [name for name in os.listdir(\".\") if os.path.isdir(name)]\n os.chdir('..')\n else:\n # get the maximal list of sources for a diff\n pandoc_list = get_all_sources('pandoc')\n panzer_list = get_all_sources('panzer')\n sourcelist = list(set(pandoc_list+panzer_list))\n sourcelist.sort()\n return sourcelist", "def getDirContents(self, path):\r\n return sorted([int(file) for file in os.listdir(os.path.dirname(path))])", "def mod_list(dir):\n\n modList = []\n modHash = {}\n isModule = False\n for ii in os.walk(dir):\n if ii[0] == dir:\n for f in ii[2]:\n # If there is no __init__ file, then the directory\n # upon which mod_list() is operating is not a module\n if f[0:8] == '__init__':\n isModule = True\n elif f[-3:] == '.py':\n modHash[f[:-3]] = True\n elif f[-4:] == '.pyc' or f[-4:] == 
'.pyo':\n modHash[f[:-4]] = True\n if isModule:\n modList = modHash.keys()\n modList.sort()\n return(modList)\n else:\n # Returning an empty list allows 'in' tests since a list is iterable,\n # and None isn't\n return([])", "def modules():", "def public_files(self) -> Pattern:\n return self._parse_pattern(self.get(\"public_files\", None))", "def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]", "def list_plugins():\n l = list()\n for dir in os.listdir(PLUGIN_DIRECTORY):\n l.append(dir)\n return l", "def scandir(path_):\n return os.listdir", "def list(self):\n\n if self.isdir():\n from os import listdir\n\n return [u for e in listdir(self.fspath) for u in self.join(e).list()]\n\n else:\n return [self]", "def list_objects(self, path):\n return [x for x in self.list_objects_generator(path)]", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def get_public_keys():\n return public_keys", "def _find_local_submodules(pkgpath):\r\n # Find all the children modules in this package (non recursive)\r\n pkgname = static.modpath_to_modname(pkgpath, check=False)\r\n if pkgname is None:\r\n raise Exception('cannot import {!r}'.format(pkgpath))\r\n # TODO:\r\n # DOES THIS NEED A REWRITE TO HANDLE THE CASE WHEN __init__ does not exist?\r\n\r\n try:\r\n # Hack to grab the root package\r\n a, b = static.split_modpath(pkgpath, check=False)\r\n root_pkgpath = join(a, b.replace('\\\\', '/').split('/')[0])\r\n except ValueError:\r\n # Assume that the path is the root package if split_modpath fails\r\n root_pkgpath = pkgpath\r\n\r\n for sub_modpath in static.package_modpaths(pkgpath, with_pkg=True,\r\n recursive=False, check=False):\r\n sub_modname = static.modpath_to_modname(sub_modpath, check=False,\r\n relativeto=root_pkgpath)\r\n rel_modname = sub_modname[len(pkgname) + 1:]\r\n if not rel_modname or rel_modname.startswith('_'):\r\n # Skip private modules\r\n pass\r\n else:\r\n yield rel_modname, sub_modpath", "def library_search_path(self, pedantic=False):\n return []", "def __dir__(self):\n result = list(new_module.__all__)\n result.extend(('__file__', '__path__', '__doc__', '__all__',\n '__docformat__', '__name__', '__path__',\n '__package__', '__version__'))\n return result", "def _list_dir(self):\n return [os.path.join(self.cache_dir, fn)\n for fn in os.listdir(self.cache_dir)]", "def get_all_test_modules():\n test_modules = []\n current_directory = os.path.dirname(__file__)\n sys.path.insert(0, os.path.join(current_directory, '..'))\n files = sorted(os.listdir(current_directory))\n for file in files:\n if file.startswith('test') and file.endswith('.py'):\n test_modules.append(file.rstrip('.py'))\n\n return test_modules", "def __dir__():\n keys = (*globals().keys(), *_lazy_imports_obj.keys(), *_lazy_imports_mod.keys())\n return sorted(keys)", "def get_loaded_modules(self):\n return self._get_modules(self.loaded_modules)", "def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield desc.module\n else:\n 
continue", "def getAllModules(self):\n\n modules = cmds.ls(type=\"network\")\n returnMods = []\n for module in modules:\n attrs = cmds.listAttr(module)\n if \"parent\" in attrs:\n returnMods.append(module)\n\n return returnMods", "def getExternalFiles(self):\n return []", "def modules():\n cmd = \"{} -M\".format(_detect_os())\n ret = {}\n ret[\"static\"] = []\n ret[\"shared\"] = []\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n for line in out:\n comps = line.split()\n if not comps:\n continue\n if \"(static)\" in line:\n ret[\"static\"].append(comps[0])\n if \"(shared)\" in line:\n ret[\"shared\"].append(comps[0])\n return ret", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def _load_all_modules(path, names):\n module_names = []\n # For each module in the current directory...\n for importer, module_name, is_package in pkgutil.iter_modules(\n [os.path.dirname(path)]\n ):\n # print(\"importing:\", names + '.' + module_name)\n # Import the module.\n importlib.import_module(names + '.' + module_name)\n module_names.append(module_name)\n\n return module_names", "def glob(path: str) -> list[str]:\n fs, relative_path = url_to_fs(path)\n return cast(list[str], fs.glob(relative_path))", "def get_all_vdirs(path):\n items = glob.glob(path)\n return items", "def available_modules(self, user):\n return [sitecomp for sitecomp in self.enabled_modules() if sitecomp.has_perm(user)]", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def listdir(self, path: bytes) -> List[bytes]:\n directories, files = self.storage.listdir(path.decode())\n return (\n [b\".\", b\"..\"]\n + [name.encode() for name in directories if name]\n + [name.encode() for name in files if name]\n )", "def get_plugin_modules_from_dir(path, prefix='user_'):\n modules = []\n for f in path.glob('*.py'):\n name = plugin_package + \".\" + prefix + f.stem\n # FIXME: Drop str() when this is Python 3.6+\n spec = importlib.util.spec_from_file_location(name, str(f))\n module = importlib.util.module_from_spec(spec)\n sys.modules[name] = module\n spec.loader.exec_module(module)\n modules.append(module)\n return modules", "def get_drivers(dirpath):\n\n return all_drivers", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))", "def list_dir_no_hidden(path):\n\n return glob(os.path.join(path, \"*\"))", "def modules(cls):\n members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))\n modules = [module for name, module in members if not name.startswith('_')]\n return modules", "def list_modules(self) -> Optional[List[str]]:\n module_list: List[str] = []\n for forge_module in self._forge_modules:\n module_list.append(forge_module.name)\n for git_module in self._git_modules:\n module_list.append(git_module.name)\n return module_list", "def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)", "def urls(self):\n patterns = []\n for sitecomp in self.modules():\n patterns.append(sitecomp.urls)\n pass\n return patterns", "def _import_package_files():\n\n\timport os\n\texports = []\n\tglobals_, locals_ = globals(), locals()\n\tpackage_path = os.path.dirname(__file__)\n\tpackage_name = os.path.basename(package_path)\n\n\tfor filename in 
os.listdir(package_path):\n\t\tmodulename, ext = os.path.splitext(filename)\n\t\tif modulename[0] != '_' and ext in ('.py', '.pyw'):\n\t\t\tsubpackage = '%s.%s' % (package_name, modulename) # package relative\n\t\t\tmodule = __import__(subpackage, globals_, locals_, [modulename])\n\t\t\tmodict = module.__dict__\n\t\t\tnames = (modict['__all__'] if '__all__' in modict else\n\t\t\t\t[name for name in modict if name[0] != '_']) # all public\n\t\t\texports.extend(names)\n\t\t\tglobals_.update((name, modict[name]) for name in names)\n\n\treturn exports", "def remote_paths(self) -> list:\r\n results: list = []\r\n\r\n if self.imports_node is not None:\r\n results.extend([node.text for node in filter(is_import_node, self.imports_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n if self.folders_node is not None:\r\n results.extend([node.text for node in filter(is_folder_node, self.folders_node)\r\n if startswith(node.text, self.remote_schemas, ignorecase=True)])\r\n\r\n return results", "def relative_to_buildroot(self):\n return [os.path.join(self.rel_path, source) for source in self.source_paths]", "def _get_import_paths(self) -> list:\r\n results: list = []\r\n\r\n if self.imports_node is None:\r\n return []\r\n\r\n for import_node in filter(is_import_node, self.imports_node):\r\n import_path: str = import_node.text\r\n\r\n if startswith(import_path, self.remote_schemas, ignorecase=True):\r\n local_path = self._get_remote_path(import_node)\r\n PapyrusProject.log.info(f'Adding import path from remote: \"{local_path}\"...')\r\n results.append(local_path)\r\n continue\r\n\r\n if import_path == os.pardir or startswith(import_path, os.pardir):\r\n import_path = import_path.replace(os.pardir, os.path.normpath(os.path.join(self.project_path, os.pardir)), 1)\r\n elif import_path == os.curdir or startswith(import_path, os.curdir):\r\n import_path = import_path.replace(os.curdir, self.project_path, 1)\r\n\r\n # relative import paths should be relative to the project\r\n if not os.path.isabs(import_path):\r\n import_path = os.path.join(self.project_path, import_path)\r\n\r\n import_path = os.path.normpath(import_path)\r\n\r\n if os.path.isdir(import_path):\r\n results.append(import_path)\r\n else:\r\n PapyrusProject.log.error(f'Import path does not exist: \"{import_path}\"')\r\n sys.exit(1)\r\n\r\n return PathHelper.uniqify(results)", "def modules_registered(self) -> list[Module]:\n return [cmds[0].module for cmds in self._registry[\"by_module\"].values()]", "def _list_plugins_on_fs(cls):\n return os.listdir(settings.PLUGINS_PATH)", "def get_modules(file_path):\n path = file_path.split('/')\n\n # filter out non-java file\n if not path[-1].endswith(\".java\"):\n return\n\n top_directory = path[0]\n\n if top_directory in [\"core\", \"codegen\"]:\n return core_modules_to_test\n if top_directory in [\"http-clients\"]:\n return http_modules_to_test.get(path[1])\n elif top_directory== \"services\":\n return path[1]", "def GetModules(cls, file):\n modules = set([])\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n for sig in ModuleFinder.__signatures:\n match = sig.match(line)\n if match:\n module = match[1].split('.')[0]\n # Remove alias:\n module = ModuleFinder.__removeAlias.sub('', module)\n modules.add(module)\n break\n return modules", "def getAllImportFiles():\n\tdef get_path(base):\n\t\tb, t = os.path.split(base)\n\t\tif __name__ == t:\n\t\t\treturn [\"animation_nodes\"]\n\t\telse:\n\t\t\treturn get_path(b) + [t]\n\n\tfor root, dirs, files in 
os.walk(currentPath):\n\t\tpath = \".\".join(get_path(root))\n\t\tfor f in filter(lambda f:f.endswith(\".py\"), files):\n\t\t\tname = f[:-3]\n\t\t\tif not name == \"__init__\":\n\t\t\t\tyield path + \".\" + name", "def _list_modules():\r\n return [\r\n desc.module_class\r\n for desc\r\n in _list_descriptors()\r\n ]", "def get_items(path, only=None):\n path = os.path.expanduser(path)\n ps = [os.path.join(path, n)\n for n in os.listdir(path)\n if not n.startswith('.') and len(n) == 4]\n ps = [p for p in ps if os.path.isdir(p)]\n if only is not None:\n ps = [p for p in ps if nmrex.utils.fname(p) in only]\n return ps", "def _load_modules(path):\n import pkgutil\n import importlib\n # TODO: Consider how we should customize this prefix in case we want to load plugins from multiple paths / packages\n package_prefix = \"pyjen.plugins.\"\n retval = []\n\n # First, check to see if any plugins have already been loaded\n import sys\n for cur_module_name in sys.modules:\n if cur_module_name.startswith(package_prefix):\n cur_module = sys.modules[cur_module_name]\n\n # Make sure to exclude 'package initialization' modules\n if cur_module is not None and not cur_module.__file__.endswith(\"__init__.py\"):\n retval.append(cur_module)\n\n # TODO: Consider whether we need to consider re-loading plugins in certain cases\n\n # If not, then lets walk all modules in the plugin path and load them all\n for loader, name, ispkg in pkgutil.iter_modules([path], package_prefix):\n if not ispkg:\n cur_mod = importlib.import_module(name)\n\n if not cur_mod in retval:\n retval.append(cur_mod)\n\n # TODO: See whether we need to consider loading newly added plugins that may not have existed the first time this function is called\n return retval", "def demo_paths(self):\n base_path = os.path.join(self.module.__path__[0], 'demo')\n paths = []\n if os.path.isdir(base_path):\n for item in os.listdir(base_path):\n # TODO: support examples which is not auto-loaded\n if not os.path.isdir(os.path.join(base_path, 'examples')):\n paths.append(os.path.join(base_path, item))\n return paths", "def listdir(self, path):\n if self.__volume.isdir(path):\n return self.get_dirs(path)\n return ([],[])", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def load_all_submodules():\n # Load all modules in the current directory.\n pattern_list = _load_all_modules(__file__, __name__)\n return pattern_list", "def module_yamls(self):\n # app.yaml first (correspond to 'default' module), then everything else.\n yamls = self._modules.copy()\n return [yamls.pop('default').path] + [m.path for m in yamls.itervalues()]", "def list_directory(self, path):\n dirent = self.lookup(path)\n if dirent and dirent.is_directory():\n best_fit = self.retrieve_catalog_for_path(path)\n return best_fit.list_directory(path)", "def scrub_from_sys_modules():\n for k, m in sys.modules.items():\n if k in sys_modules_whitelist:\n continue\n\n if hasattr(m, '__file__') and m.__file__ is not None:\n mp = pathlib.Path(m.__file__)\n if pex_root in mp.parents:\n yield k", "def all_possible_beards(paths):\n literal_paths = get_literal_beard_paths(paths)\n\n for path in literal_paths:\n for f in os.listdir(path):\n if is_module(os.path.join(path, f)):\n yield os.path.basename(f)", "def listdir(url: str) -> Iterable[str]:\n entries = scandir(url)\n return (str(entry.url) for entry in entries)", "def read_nodules(path):\n annotation = parse_annotation(path)\n nodules = annotation_to_nodules(annotation)\n return nodules", "def clist() 
-> None:\n files = os.listdir(os.path.join(os.getcwd(), \"apis\"))\n files.remove(\"__init__.py\")\n files = [f.replace(\".py\", \"\") for f in files]\n print(files)", "def importdir(path):\n paths = []\n extensions = ['ogg']\n dirlist = []\n try:\n dirlist = os.listdir(path)\n except OSError:\n pass\n\n for filename in dirlist:\n if os.path.isdir(os.path.join(path, filename.strip())):\n for filex in importdir(os.path.join(path,\n filename.strip())):\n paths.append(filex)\n else:\n ext = filename.split('.')[-1]\n if ext in extensions:\n paths.append(os.path.join(path, filename.strip()))\n\n return paths", "def get_library_content(self):\n from glob import glob\n try:\n os.path.isdir(self.source)\n lst = glob(self.source + '/*')\n except TypeError:\n lst = self.source\n dircheck = True\n while dircheck is True:\n dircheck = False\n newlst = []\n for entry in lst:\n if os.path.isdir(entry):\n newlst.extend(glob(entry + '/*'))\n dircheck = True\n else:\n newlst.append(entry)\n lst = newlst\n return lst", "def to_list(self):\n return list(convert_submodules(self.__dict__).values())", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def _local_dir(self):\n return []", "def _find_all_importables(pkg: ModuleType) -> List[str]:\n return sorted(\n set(\n chain.from_iterable(\n _discover_path_importables(Path(p), pkg.__name__)\n # FIXME: Unignore after upgrading to `mypy > 0.910`. The fix\n # FIXME: is in the `master` branch of upstream since Aug 4,\n # FIXME: 2021 but has not yet been included in any releases.\n # Refs:\n # * https://github.com/python/mypy/issues/1422\n # * https://github.com/python/mypy/pull/9454\n for p in pkg.__path__ # type: ignore[attr-defined]\n ),\n ),\n )", "def find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def get_object_list(self, url):\n path = self.base_path / url\n return [\n os.fspath((Path(dirpath) / filename).relative_to(path))\n for dirpath, _, files in os.walk(path)\n for filename in files\n if filename != path\n ]", "def listdir(self):\n if self._isurl(self._baseurl):\n raise NotImplementedError(\n \"Directory listing of URLs, not supported yet.\")\n else:\n return os.listdir(self._baseurl)", "def local_paths(self) -> List[Path]:\n return self._local_paths" ]
[ "0.68155396", "0.6617278", "0.6521537", "0.6507804", "0.64241713", "0.6303963", "0.62988365", "0.62711245", "0.6268034", "0.62417346", "0.62394094", "0.61733466", "0.61073136", "0.60500586", "0.60428715", "0.6029033", "0.60251296", "0.600245", "0.59671587", "0.5930028", "0.5929299", "0.5927241", "0.5895827", "0.5867134", "0.5865177", "0.5848004", "0.58411664", "0.58408874", "0.58360225", "0.58226395", "0.58163244", "0.58154285", "0.5798375", "0.5797482", "0.5795082", "0.578506", "0.57781523", "0.574493", "0.5742712", "0.5738588", "0.5735013", "0.5734711", "0.5732822", "0.5730233", "0.57184875", "0.57131195", "0.5704104", "0.57006145", "0.5696095", "0.56912965", "0.5675978", "0.5663239", "0.5652964", "0.5651096", "0.5639003", "0.56375384", "0.56367546", "0.56367373", "0.5633414", "0.5619497", "0.56192017", "0.5612656", "0.5601854", "0.5595838", "0.55927265", "0.55910635", "0.55801046", "0.55770427", "0.55721486", "0.556446", "0.55639285", "0.55580837", "0.55570537", "0.55559355", "0.5555553", "0.5553891", "0.5552196", "0.5549754", "0.5547797", "0.5543255", "0.5517325", "0.55170715", "0.55119455", "0.55066454", "0.5503449", "0.5495202", "0.54884624", "0.5482799", "0.5477598", "0.5475062", "0.5474797", "0.5471265", "0.5470346", "0.54593885", "0.54540306", "0.5453108", "0.545244", "0.54484105", "0.5442864", "0.5440805" ]
0.81640553
0
Main script to verify modules included.
def main():
    mock_uri = ''
    inventory = fetch_inventory(SphinxApp, mock_uri, OBJECT_INVENTORY_RELPATH)
    sphinx_mods = set(inventory['py:module'].keys())

    library_dir = os.path.join(BASE_DIR, 'gcloud')
    public_mods = get_public_modules(library_dir, base_package='gcloud')
    public_mods = set(public_mods)

    if not sphinx_mods <= public_mods:
        unexpected_mods = sphinx_mods - public_mods
        message = ['Unexpected error. There were modules referenced by '
                   'Sphinx that are not among the public modules.']
        message.extend(['- %s' % (mod,) for mod in unexpected_mods])
        print('\n'.join(message), file=sys.stderr)
        sys.exit(1)

    undocumented_mods = public_mods - sphinx_mods
    # Remove ignored modules.
    undocumented_mods -= IGNORED_MODULES

    if undocumented_mods:
        message_parts = ['Found undocumented public modules:']
        message_parts.extend(['- ' + mod_name
                              for mod_name in sorted(undocumented_mods)])
        print('\n'.join(message_parts), file=sys.stderr)
        sys.exit(1)
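A self-contained sketch of the set arithmetic main() relies on (the module names and the ignore set below are invented for illustration):

IGNORED_MODULES = {'gcloud.demo'}                      # assumed ignore set
sphinx_mods = {'gcloud.storage', 'gcloud.datastore'}   # modules Sphinx documents
public_mods = {'gcloud.storage', 'gcloud.datastore',
               'gcloud.bigquery', 'gcloud.demo'}       # modules found on disk
assert sphinx_mods <= public_mods                      # no unexpected Sphinx-only modules
undocumented = public_mods - sphinx_mods - IGNORED_MODULES
assert undocumented == {'gcloud.bigquery'}             # this would trigger the failure exit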
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_import():\n print('[GenHub] Checking Python modules.')\n\n basemod = [('yaml', 'pyyaml'), ('pycurl', 'pycurl')]\n devmod = ['pep8', 'pytest', 'pytest-cov', 'coverage']\n\n packages = dict()\n for importname, packagename in basemod:\n try:\n importlib.import_module(importname)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n for packagename in devmod:\n try:\n importlib.import_module(packagename)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n\n rundep = False\n for pkg in packages:\n char = '+'\n msg = 'Installed.'\n if packages[pkg] is False:\n char = '-'\n msg = 'Not installed!'\n rundep = True\n print('%c package %-12s: %s' % (char, pkg, msg))\n if rundep is True:\n print('Please install these dependencies before proceding')\n print('')", "def test_check_module(self) -> None:\n check_module(\"os\")", "def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def check_dependencies():\n\n # Check for python version\n print(\"Python location : {}\".format(sys.executable))\n print(\"Python version : {}\".format(sys.version))\n if sys.version_info[0] < 3:\n warnings.warn(\n \"WARNING : Using python 2. This Python version is no longer maintained. Use at your own risk.\"\n )\n\n # Check FSL installation\n try:\n print(f\"Your fsl directory is located here: {os.environ['FSLDIR']}\")\n except KeyError:\n raise AssertionError(\n \"You do not have FSL installed! See installation instructions here: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FslInstallation\"\n )\n\n # Check AFNI installation\n try:\n print(\n f\"Your AFNI directory is located here: {subprocess.check_output('which afni', shell=True, universal_newlines=True)}\"\n )\n except subprocess.CalledProcessError:\n raise AssertionError(\n \"You do not have AFNI installed! See installation instructions here: https://afni.nimh.nih.gov/pub/dist/doc/htmldoc/background_install/main_toc.html\"\n )", "def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # additional files\n check_one(filepath)", "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "def check(self, test_modules=__all__):\n\n # if test suite is being running from within forcebalance module, append the forcebalance prefix\n if __name__==\"forcebalance.test.__init__\":\n test_modules = [\"forcebalance.test.\" + test_module for test_module in test_modules]\n\n for test_module in test_modules:\n __import__(test_module)", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def check_versions(ctx, show=False):\n sys.path.insert(0, os.path.join(ROOT_DIR, '_tools'))\n import versions\n versions.main()", "def test_module(self):\n pass", "def checklib(module):\n import imp\n for mod in module:\n try:\n imp.find_module(mod)\n ret = 1\n except ImportError as imperror:\n print(imperror)\n ret = 0\n return ret", "def test_module_imports(self):\n apps = [\n 'customers',\n 'customers.migrations',\n 'customers.management',\n 'customers.management.commands',\n 'customers.management.commands.load_customers_to_redis',\n 'customers.forms',\n 'customers.admin',\n 'customers.models',\n 'customers.urls',\n 'customers.views',\n ]\n for a in apps:\n self.assertTrue(module_exists(a))", "def test_vendored_libjuju(self):\n for name in sys.modules:\n if name.startswith(\"juju\"):\n module = sys.modules[name]\n if getattr(module, \"__file__\"):\n print(getattr(module, \"__file__\"))\n assert re.search('n2vc', module.__file__, re.IGNORECASE)\n\n # assert module.__file__.find(\"N2VC\")\n # assert False\n return", "def test_imports():\n assert False", "def modules():", "def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_rlmm_imported():\n assert \"rlmm\" in sys.modules", "def checkDependencies(check=True):\n modules = []\n f = open(CONST_REQUIREMENTS_FILE)\n for line in f:\n if line.find('#'):\n modules.append([line[:line.index('=')], (line[line.index('=')+2:]).strip()])\n f.close()\n\n for module in modules:\n try:\n __import__(module[0])\n except ImportError: \n if query_user_bool(\"Missing module %s.\" \\\n \" Do you wish to install it?\" % module[0]):\n subprocess.call([\"pip2\", \"install\", \"%s==%s\" %\n (module[0], module[1])])\n \n else:\n return False\n return True", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_require():", "def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! 
\" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version", "def testFindsBuiltins(self):\r\n self.assertEqual('sys', modulefinder.get_module_filename('sys'))\r\n self.assertEqual('time', modulefinder.get_module_filename('time'))", "def check_imports():\n try:\n import dns # pylint: disable=C0415,W0611 # noqa: F401\n import ecdsa # pylint: disable=C0415,W0611 # noqa: F401\n import google.protobuf # pylint: disable=C0415,W0611 # noqa: F401\n import jsonrpclib # pylint: disable=C0415,W0611 # noqa: F401\n import pyaes # pylint: disable=C0415,W0611 # noqa: F401\n import qrcode # pylint: disable=C0415,W0611 # noqa: F401\n import requests # pylint: disable=C0415 # noqa: F401\n except ImportError as i_e:\n sys.exit(\"Error: %s. Try 'sudo pip install <module-name>'\" % str(i_e))\n from google.protobuf import descriptor # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import message # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import reflection # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import ( # pylint: disable=C0415,W0611 # noqa: F401\n descriptor_pb2,\n )\n from jsonrpclib import ( # pylint: disable=C0415,W0611 # noqa: F401\n SimpleJSONRPCServer,\n )\n\n # make sure that certificates are here\n certs = requests.utils.DEFAULT_CA_BUNDLE_PATH\n if not os.path.exists(certs):\n raise AssertionError(\"Certificates not found\")", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def sanity_check_step(self):\n\n dirs = [os.path.join('include', 'gromacs')]\n\n # in GROMACS v5.1, only 'gmx' binary is there\n # (only) in GROMACS v5.0, other binaries are symlinks to 'gmx'\n bins = []\n libnames = []\n if LooseVersion(self.version) < LooseVersion('5.1'):\n bins.extend(['editconf', 'g_lie', 'genbox', 'genconf', 'mdrun'])\n\n if LooseVersion(self.version) >= LooseVersion('5.0'):\n bins.append('gmx')\n libnames.append('gromacs')\n if LooseVersion(self.version) < LooseVersion('5.1') and self.toolchain.options.get('usempi', None):\n bins.append('mdrun')\n else:\n libnames.extend(['gmxana', 'gmx', 'md'])\n # note: gmxpreprocess may also already be there for earlier versions\n if LooseVersion(self.version) > LooseVersion('4.6'):\n libnames.append('gmxpreprocess')\n\n # also check for MPI-specific binaries/libraries\n if self.toolchain.options.get('usempi', None):\n if LooseVersion(self.version) < LooseVersion('4.6'):\n mpisuff = self.cfg['mpisuffix']\n else:\n mpisuff = '_mpi'\n\n bins.extend([binary + mpisuff for binary in bins])\n libnames.extend([libname + mpisuff for libname in libnames])\n\n suff = ''\n # add the _d suffix to the suffix, in case of the double precission\n if re.search('DGMX_DOUBLE=(ON|YES|TRUE|Y|[1-9])', self.cfg['configopts'], re.I):\n suff = '_d'\n\n libs = ['lib%s%s.%s' % (libname, suff, self.libext) for libname in libnames]\n\n # pkgconfig dir not available for earlier versions, exact version to use here is unclear\n if LooseVersion(self.version) >= LooseVersion('4.6'):\n dirs.append(os.path.join(self.lib_subdir, 'pkgconfig'))\n\n custom_paths = {\n 'files': [os.path.join('bin', b + suff) for b in bins] + 
[os.path.join(self.lib_subdir, l) for l in libs],\n 'dirs': dirs,\n }\n super(EB_GROMACS, self).sanity_check_step(custom_paths=custom_paths)", "def verify():\n verbose = True\n log(\n \"Verifying current directory as a Dallinger experiment...\",\n verbose=verbose,\n )\n ok = verify_package(verbose=verbose)\n if ok:\n log(\"✓ Everything looks good!\", verbose=verbose)\n else:\n log(\"☹ Some problems were found.\", verbose=verbose)", "def test_ufedmm_imported():\n assert \"ufedmm\" in sys.modules", "def test_ifPythonModuleIsInstalled():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"pyModule\" in testConfig.config:\n print \"pyModule: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfPythonModuleIsInstalled, testConfig.config", "def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)", "def test_main_results():\n # Due to complexities testing with arguments to get full coverage\n # run the script externally with full arguments\n os.popen('python3 -m pip install -e .')\n os.popen(\n 'python3 Examples/WSO.py -url cn1234.awtest.com -username citests -password hunter2 -tenantcode shibboleet'\n ).read()\n\n filename = \"uem.json\"\n\n assert AUTH.check_file_exists(filename) is True\n assert AUTH.verify_config(filename, 'authorization',\n AUTH.encode(\"citests\", \"hunter2\")) is True\n assert AUTH.verify_config(filename, 'url', \"cn1234.awtest.com\") is True\n assert AUTH.verify_config(filename, 'aw-tenant-code', \"shibboleet\") is True", "def test_parrot_imported():\n assert \"parrot\" in sys.modules", "def setup_module():\n\n c = Config()\n if c.get('general', 'in_production'): # pragma: no cover\n raise RuntimeError(\"DO NOT run destructive test on production system\")\n\n \"Pull in the filesystem dump from a previous mirth run\"\n mi = MirthInteraction()\n mi.restore_database()\n\n \"Run a quick sanity check, whole module requires a populated db\"\n connection = db_connection('warehouse')\n count = connection.session.query(HL7_Msh).count()\n connection.disconnect()\n\n if count < 4000:\n err = \"Minimal expected count of records not present. \"\\\n \"Be sure to run 'process_testfiles_via_mirth' as a prerequisite\"\n raise RuntimeError(err)", "def testCheckPythonModule(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency = dependencies.DependencyDefinition('os')\n result, _ = dependency_helper._CheckPythonModule(dependency)\n self.assertTrue(result)\n\n dependency = dependencies.DependencyDefinition('bogus')\n result, _ = dependency_helper._CheckPythonModule(dependency)\n self.assertFalse(result)", "def test_mmelemental_imported():\n import sys\n\n assert \"mmelemental\" in sys.modules", "def check_prerequisites() -> None:\n # check black code formatter is installed\n if not is_installed(\"black\"):\n raise FileNotFoundError(\n \"Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html\"\n )\n\n # check isort code formatter is installed\n if not is_installed(\"isort\"):\n raise FileNotFoundError(\n \"Cannot find isort code formatter! 
To install, please follow this link: https://pycqa.github.io/isort/#installing-isort\"\n )\n\n # check protolint code formatter is installed\n if subprocess.call(f\"{base_protolint_command()} version\", shell=True) != 0: # nosec\n raise FileNotFoundError(\n \"Cannot find protolint protocol buffer schema file linter! To install, please follow this link: https://github.com/yoheimuta/protolint.\"\n )\n\n # check protocol buffer compiler is installed\n if not is_installed(\"protoc\"):\n raise FileNotFoundError(\n \"Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/\"\n )", "def run_tests(modules):\n print(\"Running integ tests in the following modules: \" + ', '.join(modules))\n modules_to_include = \"\"\n\n for m in modules:\n modules_to_include += \":\" + m + \",\"\n\n # remove last comma\n modules_to_include = modules_to_include[:-1]\n\n # build necessary dependencies first\n check_call([\"mvn\", \"clean\", \"install\", \"-pl\", modules_to_include, \"-P\", \"quick\", \"--am\"])\n check_call([\"mvn\", \"verify\", \"-pl\", modules_to_include, \"-P\", \"integration-tests\", \"-Dfailsafe.rerunFailingTestsCount=1\"])", "def setup():\n find_modules('alerters')\n find_modules('watchers')\n find_modules('auditors')", "def check_dependencies(module):\n try:\n from fhempy import lib\n\n initfile = inspect.getfile(lib)\n fhempy_root = os.path.dirname(initfile)\n with open(fhempy_root + \"/\" + module + \"/manifest.json\", \"r\") as f:\n manifest = json.load(f)\n\n if \"requirements\" in manifest:\n for req in manifest[\"requirements\"]:\n logger.debug(\"Check requirement: \" + req)\n if is_installed(req) == False:\n logger.debug(\" NOK\")\n return False\n else:\n logger.debug(\" OK\")\n except FileNotFoundError:\n logger.error(\"manifest.json not found!\")\n\n return True", "def test_root_module_import():\n import cython_oracle\n\n assert cython_oracle.oracle.answer_to_all_questions() == 42", "def test_deprecated_modules(self):\n\n deprecated_modules_present = False\n\n deprecated_modules = [\n \"game_assets\",\n \"models\",\n \"world\",\n \"modular_assets\",\n ]\n\n for path in self.application_files:\n for module in deprecated_modules:\n module_text = open(path).read()\n found_reference = False\n if \"import %s\" % module in module_text:\n found_reference = True\n if \"from %s\" % module in module_text:\n found_reference = True\n\n if found_reference:\n print(\"Found '%s' reference in %s\" % (module, path))\n deprecated_modules_present = True\n\n self.assertFalse(deprecated_modules_present)", "def require():", "def test_importable():\n root_path = os.path.dirname(MY_DIRECTORY)\n\n for version in versioning.get_all_versions():\n v = version.label.replace(\".\", \"_\")\n path = os.path.join(root_path, v)\n module_names = [m[:-3] for m in os.listdir(path) if m.endswith(\".py\")]\n for name in module_names:\n m = importlib.import_module(\".\".join([\"kuber\", v, name]))\n assert m is not None, f\"Expected kuber.{v}.{m} to be importable.\"", "def test_qm_project_python_testing_imported():\n assert \"qm_project_python_testing\" in sys.modules", "def check_dependencies(cls):\n\n missing = []\n for name in cls.DEPENDENCIES:\n try:\n import_module(name)\n except ModuleNotFoundError:\n missing.append(name)\n\n if any(missing):\n msg = ('The sup3r stitching module depends on the following '\n 'special dependencies that were not found in the active '\n 'environment: {}'.format(missing))\n logger.error(msg)\n raise ModuleNotFoundError(msg)", 
"def test_check(self):\n\n self.assertTrue(Naive().check(self.file_gitignore))\n self.assertTrue(Naive().check(self.file_tests))\n self.assertTrue(Naive().check(self.file_bin))\n self.assertTrue(Naive().check(self.file_py))\n self.assertTrue(Naive().check(self.file_authors))", "def test_imports():\n import sys\n import src\n assert 'sklearn.feature_extraction' not in sys.modules.keys()", "def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())", "def check_guards():\n issues = []\n\n def report(path):\n relpath = relative_to_project(path)\n lines = read_lines(path)\n\n res = validate_guards(relpath, lines)\n if res is not None:\n issues.append((relpath, res))\n\n for_each_header(report)\n\n if issues:\n for path, msg in issues:\n print(f\"{path}: {msg}\")\n sys.exit(1)", "def self_check(self):\n out = \"Loaded components\\n\"\n for package_name, package in sorted(self.packages.items()):\n out += \"\\t%s:\\n\" % package_name\n for c, fd in sorted(package.components.iteritems()):\n out += \"\\t\\t%s (%s)\\n\" % (c, fd.filename)\n\n LOG.info(out)\n\n for p in self.packages.values():\n for f in p.files:\n for id in f.requires:\n # This throws if it doesn't find something.\n try:\n self.get(id)\n except:\n LOG.exception(\"Error in: \" + f.filename)\n raise", "def count_scripts_without_module_load():\n repos_scripts = '/apps/leuven/icts/jobscripts/2017-10-10'\n script_files = glob.glob(repos_scripts + '/**/*.SC', recursive=True)\n n_scr = len(script_files)\n\n n_OK, n_fail = 0, 0 # for jobs that failed\n n_has, n_does_not = 0, 0 # for modules that have even a single \"module load ...\" \n for i, script_file in enumerate(script_files):\n try:\n with scripts.script(script_file) as scr:\n if scr.loaded:\n n_has += 1\n else:\n n_does_not += 1\n n_OK += 1\n except: # FileNotFoundError or etc.\n pass\n# n_fail += 1\n\n n_mod = n_has + n_does_not\n p_has = n_has / n_mod * 1e2\n p_does_not = n_does_not / n_mod * 1e2\n\n n_tot = n_scr\n n_fail= n_tot - n_OK\n p_OK = n_OK / n_tot * 1e2\n p_fail= n_fail / n_tot * 1e2\n\n print('\\nn_scr={0}'.format(n_scr))\n print('n_OK:{0}, n_fail:{1}'.format(n_OK, n_fail))\n print('n_has:{0}, n_does_not:{1}'.format(n_has, n_does_not))\n\n print('\\n\"{0}\" scripts have module loads and \"{1}\" do not load modules'.format(n_has, n_does_not))\n print('\"{0:.2f}%\" have module loads, and \"{1:.2f}%\" do not load modules'.format(p_has, p_does_not))\n print('\"{0:.2f}%\" of jobs succeeded, out of \"{1}\" jobs'.format(p_OK, n_tot))\n\n try: \n assert n_OK == n_has + n_does_not\n print('Assert: numbers match as expected\\n')\n return -1\n except AssertionError:\n print('AssertionError: numbers do not match: n_tot != n_has + n_does_not\\n')\n return 0", "def test_deps( deps ):\n\n success = []\n error = []\n for dep in deps:\n mod_name = dep[0]\n\n try:\n mod = __import__( mod_name )\n except ImportError:\n print \"FAILURE: Could not import\", mod_name\n error.append( mod_name )\n continue\n\n try:\n mod_version = mod.__version__.split('.')\n requested_mod_version = dep[1].split('.')\n for i in range( len( requested_mod_version ) ):\n if int( mod_version[i] ) < int( requested_mod_version[i] ):\n raise ImportError\n except ImportError:\n print \"FAILURE: Module\", mod_name, \"needs version\", requested_mod_version, \"but version\", mod_version, \"found\"\n error.append( mod_name )\n continue\n except AttributeError:\n# no .__version__\n pass\n\n print 
\"Success: \", mod_name\n success.append( mod_name )\n\n return ( success, error )", "def check_dependencies():\n required_found = True\n recommended_found = True\n print 'Checking dependencies ...\\n'\n print 'Required dependencies:'\n try:\n import Image\n assert Image.VERSION >= '1.1.5'\n print ' Python Imaging Library ....... OK'\n except ImportError:\n print ' !!! Python Imaging Library ... Not found'\n required_found = False\n except AssertionError:\n print ' !!! Python Imaging Library ... version', Image.VERSION,\n print 'found'\n print ' !!! Python Imaging Library 1.1.5 or higher is required'\n required_found = False\n if not required_found:\n print '\\nCould not find all required dependencies!'\n print 'Please install them and try again.'\n sys.exit(1)\n print", "def test_WW95_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import WW95 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.WW95\", test]", "def test_check_module(self):\n cmd = f\"{SRC_LISTER} -i {PWD}/test_pins.v\"\n with subprocess.Popen(\n shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n try:\n out, err = proc.communicate(timeout=5)\n assert len(err.strip()) == 0, \"Unexpected a circular reference error\"\n except subprocess.TimeoutExpired as te:\n print(\"Unexpected parser timeout during infinite loop test\")\n modules = read_files.find_modules(f\"{PWD}/test_pins.v\")\n assert len(modules) == 3, \"Wrong number of modules detected\"\n # check modules name\n assert [m[0] for m in modules] == [\"test_1\", \"test_2\", \"test_3\"], \"Wrong modules' name detected\"\n # create a modules\n ms = []\n for module in modules:\n m = verilog_repr.Module(module[0]) # set the name\n if module[1]:\n m.parse_parameters(module[1])\n m.parse_pins(module[2])\n m.parse_parameters(module[-1])\n m.parse_pins(module[-1])\n ms.append(m)\n assert len(m.params.keys()) == 2, \"Wrong number of parameters detected in module\"\n assert len(m.pins) == 9, \"Wrong number of pins detected in module\"\n for i in range(9):\n ps = [m.pins[i] for m in ms]\n assert all(ps[0].name == p.name for p in ps), \"Unexpected discrepancy on pins name\"\n assert all(ps[0].type == p.type for p in ps), \"Unexpected discrepancy on pins type\"\n assert all(ps[0].direction == p.direction for p in ps), \"Unexpected discrepancy on pins direction\"\n assert all(ps[0].lsb == p.lsb for p in ps), \"Unexpected discrepancy on pins lsb\"\n if ps[0].name in [\"rstb\"]:\n assert all(ps[1].msb == p.msb for p in ps[1:]), \"Unexpected discrepancy on pins msb\"\n assert ps[0].msb != ps[1].msb, \"Unexpected agreement between msb of test_1 and others\"\n assert all(ps[1].width == p.width for p in ps[1:]), \"Unexpected discrepancy on pins width\"\n assert ps[0].width != ps[1].width, \"Unexpected agreement between width of test_1 and others\"\n else:\n assert all(ps[0].msb == p.msb for p in ps), \"Unexpected discrepancy on pins msb\"\n assert all(ps[0].width == p.width for p in ps), \"Unexpected discrepancy on pins width\"", "def test_scripts_are_installed(self):\n fits_file = os.path.join(self.datadir, 'monol_testA.evt')\n command = 'HENreadfile {0}'.format(fits_file)\n sp.check_call(command.split())", "def validate():\n if not os.path.exists(os.path.join(ROOT, APP, '__init__.py')):\n message = ansi.error() + ' Python module not found.'\n if os.environ.get('LORE_APP') is None:\n message += ' $LORE_APP is not set. Should it be different than \"%s\"?' % APP\n else:\n message += ' $LORE_APP is set to \"%s\". 
Should it be different?' % APP\n sys.exit(message)\n\n if exists():\n return\n\n if len(sys.argv) > 1:\n command = sys.argv[1]\n else:\n command = 'lore'\n sys.exit(\n ansi.error() + ' %s is only available in lore '\n 'app directories (missing %s)' % (\n ansi.bold(command),\n ansi.underline(VERSION_PATH)\n )\n )", "def sanity_check_step(self):\n\n custom_paths = {\n 'dirs': ['lib/pkgconfig', 'bin'],\n }\n\n shlib_ext = get_shared_lib_ext()\n\n extra_files = []\n\n # ELPA uses the following naming scheme:\n # \"onenode\" suffix: no MPI support\n # \"openmp\" suffix: OpenMP support\n if self.toolchain.options.get('usempi', None):\n mpi_suff = ''\n else:\n mpi_suff = '_onenode'\n\n for with_omp in nub([False, self.toolchain.options.get('openmp', False)]):\n if with_omp:\n omp_suff = '_openmp'\n else:\n omp_suff = ''\n\n extra_files.append('include/elpa%s%s-%s/elpa/elpa.h' % (mpi_suff, omp_suff, self.version))\n extra_files.append('include/elpa%s%s-%s/modules/elpa.mod' % (mpi_suff, omp_suff, self.version))\n\n extra_files.append('lib/libelpa%s%s.a' % (mpi_suff, omp_suff))\n if self.cfg['with_shared']:\n extra_files.append('lib/libelpa%s%s.%s' % (mpi_suff, omp_suff, shlib_ext))\n\n custom_paths['files'] = extra_files\n\n super(EB_ELPA, self).sanity_check_step(custom_paths=custom_paths)", "def run_module():\n parser = ap.ArgumentParser()\n parser.add_argument(\"--dry_run\", action=\"store_true\", help=\"When provided, return zero exit\"\n \" status irrespective of the number of failures\")\n args = parser.parse_args()\n params = read_params()\n assert \"validation\" in params\n dry_run_param = params[\"validation\"][\"common\"].get(\"dry_run\", False)\n params[\"validation\"][\"common\"][\"dry_run\"] = args.dry_run or dry_run_param\n validator = Validator(params)\n validator.validate().print_and_exit(\n get_structured_logger(__name__,\n params[\"common\"].get(\"log_filename\", None)),\n not (args.dry_run or dry_run_param))", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def ImportsTest(recipe, allowed_modules):\n\n for _, val in sorted(recipe.global_symbols.iteritems()):\n if isinstance(val, types.ModuleType):\n module_name = val.__name__\n for pattern in allowed_modules:\n if pattern.match(val.__name__):\n break\n else:\n yield ('In %s:\\n'\n ' Non-whitelisted import of %s' % (recipe.path, module_name))", "def test_molssi_project_imported():\n assert \"molssi_project\" in sys.modules", "def testCheckDependencies(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency_helper.CheckDependencies(verbose_output=False)", "def check_dependencies(work_dir, fits_dir, fitsbase):\n # Print to screen what processing steps have been selected\n print \"The following processing steps have been selected:\\n\"\n if params.do_rfifind:\n print \" - PRESTO rfifind (RFI mitigation tools)\"\n if params.do_prepsub:\n print \" - PRESTO prepsubband (dedispersion)\"\n if params.do_candsearch:\n print \" - PRESTO acceleration search and candidate sifting\"\n if params.do_presto_sp:\n print \" - PRESTO singlepulse search (singlepulse.py)\"\n # Print to screen what processing steps are being skipped\n print \"\\nThe following processing steps are being skipped:\\n\"\n if params.do_rfifind == 0:\n print \" - PRESTO rfifind (RFI mitigation tools)\"\n if params.do_prepsub == 0:\n print \" - PRESTO prepsubband (dedispersion)\"\n if params.do_candsearch == 0:\n print \" - PRESTO acceleration search and candidate sifting\"\n if params.do_presto_sp == 0:\n print \" - PRESTO singlepulse search (singlepulse.py)\"\n print \"\\nChecking dependencies...\\n\"\n # There must be at least one .fits file in the fits directory\n fl = glob(fits_dir + '/%s*.fits' %fitsbase)\n if len(fl):\n print \" Found %d file(s) in %s:\\n\" %(len(fl), fits_dir)\n for i in fl:\n print \" %s\\n\" %(i.split('/')[-1])\n else:\n print \" No %s*.fits files found in %s !\\n Exiting...\\n\" %(fitsbase, fits_dir)\n sys.exit(0)\n # If skipping the RFIFIND step in processing but want to do\n # processing steps further down the line, then there must be a\n # rfi_products folder in the results directory with a .mask file\n # in it\n if params.do_rfifind == 0 and params.use_mask and \\\n (params.do_prepsub or params.do_candsearch or params.do_presto_sp):\n mlist = glob(work_dir + '/rfi_products/*.mask')\n if len(mlist):\n print \" Using RFI .mask:\\n %s\\n\" %(mlist[0])\n else:\n print \" No RFI .mask found in %s/rfi_products!\\n Exiting...\\n\"\\\n %(work_dir)\n sys.exit(0)\n # If skipping the PREPSUBBAND step in processing but want to\n # do processing steps further down the line, then there must be\n # de-dispersed time series files in the results directory of\n # the form basename*DM*.dat and basename*DM*.inf\n if params.do_prepsub == 0 and (params.do_candsearch or \n params.do_presto_sp):\n dats = glob(work_dir + '/*DM*dat')\n infs = glob(work_dir + '/*DM*inf')\n if not (len(dats) and len(infs)):\n print \" No .dat and/or .inf files in %s!\\n Exiting...\\n\" %(work_dir)\n sys.exit(0)\n # If we haven't exited by now, then things should be good\n print \"\\nLooks good...\\n\\n\"\n # Pause for a few seconds so you can actually read the output\n time.sleep(5)", "def verify():", "def check_init_file(name : str, init_path : 'Path') -> list:\n\n # 
Read __init__ if it exists\n\n package_modules = []\n\n try:\n\n # Check for __all__ global variable assignments\n\n pattern = re.compile(r\"\"\"(?:^__all__\\s*=\\s*)(\\[[^\\[\\]]*\\]$)\"\"\", re.MULTILINE)\n\n with open(str(init_path), 'r') as init_file:\n\n logging.getLogger('GUM Dispenser').info('Found __init__.py')\n\n init_contents = init_file.read()\n\n # Grab the capturing group\n\n init_results = [current_match.group(1).strip() for current_match in pattern.finditer(init_contents)\n if not current_match.group(1) is None and\n not (current_match.group(1).isspace() or current_match.group(1) == '')]\n\n logging.getLogger('GUM Dispenser').debug(init_results)\n\n if len(init_results) > 0:\n\n logging.getLogger('GUM Dispenser').info('Found __all__ declaration. Using ' +\n str(init_results[-1]) + ' as module list')\n\n package_modules = ast.literal_eval(init_results[-1])\n\n\n # Make sure specified modules exist before we go further\n\n ensure_modules_exist(package_modules, init_path.parent)\n\n else:\n\n logging.getLogger('GUM Dispenser').warning('__init__.py __all__ definition was not found for package ' +\n name + '. Treating all same level .py files' +\n ' as included modules...')\n\n # Safely handle case where __init__ does not exist\n except FileNotFoundError:\n\n logging.getLogger('GUM Dispenser').warning('__init__.py does not exist for package ' + name +\n '. Treating all same level .py files as included modules...')\n\n if len(package_modules) == 0:\n\n # Grab all .py files in package directory\n\n src_dir = init_path.parent\n\n logging.getLogger('GUM Dispenser').debug('Parent directory: ' + str(src_dir))\n\n os.chdir(src_dir)\n\n package_modules = glob.glob('*.py')\n\n package_modules = [name.rstrip('.py') for name in package_modules]\n\n\n return package_modules", "def test_check(self):\n\n self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))", "def CheckPrerequisites(_):\n _LocalDataPath(RUN_SCRIPT)\n _LocalDataPath(CONFIG_FILE)", "def test_include(self):\n\n include_example = os.path.join(here, 'include-example.ini')\n parser = ManifestParser(manifests=(include_example,))\n\n # All of the tests should be included, in order:\n self.assertEqual(parser.get('name'),\n ['crash-handling', 'fleem', 'flowers'])\n self.assertEqual([(test['name'], os.path.basename(test['manifest'])) for test in parser.tests],\n [('crash-handling', 'bar.ini'), ('fleem', 'include-example.ini'), ('flowers', 'foo.ini')])\n\n\n # The manifests should be there too:\n self.assertEqual(len(parser.manifests()), 3)\n\n # We already have the root directory:\n self.assertEqual(here, parser.rootdir)\n\n\n # DEFAULT values should persist across includes, unless they're\n # overwritten. 
In this example, include-example.ini sets foo=bar, but\n # it's overridden to fleem in bar.ini\n self.assertEqual(parser.get('name', foo='bar'),\n ['fleem', 'flowers'])\n self.assertEqual(parser.get('name', foo='fleem'),\n ['crash-handling'])\n\n # Passing parameters in the include section allows defining variables in\n #the submodule scope:\n self.assertEqual(parser.get('name', tags=['red']),\n ['flowers'])\n\n # However, this should be overridable from the DEFAULT section in the\n # included file and that overridable via the key directly connected to\n # the test:\n self.assertEqual(parser.get(name='flowers')[0]['blue'],\n 'ocean')\n self.assertEqual(parser.get(name='flowers')[0]['yellow'],\n 'submarine')\n\n # You can query multiple times if you need to::\n flowers = parser.get(foo='bar')\n self.assertEqual(len(flowers), 2)\n\n # Using the inverse flag should invert the set of tests returned:\n self.assertEqual(parser.get('name', inverse=True, tags=['red']),\n ['crash-handling', 'fleem'])\n\n # All of the included tests actually exist::\n self.assertEqual([i['name'] for i in parser.missing()], [])\n\n # Write the output to a manifest:\n buffer = StringIO()\n parser.write(fp=buffer, global_kwargs={'foo': 'bar'})\n self.assertEqual(buffer.getvalue().strip(),\n '[DEFAULT]\\nfoo = bar\\n\\n[fleem]\\n\\n[include/flowers]\\nblue = ocean\\nred = roses\\nyellow = submarine')", "def test_include():\n from bst import BST", "def check_submodules():\n if not os.path.exists('.git'):\n return\n with open('.gitmodules') as f:\n for l in f:\n if 'path' in l:\n p = l.split('=')[-1].strip()\n if not os.path.exists(p):\n raise ValueError('Submodule %s missing' % p)\n\n\n proc = subprocess.Popen(['git', 'submodule', 'status'],\n stdout=subprocess.PIPE)\n status, _ = proc.communicate()\n status = status.decode(\"ascii\", \"replace\")\n for line in status.splitlines():\n if line.startswith('-') or line.startswith('+'):\n raise ValueError('Submodule not clean: %s' % line)", "def testPynocleImportsPynocle(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(THISDIR, '__init__')\r\n self.assertEqual(expected, modulefinder.get_module_filename('pynocle', __file__))", "def ensure_modules_exist(found_modules: list, package_path: 'Path') -> None:\n\n for current_module in found_modules:\n\n module_path = package_path.joinpath(current_module + '.py')\n\n # Check result here to make sure module exists\n\n try:\n\n with open(str(module_path), 'r') as module_file:\n\n logging.getLogger('GUM Dispenser').info('Successfully opened ' + current_module)\n\n except FileNotFoundError:\n\n raise SourceModuleNotFoundError('The module named ' + current_module + ', specified in __init__.py, ' +\n 'does not exist')\n\n logging.getLogger('GUM Dispenser').info('All specified modules exist')", "def test_expected_successes(modpath):\n retcode, out = flake8(join(modpath, \"test_cases\"))\n assert not retcode, out", "def _check_dependencies(self):\n imgmin = exists('imgmin')\n image_optim = exists('image_optim')\n\n if not imgmin or not image_optim:\n puts(p('Dependencies have not been installed:'))\n\n message = 'imgmin - https://github.com/rflynn/imgmin'\n message = s('✓ ' + message) if imgmin else e('✗ ' + message)\n puts(message)\n\n message = 'image_optim - http://rubygems.org/gems/image_optim'\n message = s('✓ ' + message) if image_optim else e('✗ ' + message)\n puts(message)\n\n sys.exit(0)", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", 
get_files)", "def test_ensureWhenNotImported(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\", \"m3\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None, \"m3\": None})", "def test_imports():\n from tg_utils import admin\n from tg_utils import checks\n from tg_utils import compressor_filters\n from tg_utils import email\n from tg_utils import files\n from tg_utils import hashmodels\n from tg_utils import lock\n from tg_utils import managers\n from tg_utils import mixins\n from tg_utils import models\n from tg_utils import profiling\n from tg_utils import signals\n from tg_utils import uuid\n from tg_utils import decorators", "def test_ensureWhenNotImportedDontPrevent(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\")\n self.assertEqual(modules, {})", "def test_plot_ay_imported():\n assert \"plot_ay\" in sys.modules", "def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]", "def check_dependencies(module_name, check_version=False):\n\n module_cfg = MODULES[module_name]\n try:\n module = importlib.import_module(module_name)\n if module_cfg['check_version']:\n message = \"{inst_ver} {req_ver}\".format(\n inst_ver=INSTALLED_VERSION_MESSAGE.format(\n module_name,\n module.__version__,\n ),\n req_ver=REQ_VERSION_MESSAGE.format(\n name=module_name,\n version=module_cfg['version'],\n url=URL,\n ),\n )\n\n for index, package in enumerate(module_cfg['package']):\n _package = importlib.import_module(package)\n\n if module_cfg.get('method'):\n for method in module_cfg.get('method')[index]:\n if not hasattr(_package, method):\n sys.stderr.write(message)\n\n elif module_cfg.get('module'):\n for module in module_cfg.get('module')[index]:\n try:\n importlib.import_module(module)\n except ModuleNotFoundError:\n sys.stderr.write(message)\n\n return True\n except ModuleNotFoundError:\n message = \"{name} {text} <{url}>.\\n\".format(\n name=module_name,\n text='library is missing. 
Check requirements on the '\n 'manual page',\n url=URL,\n )\n sys.stderr.write(message)", "def main():\n check_slugs()\n check_identifiers()", "def check_dependencies():\n check_python_version()\n\n dependencies = [\"sqlite3\"]\n\n for dependency in dependencies:\n try:\n __import__(dependency)\n except ImportError as e:\n raise CuckooStartupError(\"Unable to import \\\"%s\\\"\" % dependency)\n\n return True", "def test_ifWhichCanFindBinary():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"binary\" in testConfig.config:\n print \"Binary: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfWhichCanFindBinary, testConfig.config", "def _is_rpm_all_lib_include_files_installed(self):\n return False", "def validate(self, module, config):\n from clarity_ext.extensions import ExtensionService\n extension_svc = ExtensionService(lambda _: None)\n config_obj = ConfigFromConventionProvider.get_extension_config(module)\n exception_count = 0\n\n for entry in config_obj:\n module = entry[\"module\"]\n try:\n extension_svc.run_test(config, None, module, False, True, True)\n print(\"- {}: SUCCESS\".format(module))\n except NoTestsFoundException:\n print(\"- {}: WARNING - No tests were found\".format(module))\n except Exception as e:\n # It's OK to use a catch-all exception handler here since this is only used while\n # running tests, so we want to be optimistic and try to run all tests:\n print(\"- {}: ERROR - {}\".format(module, e))\n print(\" Fresh run: clarity-ext extension {} test-fresh\".format(module))\n print(\" Review, then: clarity-ext extension {} freeze\".format(module))\n exception_count += 1\n\n return exception_count", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result", "def test_basic(testdir):\n testdir.makepyfile(\"\"\"import sys\"\"\")\n result = testdir.runpytest('--pylint')\n assert 'Missing module docstring' in result.stdout.str()\n assert 'Unused import sys' in result.stdout.str()\n assert 'Final newline missing' in result.stdout.str()\n assert 'passed' not in result.stdout.str()", "def main() -> None:\n verify_pip_is_installed()\n print('Regenerating \"requirements.txt\" file...')\n install_python_dev_dependencies.compile_pip_requirements(\n 'requirements.in', 'requirements.txt')\n # Adds a note to the beginning of the 'requirements.txt' file to make sure\n # developers understand that they should not append or change this\n # autogenerated file.\n with utils.open_file(\n common.COMPILED_REQUIREMENTS_FILE_PATH, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(\n '# Developers: Please do not modify this auto-generated file. If\\n'\n '# you want to add, remove, upgrade, or downgrade libraries,\\n'\n '# please change the `requirements.in` file, and then follow\\n'\n '# the instructions there to regenerate this file.\\n' + content)\n\n mismatches = get_mismatches()\n if mismatches:\n _rectify_third_party_directory(mismatches)\n validate_metadata_directories()\n else:\n print(\n 'All third-party Python libraries are already installed correctly.')", "def main(argv):\n #ouverture du fichier de log passé en paramètre\n with open(argv[0], 'r') as log:\n line = log.readline()\n result = \"\"\n #lecture ligne par ligne tant que l'on ne tombe pas sur une ligne vide\n while line != \"\" :\n #mise a jour d'un module\n if ' 1 modules...' 
in line:\n result = \"\"\n #nom du fichier de test chargé\n if 'TEST' in line and 'openerp.modules.loading' in line:\n result += 'File loaded: ' + line.split('module ')[1] + '\\n'\n #erreur déclenchée par le test\n if 'ERROR' in line and 'yaml_import' in line:\n result += '/!\\\\' + line.split('yaml_import:')[1] + '\\n'\n #infos sur une assertion échouée\n if 'test:' in line or 'values:' in line:\n result += '\\t' + line + '\\n'\n line = log.readline()\n\n #résumé du log de test\n nberror = result.count('/!\\\\')\n result += 'There is ' + str(nberror) + ' test(s) failed'\n print result", "def test_ifVersionIsCorrect():\n \n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"version\" in testConfig.config:\n print \"Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfVersionIsExact, testConfig.config\n \n if \"minimum_version\" in testConfig.config:\n print \"Minimum Version: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfMinimumVersionIsMet, testConfig.config", "def CheckPrereqs():\n logging.info('entering ...')\n\n if platform.system() != 'Linux' and platform.system() != 'Darwin':\n Die('Sorry, this script assumes Linux or Mac OS X thus far. '\n 'Please feel free to edit the source and fix it to your needs.')\n\n # Ensure source files are available.\n for f in [\n 'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',\n 'package.json', 'js/engine/validator.js', 'js/engine/validator_test.js',\n 'js/engine/validator-in-browser.js', 'js/engine/tokenize-css.js',\n 'js/engine/definitions.js', 'js/engine/parse-css.js',\n 'js/engine/parse-srcset.js', 'js/engine/parse-url.js'\n ]:\n if not os.path.exists(f):\n Die('%s not found. Must run in amp_validator source directory.' % f)\n\n # Ensure protoc is available.\n try:\n libprotoc_version = subprocess.check_output(['protoc', '--version'])\n except (subprocess.CalledProcessError, OSError):\n Die('Protobuf compiler not found. Try \"apt-get install protobuf-compiler\" '\n 'or follow the install instructions at '\n 'https://github.com/ampproject/amphtml/blob/main/validator/README.md#installation.'\n )\n\n # Ensure 'libprotoc 2.5.0' or newer.\n m = re.search(b'^(\\\\w+) (\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)', libprotoc_version)\n if (m.group(1) != b'libprotoc' or\n (int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):\n Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)\n\n # Ensure that the Python protobuf package is installed.\n for m in ['descriptor', 'text_format', 'json_format']:\n module = 'google.protobuf.%s' % m\n try:\n __import__(module)\n except ImportError:\n # Python3 needs pip3. Python 2 needs pip.\n if sys.version_info < (3, 0):\n Die('%s not found. Try \"pip install protobuf\" or follow the install '\n 'instructions at https://github.com/ampproject/amphtml/blob/main/'\n 'validator/README.md#installation' % module)\n else:\n Die('%s not found. Try \"pip3 install protobuf\" or follow the install '\n 'instructions at https://github.com/ampproject/amphtml/blob/main/'\n 'validator/README.md#installation' % module)\n\n # Ensure JVM installed. TODO: Check for version?\n try:\n subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError):\n Die('Java missing. Try \"apt-get install openjdk-7-jre\" or follow the'\n 'install instructions at'\n 'https://github.com/ampproject/amphtml/blob/main/validator/README.md#installation'\n )\n logging.info('... 
done')", "def test_examples():\n import airconics\n # pytest runs test files in ./__pycache__: need to go up two levels\n example_dir = os.path.abspath(\n os.path.join(__file__, '..', '..', 'examples', 'core'))\n example_scripts = os.listdir(example_dir)\n for script in example_scripts:\n if script.endswith('.py'):\n fname = os.path.join(example_dir, script)\n try:\n subprocess.check_call(['python', fname])\n except subprocess.CalledProcessError:\n raise AssertionError('Example {} failed'.format(fname))", "def _verify_include_files_used(self, file_uses, included_files):\n for include_file, use in file_uses.items():\n if not use & USES_DECLARATION:\n node, module = included_files[include_file]\n if module.ast_list is not None:\n msg = \"'{}' does not need to be #included\".format(\n node.filename)\n if use & USES_REFERENCE:\n msg += '; use a forward declaration instead'\n self._add_warning(msg, node)", "def test_parent_module_import():\n from cython_oracle import oracle\n\n assert oracle.answer_to_all_questions() == 42", "def main():\n sandbox = create_sandbox()\n directory = download_package_to_sandbox(\n sandbox,\n 'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'\n )\n print(directory)\n destroy_sandbox(sandbox)", "def check_req_utils():\n utils = (['dmenu', 'gpg', 'pass', 'xclip', 'exo-open', 'pkill'])\n for util in utils:\n if find_executable(util) is None:\n print(\"ERROR: Util '{}' is missing, install it before proceeding! Exiting!\".format(util))\n sys.exit(1)", "def check_init_files_and_folders():\n\t#['cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', 'cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', 'color_detect_2.py', 'dedupe.py', 'detect_image_group_ku.py', 'detect_shape_5.py', 'get_cam_id_2.py', 'get_image_8.py', 'gui_hsv.py', 'knaps.py', 'knapsack_2.py', 'maps.html', 'program_detect_rectangle.zip', 'start_capture.py']\n\tfile_list=[\n\t#'cascade_wimb_bus_front_100_stages_1000_pos_3000_neg.xml', \n\t'models/cascade_wimb_bus_front_33_stages_1000_pos_3000_neg_wrong.xml', \n\t#'color_detect_2.py', \n\t#'dedupe.py', \n\t'detect_bus_haar_group.py', \n\t#'detect_shape_5.py', \n\t'get_cam_detail.py', \n\t'get_image.py', \n\t#'gui_hsv.py', \n\t#'knaps.py', \n\t#'knapsack_2.py', \n\t#'maps.html', \n\t#'program_detect_rectangle.zip', \n\t'start_wimb.py',\n\t'g.php',\n\t]\n\tdirectory_list=[\n\t'images',\n\t'images_bgs',\n\t'images_bgs_mask',\n\t#'images_bgs_result',\n\t'images_color',\n\t'images_haar',\n\t'images_haar_result',\n\t'images_number',\n\t'images_number_result',\n\t'models',\n\t'images_old',\n\t'text_number',\n\t]\n\t\n\tfor file_name in file_list: print 'file '+file_name+' existed: '+str(os.path.isfile(file_name))\n\tfor directory_name in directory_list: \n\t\tprint 'directory '+directory_name+' existed: '+str(os.path.isdir(directory_name))\n\t\tif not os.path.isdir(directory_name): \n\t\t\tos.makedirs(directory_name)\n\t\tif \"images\" in directory_name: shutil.copy(path+'/g.php',path+'/'+directory_name+'/g.php')", "def test_can_load_relative_include(self):\r\n path = os.path.join(TEST_FILES_PATH, \"test_rel_include.bb\")\r\n steps, vars = ExecuteScriptFile(path, {})\r\n self.assertEquals(vars['test'], \"Hello World\")" ]
[ "0.6922173", "0.6404539", "0.639195", "0.6359169", "0.6339818", "0.6293124", "0.6275313", "0.61491907", "0.6106056", "0.6074857", "0.60627216", "0.602918", "0.5958757", "0.5958374", "0.5927664", "0.59225035", "0.5901187", "0.58883744", "0.58760595", "0.584319", "0.5842419", "0.58418435", "0.58329326", "0.58262974", "0.5820207", "0.58123153", "0.5805495", "0.5795288", "0.57888025", "0.5775069", "0.5766958", "0.57605773", "0.5754728", "0.57514954", "0.57437056", "0.5708994", "0.5708373", "0.5697147", "0.5691683", "0.56903124", "0.5687111", "0.56862134", "0.5660482", "0.5660445", "0.56507605", "0.5647055", "0.5646126", "0.5635215", "0.5634185", "0.56321526", "0.56290257", "0.5619774", "0.561495", "0.56040275", "0.56013787", "0.55870116", "0.55820364", "0.55816656", "0.5580843", "0.5566981", "0.55576116", "0.554752", "0.55472803", "0.5544331", "0.55426544", "0.55377674", "0.5537247", "0.5522982", "0.5504463", "0.54802835", "0.54550725", "0.5450736", "0.5444761", "0.5440938", "0.5440589", "0.5437011", "0.54231185", "0.54192543", "0.5418544", "0.54175526", "0.54160726", "0.5415732", "0.5410834", "0.5398208", "0.5389355", "0.53837883", "0.53834623", "0.5375696", "0.53735787", "0.5365705", "0.53626764", "0.53587765", "0.53585786", "0.5358204", "0.53503984", "0.5349783", "0.5349185", "0.53465873", "0.5342975", "0.5325105" ]
0.58290154
23
Initialise parameters for MARL training
def __init__( self, state_size, action_size, hidden_dim=128, state_rep_size=64, learning_rate=1e-5, eta=2 ): super(RND, self).__init__(state_size, action_size, eta) self.hidden_dim = hidden_dim self.state_rep_size = state_rep_size self.learning_rate = learning_rate self.predictor_dev = "cpu" self.target_dev = "cpu" # create models self.predictor_model = RNDNetwork(state_size, action_size, hidden_dim, state_rep_size) self.target_model = RNDNetwork(state_size, action_size, hidden_dim, state_rep_size) for param in self.target_model.parameters(): param.requires_grad = False self.optimizer = optim.Adam(self.predictor_model.parameters(), lr=learning_rate) self.loss = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_parameters(self):\n for i in range(1, self.L):\n self.W[i - 1] = np.random.randn(self.layer_dims[i], self.layer_dims[i - 1]) * 0.01\n self.b[i - 1] = np.zeros((self.layer_dims[i], 1))", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def init_params(self, parameters):\r\n max_epoch = parameters['num_epoch']\r\n momentum_rate = parameters['momentum']\r\n loss = parameters['loss_function']\r\n accuracy = parameters['accuracy']\r\n regularization = parameters['regularization']\r\n batch_size = parameters['batch_size']\r\n optimizer = parameters['optimizer'] if parameters['optimizer'] is not None else 'batch'\r\n self.__init__(max_epoch, optimizer, loss, accuracy, momentum_rate, regularization, batch_size)", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def initialize_parameters():\n\n W1 = tf.get_variable('W1', [3,3,3,64], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W2 = tf.get_variable('W2', [3,3,64,128], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W3 = tf.get_variable('W3', [3,3,128,256], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W4 = tf.get_variable('W4', [3,3,256,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n W5 = tf.get_variable('W5', [3,3,512,512], initializer=tf.contrib.layers.xavier_initializer(seed = 0))\n\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"W2\": W2,\n \"W3\": W3,\n \"W4\": W4,\n \"W5\": W5\n }\n\n return parameters", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k * 2],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k * 2],\n initializer=self.initializer)", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def _initialize_parameters(self):\n self.ent_emb = tf.get_variable('ent_emb', shape=[len(self.ent_to_idx), self.k],\n initializer=self.initializer)\n self.rel_emb = tf.get_variable('rel_emb', shape=[len(self.rel_to_idx), self.k],\n initializer=self.initializer)", "def __init__(self, parameters, learning_rate):\n self.parameters = parameters\n self.learning_rate = learning_rate", "def initialize_parameters(X, Y, nb_units_per_hidden_layer):\n # Your code here\n np.random.seed(1)\n params = {}\n L = len(nb_units_per_hidden_layer)\n params['W' + str(1)] = 
np.random.randn(nb_units_per_hidden_layer[0],X.shape[0] ) * 0.05\n params['b' + str(1)] = np.zeros((nb_units_per_hidden_layer[0], 1))\n\n for i in range(1, L):\n params['W' + str(i+1)] = np.random.randn(nb_units_per_hidden_layer[i], nb_units_per_hidden_layer[i - 1]) * 0.01\n params['b' + str(i+1)] = np.zeros((nb_units_per_hidden_layer[i], 1))\n params['W' + str(L+1)]= np.random.randn(1, nb_units_per_hidden_layer[L-1]) * 0.05\n params['b' + str(L+1)]= np.zeros((1,1))\n return params\n # raise NotImplementedError", "def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])", "def __init__(self, **kwargs):\n\n super(MLP, self).__init__()\n\n # TODO: why lbfgs and not adam?\n self.solver = kwargs.pop('solver', 'lbfgs')\n self.alpha = kwargs.pop('alpha', 1e-5)\n self.random_state = kwargs.pop('random_state', 1)\n\n # determine if the MLP can be initialized or not\n self.clf = None\n self.hidden_layer_sizes = kwargs.pop('hidden_layer_sizes', -1)\n if not (self.hidden_layer_sizes == -1):\n self.initMLPClassifier(**kwargs)", "def initialize_model_params():\n beta_0 = np.array([0., 0.])\n mu_0 = 0.\n return beta_0, mu_0", "def __init__(self, lam=1.0):\n self.lam = lam\n\n # these are set in fit\n self.b = None # float\n self.w = None # (nvars, ) array", "def param_init_lstm(options, params, prefix='lstm'):\n W = numpy.concatenate([ortho_weight(options['dim_proj']),\n ortho_weight(options['dim_proj']),\n ortho_weight(options['dim_proj']),\n ortho_weight(options['dim_proj'])], axis=1)\n params[_p(prefix, 'W')] = W\n U = numpy.concatenate([ortho_weight(options['dim_proj']),\n ortho_weight(options['dim_proj']),\n ortho_weight(options['dim_proj']),\n ortho_weight(options['dim_proj'])], axis=1)\n params[_p(prefix, 'U')] = U\n b = numpy.zeros((4 * options['dim_proj'],))\n params[_p(prefix, 'b')] = b.astype(config.floatX)\n\n return params", "def initialize_parameters(n_a,n_x,n_y):\n np.random.seed(1)\n Wax=np.random.randn(n_a,n_x)*0.01 #input to hidden\n Waa=np.random.randn(n_a,n_a)*0.01 #hidden to hidden\n Wya=np.random.randn(n_y,n_a)*0.01 #hidden to output\n b=np.zeros((n_a,1)) #hidden bias\n by=np.zeros((n_y,1)) #output bias\n \n parameters={\"Wax\":Wax,\"Waa\":Waa,\"Wya\":Wya,\"b\":b,\"by\":by}\n return parameters", "def _initialize_model(rngs):\n init_model_state, init_params = model_def.init(\n rngs, *dummy_input, train=False, debug=False).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if config.get('init_head_bias', None) is not None:\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / 
self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')", "def __init__(self,args, variance_epsilon=1e-12):\n super(BERTLayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(args.input_dim))\n self.beta = nn.Parameter(torch.zeros(args.input_dim))\n self.variance_epsilon = variance_epsilon", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def custom_init(init_params, seed=0):\n import numpy as np\n new_params = []\n rng = jax.random.PRNGKey(seed)\n i = 0\n number_layers = len([0 for l1 in init_params if len(l1) != 0])\n for l1 in init_params:\n if (len(l1)) == 0: new_params.append(()); continue\n new_l1 = []\n for l2 in l1:\n if len(l2.shape) == 1:\n # Zero init biases\n new_l1.append(jnp.zeros_like(l2))\n else:\n n = max(l2.shape)\n first = int(i == 0)\n last = int(i == number_layers - 1)\n mid = int((i != 0) * (i != number_layers - 1))\n mid *= i\n\n std = 1.0 / np.sqrt(n)\n std *= 2.2 * first + 0.58 * mid + n * last\n\n if std == 0:\n raise NotImplementedError(\"Wrong dimensions for MLP\")\n\n new_l1.append(jax.random.normal(rng, l2.shape) * std)\n rng += 1\n i += 1\n\n new_params.append(new_l1)\n\n return new_params", "def initialize(self):\n\n\t\tparameters = {}\n\t\tL = len(self.layer_dims) # number of layers in the network\n\n\t\tfor l in range(1, L):\n\t\t\tparameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1]) * 0.01\n\t\t\tparameters['b' + str(l)] = np.zeros((self.layer_dims[l], 1))\n\n\t\t\tassert(parameters['W' + str(l)].shape == (self.layer_dims[l], self.layer_dims[l-1]))\n\t\t\tassert(parameters['b' + str(l)].shape == (self.layer_dims[l], 1))\n\n\t\treturn parameters", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def initialize_parameters(layer_dim):\n #tf.set_random_seed(0)\n L= len(layer_dim)\n parameters={}\n for i in range(1,L):\n parameters[\"W\" +str(i)] = tf.get_variable(\"W\"+str(i), [layer_dim[i],layer_dim[i-1]], initializer = tf.contrib.layers.xavier_initializer(seed=1))\n parameters[\"b\" +str(i)] = tf.get_variable(\"b\" +str(i),[layer_dim[i],1],initializer= tf.zeros_initializer())\n assert(parameters['W' + str(i)].shape == (layer_dim[i], layer_dim[i-1]))\n assert(parameters['b' + str(i)].shape == (layer_dim[i], 1))\n return parameters", "def reset_parameters(self):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n nn.init.normal_(self.embed.weight, mean=0.0, std=self.d_model ** -0.5)\n nn.init.constant_(self.embed.weight[self.pad], 0)\n if self.output is not None and not self.tie_embedding:\n nn.init.xavier_uniform_(self.output.weight)\n nn.init.constant_(self.output.bias, 0.0)", "def _set_train_params(self,\n lr: float = 1e-3,\n l2norm: float = 1e-2,\n ):\n self.lr = lr\n self.l2norm = l2norm\n self.optimizer = torch.optim.Adam(\n self.model.parameters(), lr=lr, weight_decay=l2norm)", "def __init__(self, initial_lr: float, k: float):\n super().__init__()\n self.initial_lr = initial_lr\n self.k = k", "def 
init_parameters(self):\n # Create the weights and biases\n for i in range(1, len(self.layer_dimensions)):\n # Initialization from He et al.\n mu = 0\n var = 2 / self.layer_dimensions[i]\n sigma = np.sqrt(var)\n weight_shape = (self.layer_dimensions[i - 1], self.layer_dimensions[i])\n weight = np.random.normal(loc=mu, scale=sigma, size=weight_shape)\n bias = np.zeros((self.layer_dimensions[i], ))\n\n # Saving in the parameters dict\n layer_weight = \"w_\" + str(i)\n self._parameters[layer_weight] = weight\n layer_b = \"b_\" + str(i)\n self._parameters[layer_b] = bias", "def init_params(self):\n self.conv = Conv(self.conv_layers[0][-1], self.out_channels, padding=self.padding,stride=self.stride)\n self.W = torch.randn(self.num_labels, self.cout_numel, requires_grad=True)\n self.T = torch.randn(self.num_labels, self.num_labels, requires_grad=True)", "def parameter_initialization(self):\n dictsize = settings.PARS.get('numBases')\n numClass = self.train_labels.shape[0] # number of objects\n Dinit = np.empty((self.train_feats.shape[0], 0)) # for C-Ksvd and D-Ksvd\n dictLabel = np.empty((numClass, 0), dtype=np.int)\n numPerClass = dictsize//numClass\n param1 = {\n 'mode': 2,\n 'K': settings.PARS.get('numBases'), # size of the dictionary\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'iter': settings.PARS.get('iterationini')\n }\n param2 = {\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'mode': 2\n }\n\n for classid in range(numClass):\n col_ids = np.array(np.nonzero(self.train_labels[classid, :] == 1)).ravel()\n # ensure no zero data elements are chosen\n data_ids = np.array(np.nonzero(np.sum(self.train_feats[:, col_ids]**2, axis=0) > 1e-6)).ravel()\n\n # Raising an error if any zero lement is found\n if col_ids.shape[0] != data_ids.shape[0]:\n raise DatasetZeroElementFound\n\n # Initilization for LC-KSVD (perform KSVD in each class)\n Dpart = self.train_feats[:, col_ids[np.random.choice(data_ids, numPerClass, replace=False)]]\n param1['D'] = Dpart # initial dictionary\n Dpart = trainDL(self.train_feats[:, col_ids[data_ids]], **param1)\n Dinit = np.c_[Dinit, Dpart]\n labelvector = np.zeros((numClass, 1), dtype=np.int)\n labelvector[classid] = 1\n dictLabel = np.c_[dictLabel, np.tile(labelvector, (1, numPerClass))]\n\n param1['D'] = np.asfortranarray(Dinit) # initial dictionary\n # RuntimeError: matrix arg 10 must be a 2d double Fortran Array\n self.train_feats = self.train_feats if np.isfortran(self.train_feats) else np.asfortranarray(self.train_feats)\n Dinit = trainDL(self.train_feats, **param1)\n Xinit = lasso(self.train_feats, Dinit, **param2)\n\n # learning linear classifier parameters\n tmp = np.linalg.inv(Xinit@Xinit.T+np.eye(*(Xinit@Xinit.T).shape))@Xinit\n Winit = tmp@self.train_labels.T\n Winit = Winit.T\n\n Q = np.zeros((dictsize, self.train_feats.shape[1])) # energy matrix\n\n for frameid in range(self.train_feats.shape[1]):\n label_training = self.train_labels[:, frameid]\n maxid1 = label_training.argmax(0)\n\n for itemid in range(Dinit.shape[1]):\n label_item = dictLabel[:, itemid]\n maxid2 = label_item.argmax(0)\n\n if maxid1 == maxid2:\n Q[itemid, frameid] = 1\n\n Tinit = tmp@Q.T\n Tinit = Tinit.T\n\n return Dinit, Winit, Tinit, Q", "def __init__(self,m):\n # initialize model parameters\n \n # w is the m x 1 vector of weights.\n # m: num of features\n self.w = np.random.rand(m)", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def __init__(self, *args, **kwargs):\n self.classes = [0,1] # (default to 0/1; 
replace during training)\n self.theta = np.array([]) # placeholder value before training\n\n if len(args) or len(kwargs): # if we were given optional arguments,\n self.train(*args,**kwargs) # just pass them through to \"train\"", "def init_parameters(obj, hyperparameters):\n # Initialize Global Configuration Parameter\n params = hyperparameters['global']\n setattr(obj, 'param', params)\n\n # Initialize Attributes (Pre-Checked Parameters)\n setattr(obj, 'learning_rate', params['learning_rate'])\n setattr(obj, 'loss', params['loss'])\n setattr(obj, 'max_iter', params['max_iter'])\n\n if params['loss'] == 'least_squares':\n setattr(obj, 'num_classes', 1)\n elif params['loss'] in ['binary_crossentropy', 'categorical_crossentropy', 'auto']:\n setattr(obj, 'num_classes', params['num_classes'])\n\n # Initialize Attributes (Optional Values - Based on Default Parameters)\n if 'l2_regularization' not in params or params['l2_regularization'] is None:\n setattr(obj, 'l2_regularization', 0)\n else:\n setattr(obj, 'l2_regularization', params['l2_regularization'])\n\n if 'max_bins' not in params:\n setattr(obj, 'max_bins', 255)\n else:\n setattr(obj, 'max_bins', params['max_bins'])\n\n if 'max_depth' not in params or params['max_depth'] is None:\n setattr(obj, 'max_depth', None)\n else:\n setattr(obj, 'max_depth', params['max_depth'])\n\n if 'max_leaf_nodes' not in params or params['max_leaf_nodes'] is None:\n setattr(obj, 'max_leaf_nodes', 31)\n else:\n setattr(obj, 'max_leaf_nodes', params['max_leaf_nodes'])\n\n if 'min_samples_leaf' not in params or params['min_samples_leaf'] is None:\n setattr(obj, 'min_samples_leaf', 20)\n else:\n setattr(obj, 'min_samples_leaf', params['min_samples_leaf'])\n\n if 'random_state' in params:\n setattr(obj, 'random_state', params['random_state'])\n else:\n setattr(obj, 'random_state', None)\n\n if 'scoring' in params:\n setattr(obj, 'scoring', params['scoring'])\n else:\n setattr(obj, 'scoring', None)\n\n if 'verbose' not in params or params['verbose'] is None:\n setattr(obj, 'verbose', False)\n else:\n setattr(obj, 'verbose', True)\n\n return obj", "def _initialize_model(rngs):\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state", "def initialize_setup(self, init_lr):\n param_list = []\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n param_list.append(param)\n\n self.optimizer = torch.optim.AdamW(\n param_list, lr=init_lr, eps=1e-6)\n\n self.optim_scheduler = get_linear_schedule_with_warmup(\n self.optimizer, num_warmup_steps=0,\n num_training_steps=len(self.train_examples) * self.max_epochs)\n\n if not path.exists(self.model_path):\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n # Try to initialize the mention model part\n if path.exists(self.pretrained_mention_model):\n print(\"Found pretrained model!!\")\n checkpoint = torch.load(self.pretrained_mention_model)\n self.model.load_state_dict(checkpoint['model'], strict=False)\n else:\n logger.info('Loading previous model: %s' % 
self.model_path)\n # Load model\n self.load_model(self.model_path)", "def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)", "def init_parameters(self):\n stdv = 1. / math.sqrt(self.weight.data.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)", "def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)", "def __init__(self, K=5, alpha=None, lmda=1, n_jobs=8, nr_em_epochs=10):\n self.alpha = alpha\n if self.alpha is None:\n self.alpha = 1./K\n self.lmda = lmda\n\n self.K = K\n\n self.n_jobs = n_jobs\n self.nr_em_epochs = nr_em_epochs", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.params = {'weight': np.random.normal(loc = 0, scale=0.0001, size=(out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n \n self.grads = {'weight': np.zeros((out_features,in_features)),\\\n 'bias': np.zeros((1, out_features))}\n ########################\n # END OF YOUR CODE #\n #######################", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with kaiming_uniform style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if p.dim() == 1:\n nn.init.constant_(p, 0.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', 0.0))\n elif p.dim() in [2, 4]:\n nn.init.kaiming_uniform_(p, mode='fan_in', nonlinearity='relu')\n logger.info('Initialize %s with %s / %.3f' % (n, 'kaiming_uniform', param_init))\n else:\n raise ValueError(n)", "def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3", "def __init__(self, size, parameters):\n\n self.weights = self.init_weights(size)\n self.alpha = parameters['alpha']\n self.epsilon = parameters['epsilon']\n self.gamma = parameters['gamma']\n self.value = 0.0 #np.random.random()", "def __init__(self, *args, 
**kwargs):\n # Initializing the test & training set\n self._x_train = kwargs['X_train']\n self._y_train = kwargs['Y_train']\n self._x_test = kwargs['X_test']\n self._y_test = kwargs['Y_test']\n\n self.num_iteration = kwargs['num_iteration']\n self.learning_rate = kwargs['learning_rate']", "def __init__(self, in_features, out_features):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n self.in_features = in_features\n self.out_features = out_features\n\n self.__MEAN = 0\n self.__STD = 0.0001\n\n self.params = {\n 'weight': np.random.normal(loc=self.__MEAN, scale=self.__STD, size=(out_features, in_features)), \n 'bias': np.zeros(out_features),\n }\n self.grads = {\n 'weight': None, \n 'bias': None,\n }\n\n self.input_cache = None\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def __init__(self):\n self.num_examples_per_epoch = 99999\n self.optimizer = \"Adam\"\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 0.0001\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0001\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5000", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, train):\n return", "def init(self, parameters, agent_parameters):\n pass", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, b).toarray().flatten()\n )", "def initialize(self):\n params = {}\n for i in range(1, len(self.layer_dimensions)):\n params['b_' + str(i)] = 
np.ones((self.layer_dimensions[i], 1))\n if self.he_initialization:\n params['W_' + str(i)] = np.random.randn(self.layer_dimensions[i],\n self.layer_dimensions[i - 1]) * np.sqrt(\n 2 / self.layer_dimensions[i - 1])\n else:\n params['W_' + str(i)] = np.random.rand(self.layer_dimensions[i], self.layer_dimensions[i - 1]) - 0.5\n return params", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)", "def reset_parameters_lecun(self, param_init=0.1):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def set_initial_params(model: LogisticRegression):\n n_classes = 15 # threat types\n n_features = 33 # Number of features in dataset\n model.classes_ = np.array([i for i in range(15)])\n\n model.coef_ = np.zeros((n_classes, n_features))\n if model.fit_intercept:\n model.intercept_ = np.zeros((n_classes,))", "def inititalize_parameters(self, nodes_of_layers, training_data_size):\n\n\t\tassert(self.layers == len(nodes_of_layers))\n\t\tassert(2 == len(training_data_size))\n\t\tself.w_array = [np.array([0])]\n\t\tself.b_array = [np.array([0])]\n\t\tfeatures, nums = training_data_size\n\n\t\t# initialize the parameters of layer one\n\t\tself.w_array.append(np.random.randn(nodes_of_layers[0], features)\n\t\t\t\t\t\t\t* np.sqrt(1 / nums))\n\t\tself.b_array.append(np.zeros((nodes_of_layers[0], 1)))\n\n\t\tfor layer in range(1, self.layers):\n\t\t\tself.w_array.append(np.random.randn(nodes_of_layers[layer],\n\t\t\t\t\t\t\t\tnodes_of_layers[layer - 1])\n\t\t\t\t\t\t\t\t* np.sqrt(1 / nodes_of_layers[layer - 1]))\n\t\t\tself.b_array.append(np.zeros((nodes_of_layers[layer], 1)))\n\t\treturn self.w_array, self.b_array", "def __init__(self, config, variance_epsilon=1e-12):\r\n super(BertLayerNorm, self).__init__()\r\n self.gamma = nn.Parameter(torch.ones(config.hidden_size))\r\n self.beta = nn.Parameter(torch.zeros(config.hidden_size))\r\n self.variance_epsilon = variance_epsilon", "def __init__(self, modelwithparams=None, random_number=-1, problem_type='infer'):\n self.modelwithparams = modelwithparams\n self.oldpara = self.modelwithparams\n self.random_number = random_number\n self.flag = True\n self.problem_type = problem_type", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n self.fitness = cons.init_fit # Classifier fitness - initialized to a constant initial fitness value", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def init_paramters(self):\r\n carb_bg_ratio = 5.0\r\n time_to_breakdown = 45.0\r\n insulin_bg_ratio = 50.0\r\n time_to_peak = 45.0\r\n basal_rate = 0.0\r\n digestion_speed = 1.0\r\n activation_speed = 1.0\r\n\r\n # set state to 
initial\r\n self.S = [self.carb_bg_ratio, self.time_to_breakdown,\r\n self.insulin_bg_ratio, self.time_to_peak,\r\n self.basal_rate, self.digestion_speed,\r\n self.activation_speed]", "def __initialize_nlp(self, nlp):\n nlp[\"nbQ\"] = 0\n nlp[\"nbQdot\"] = 0\n nlp[\"nbTau\"] = 0\n nlp[\"nbMuscles\"] = 0\n nlp[\"plot\"] = {}\n nlp[\"var_states\"] = {}\n nlp[\"var_controls\"] = {}\n nlp[\"CX\"] = self.CX\n nlp[\"x\"] = nlp[\"CX\"]()\n nlp[\"u\"] = nlp[\"CX\"]()\n nlp[\"J\"] = []\n nlp[\"g\"] = []\n nlp[\"g_bounds\"] = []\n nlp[\"casadi_func\"] = {}", "def _init_node_parm(self, key):\n wf_net_conf = WorkFlowNetConfML(key)\n self.model_path = wf_net_conf.model_path\n self.ml_class = wf_net_conf.ml_class\n self.config = wf_net_conf.config\n self.batch_size = 10000\n self.model_type = wf_net_conf.model_type\n\n #Todo 어떻게 꺼내는지 승우씨한테 물어볼것\n _wf_data_conf = wf_data_conf(key.split('_')[0]+'_'+key.split('_')[1]+'_'+'dataconf_node')\n self.data_conf = _wf_data_conf.conf\n self.label = _wf_data_conf.label\n self.cell_feature = _wf_data_conf.cell_feature\n self.cross_cell = _wf_data_conf.cross_cell\n self.extend_cell_feature = _wf_data_conf.extend_cell_feature\n self.label_values = _wf_data_conf.label_values\n\n _wf_data_node = wf_data_node(key.split('_')[0] + '_' + key.split('_')[1] + '_' + 'data_node')\n self.multi_read_flag = _wf_data_node.multi_node_flag\n self.predict_path = _wf_data_node.predict_path", "def __init__(self, lr, eps=1e-6):\n LearningRate.__init__(self, lr)\n\n self.epsilon = eps\n self.parameters = []", "def _setup_from_parameters(self,params):\n\n # SHOULD WE CHECK HERE THAT INPUT PARAMETERS HAVE SAME KP / Z_STAR ?\n\n # copy input dictionary\n self.linP_params=params.copy()\n\n # will add polynomial describing the log power, around kp_kms\n linP_kms_2=0.5*params['alpha_star']\n linP_kms_1=params['n_star']\n A_star=(2*np.pi**2)*params['Delta2_star']/self.kp_kms**3\n linP_kms_0=np.log(A_star)\n linP_kms = np.poly1d([linP_kms_2,linP_kms_1,linP_kms_0])\n # why are we storing this poly1d object? 
When do we actually use it?\n self.linP_params['linP_kms']=linP_kms", "def default_training_params():\n N_EPOCHS = 100\n BATCH_SIZE = 64\n EPSILON = 0.0001\n return N_EPOCHS, BATCH_SIZE, EPSILON", "def _initial_setup(self, **train_kwargs):\n self._update(time_step=0., **train_kwargs)", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def __init__(self, learning_rate, momentum_coef):\n self.learning_rate = learning_rate\n self.momentum_coef = momentum_coef", "def __init__(self, epochs, **kwargs):\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0", "def __init__(self, params = None, layer_sizes = None, scale = 0.1, rs=npr.RandomState(0)):\n self.layer_sizes = layer_sizes\n\n if params is not None:\n self.param = params\n else:\n if layer_sizes is None:\n raise Exception(\"Please provide the layer sizes\")\n\n self.params = [(scale * rs.randn(m, n), # weight matrix\n scale * rs.randn(n)) # bias vector\n for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]", "def __init__(self, k=3, min_iter=50, max_iter=1000, learn_rate=1e-7,\n regularization=0.5, convergence_tol=0.001, use_pca=True,\n verbose=False, preprocessor=None):\n self.k = k\n self.min_iter = min_iter\n 
self.max_iter = max_iter\n self.learn_rate = learn_rate\n self.regularization = regularization\n self.convergence_tol = convergence_tol\n self.use_pca = use_pca\n self.verbose = verbose\n super(_base_LMNN, self).__init__(preprocessor)", "def _set_params_initializer(self, hparams, mode, scope):\n\t\tself.mode = mode\n\t\t# pre_train flag is used for distinguish with pre_train and fine tune\n\t\tif hparams.enable_vae:\n\t\t\t_info('Enable VAE')\n\t\t\tself.enable_vae = True\n\t\t\tself.pre_train = hparams.pre_train\n\t\telse:\n\t\t\tself.enable_vae = False\n\t\t\tself.pre_train = False\n\t\tself.dtype = tf.float32\n\t\tself.global_step = tf.Variable(0, trainable=False)\n\n\t\t# define the input for the model\n\t\tself.encoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='encoder_input_data')\n\t\tself.decoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='decoder_input_data')\n\t\tself.decoder_output_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='decoder_output_data')\n\t\tself.seq_length_encoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None], name='seq_length_encoder_input_data')\n\t\tself.seq_length_decoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None], name='seq_length_decoder_input_data')\n\t\t\n\t\t# load some important hparamters\n\t\tself.unit_type = hparams.unit_type\n\t\tself.num_units = hparams.num_units\n\t\tself.num_encoder_layers = hparams.num_encoder_layers\n\t\tself.num_decoder_layers = hparams.num_decoder_layers\n\t\tself.num_encoder_residual_layers = self.num_encoder_layers - 1\n\t\tself.num_decoder_residual_layers = self.num_decoder_layers - 1\n\n\t\tself.batch_size = tf.size(self.seq_length_encoder_input_data)\n\n\t\t# set initializer\n\t\trandom_seed = hparams.random_seed\n\t\tinitializer = _mh.get_initializer(hparams.init_op, random_seed, hparams.init_weight)\n\t\ttf.get_variable_scope().set_initializer(initializer)\n\n\t\t# embeddings\n\t\tself.src_vocab_size = hparams.src_vocab_size\n\t\tself.tgt_vocab_size = hparams.tgt_vocab_size\n\t\tself.init_embeddings(hparams, scope)", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if p.dim() == 1:\n nn.init.constant_(p, 0.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', 0.0))\n elif p.dim() == 2:\n nn.init.uniform_(p, a=-param_init, b=param_init)\n logger.info('Initialize %s with %s / %.3f' % (n, 'uniform', param_init))\n else:\n raise ValueError(n)", "def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution 
=====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def __init__(self, params: Iterable[nn.Parameter]):\n self.params = params\n self.param_states = [p.requires_grad for p in self.params]", "def set_parameters(self, We1,be1, We2, be2, We3, be3, Wmu, bmu, Wstd, bstd, Wd1, bd1, Wd2, bd2, Wd3, bd3):\r\n self.en_fc1.weight=nn.Parameter(We1)\r\n self.en_fc1.bias=nn.Parameter(be1)\r\n \r\n self.en_fc2.weight=nn.Parameter(We2)\r\n self.en_fc2.bias=nn.Parameter(be2)\r\n \r\n self.en_fc3.weight=nn.Parameter(We3)\r\n self.en_fc3.bias=nn.Parameter(be3)\r\n \r\n self.en_mu.weight=nn.Parameter(Wmu)\r\n self.en_mu.bias=nn.Parameter(bmu)\r\n \r\n self.en_log.weight=nn.Parameter(Wstd)\r\n self.en_log.bias=nn.Parameter(bstd)\r\n \r\n self.de_fc1.weight=nn.Parameter(Wd1)\r\n self.de_fc1.bias=nn.Parameter(bd1)\r\n \r\n self.de_fc2.weight=nn.Parameter(Wd2)\r\n self.de_fc2.bias=nn.Parameter(bd2)\r\n \r\n self.de_fc3.weight=nn.Parameter(Wd3)\r\n self.de_fc3.bias=nn.Parameter(bd3)\r\n \r\n return", "def __init__(self, epsilon=0.05, gamma=0.9, alpha=0.2, numTraining=0, **args):\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n RQLearningAgent.__init__(self, **args)", "def init(X1, Y1, X2, Y2):\n\n global X1_train\n global Y1_train\n global X2_train\n global Y2_train\n \n X1_train, Y1_train, X2_train, Y2_train = X1, Y1, X2, Y2", "def reset_parameters(self):\n init_method = getattr(init, self.initialization)\n for layer in range(self.num_layers):\n fc = self.get_fc(layer)\n init_method(fc.weight.data)\n if self.use_bias:\n init.constant(fc.bias.data, val=0)\n init_method(self.out.weight.data)\n init.constant(self.out.bias.data, val=0)" ]
[ "0.7397329", "0.7190591", "0.7161128", "0.7092822", "0.7059068", "0.6987178", "0.6977974", "0.69458026", "0.6906171", "0.68707854", "0.6857603", "0.6851031", "0.6840059", "0.6775705", "0.6761099", "0.6754401", "0.67539597", "0.6738152", "0.6727141", "0.6707005", "0.6685758", "0.6682182", "0.6677773", "0.66673785", "0.6666603", "0.66618025", "0.66228765", "0.66213286", "0.6601487", "0.6595655", "0.65704423", "0.654193", "0.65355843", "0.6532507", "0.6530382", "0.65248436", "0.65233624", "0.6521719", "0.65200686", "0.6516219", "0.65154815", "0.6493841", "0.6489682", "0.64823204", "0.6456858", "0.6456791", "0.64562136", "0.6455624", "0.64536214", "0.6449677", "0.64430624", "0.6435773", "0.6423606", "0.6421461", "0.64151853", "0.6404124", "0.6404124", "0.6404124", "0.6404124", "0.6404124", "0.6403131", "0.6398341", "0.63917434", "0.63869035", "0.63869035", "0.6384711", "0.6377318", "0.6376916", "0.6366425", "0.6366003", "0.6360926", "0.63609093", "0.6360762", "0.63607585", "0.63536394", "0.6352936", "0.6351905", "0.63472795", "0.63464826", "0.6342971", "0.6335662", "0.63308686", "0.6327804", "0.6327285", "0.6324815", "0.6321916", "0.63216853", "0.63181293", "0.63173825", "0.6304742", "0.63044626", "0.63044626", "0.62993306", "0.6291125", "0.6280146", "0.6276526", "0.62744623", "0.626516", "0.6264186", "0.62626725", "0.6261393" ]
0.0
-1
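Several of the negative snippets in the row above center on neural-network parameter initialization (He, LeCun, and Xavier style schemes). As an illustrative aside, a minimal NumPy sketch of the He-style initialization those snippets implement might look like the following; the function name and layer sizes are hypothetical, and biases are simply zero-initialized:

import numpy as np

def init_params(layer_dims, he=True, seed=0):
    # He initialization: scale each weight matrix by sqrt(2 / fan_in) so
    # activation variance stays roughly stable through ReLU layers.
    rng = np.random.default_rng(seed)
    params = {}
    for i in range(1, len(layer_dims)):
        fan_in = layer_dims[i - 1]
        scale = np.sqrt(2.0 / fan_in) if he else np.sqrt(1.0 / fan_in)
        params[f"W_{i}"] = rng.standard_normal((layer_dims[i], fan_in)) * scale
        params[f"b_{i}"] = np.zeros((layer_dims[i], 1))
    return params

# Example with hypothetical layer sizes: 784 inputs, two hidden layers, 10 outputs.
params = init_params([784, 128, 64, 10])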
Compute intrinsic reward for given input
def compute_intrinsic_reward(self, state, action, next_state, use_cuda, train=False): if use_cuda: fn = lambda x: x.cuda() device = "gpu" else: fn = lambda x: x.cpu() device = "cpu" if not self.predictor_dev == device: self.predictor_model = fn(self.predictor_model) self.predictor_dev = device if not self.target_dev == device: self.target_model = fn(self.target_model) self.target_dev = device target_feature = self.target_model(next_state) predict_feature = self.predictor_model(next_state) forward_loss = ((target_feature - predict_feature) ** 2).sum(-1).mean() self.loss = forward_loss if train: self.optimizer.zero_grad() self.loss.backward(retain_graph=True) torch.nn.utils.clip_grad_norm_(self.predictor_model.parameters(), 0.5) self.optimizer.step() return self.eta * forward_loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reward(input):\n state = np.array([input[0], input[1]])\n action = input[2]\n action = np.clip(action, -2.0, 2.0)\n costs = angle_normalize(state[0])**2 + .1 * state[1]**2 + .001 * (action**2)\n\n return - costs", "def _compute_reward(self): \n reward = -1\n return reward", "def _compute_reward(self):\n reward = 0.0\n return reward", "def intrinsic_reward(\n self,\n state: Any,\n algo_step: OrderedDict[str, Any],\n reward: Any,\n terminal: Any,\n next_state: Any\n ) -> Any:\n with torch.no_grad():\n with evaluate(self.normalization_layer):\n return self._get_loss(next_state).item()", "def compute_reward(self, obs, action, state):\n pass", "def get_reward(self):\n\t\t# returns the reward for current state\n\n\t\t#temporary line for testing:\n\t\t#return self.reward_idea() # also not working yet\n\n\t\tcost = self.cost_function()\n\t\tconstraints_violation = self.get_constraints_violation()\n\t\t#old_aug_cost = self.augmented_cost\n\t\t#new_aug_cost = self.get_augmented_cost(cost,constraints_violation)\n\t\t#reward = old_aug_cost - new_aug_cost # reward formula\n\t\t#self.augmented_cost = new_aug_cost # update augmented_cost\n\t\treward = -self.get_augmented_cost(cost,constraints_violation)\n\t\t#print(\"***\\nDEBUG cost: \" +str(cost)+\" constraints_violation: \"+str(constraints_violation))\n\t\t#print(\"DEBUG reward: \"+str(reward))\n\t\t'''\n\t\t#old idea that is probably bad and not necessary:\n\t\tif(self.step_count == 0):\n\t\t\t# old_aug_cost doesn't exist in first step... ACTUALLY IT DOES!\n\t\t\tprint(\"DEBUG step_count == 0, reward would be \"+str(reward))\n\t\t\tprint(\"DEBUG old_aug_cost: \"+str(old_aug_cost) + \" new_aug_cost: \"+str(new_aug_cost) )\n\t\t\treturn 0\n\t\t'''\n\n\t\treturn reward", "def compute_reward(self, state, rl_actions, **kwargs):\n raise NotImplementedError", "def reward(self):\n if self._state is None:\n return 0\n return self.reward_fn(self._state)", "def _get_reward(self):\n if self.status():\n return self.current_step/self.ep_length # the reward is proportional to the duration \n else:\n return 0", "def _compute_reward_(self):\n if self._target_type == \"position\":\n dist = np.linalg.norm(self._target_diff_, ord=2)\n if self._reward_type == \"linear\":\n reward_dist = -dist\n elif self._reward_type == \"precision\":\n reward_dist = -dist +\\\n np.exp( -dist**2 / 0.01)\n elif self._reward_type == \"sparse\":\n if dist < 0.05:\n reward_dist = 0\n else:\n reward_dist = -0.1\n\n elif self._target_type == \"angle\":\n dist = np.linalg.norm(self._target_diff_, ord=1)\n if self._reward_type == \"linear\":\n reward_dist = -dist\n elif self._reward_type == \"precision\":\n reward_dist = -dist +\\\n np.exp(-dist ** 2 / 0.01)\n elif self._reward_type == \"sparse\":\n raise NotImplementedError\n\n # TODO: doublecheck whether '0' or '-1' should be used as the index\n reward_vel = -self._vel_penalty * np.square(self._qd_[-1, self._joint_indices]).sum()\n\n #self.info['reward_dist'] = reward_dist\n #self.info['reward_vel'] = reward_vel\n\n return (reward_dist + reward_vel) * self._dt / 0.008", "def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward", "def compute_reward(self, obs, action, state):\n return self._reward_func(obs, action), state", "def compute_intrinsic_reward(self, next_obs):\r\n next_obs = torch.tensor(next_obs, dtype=torch.float, device=self.device)\r\n #next_obs = torch.FloatTensor(next_obs).to(self.device)\r\n\r\n target_next_feature = 
self.rnd.target(next_obs)\r\n predict_next_feature = self.rnd.predictor(next_obs)\r\n intrinsic_reward = (target_next_feature - predict_next_feature).pow(2).mean(1) ### MSE --- Issues\r\n #intrinsic_reward = (target_next_feature - predict_next_feature).pow(2).sum(1) / 2\r\n\r\n return intrinsic_reward.data.cpu().numpy()", "def reward(self,\n state: float) -> float:\n raise NotImplementedError", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _compute_reward(self, observations, done):\n raise NotImplementedError()", "def _reward(self, action: Action) -> float:\n raise NotImplementedError", "def reward(self) -> float:\n\n if not self._reward_provided:\n self._reward_provided = True\n return self._reward\n\n return 0", "def reward_function(self):\r\n def R(state, decision, nodes):\r\n return -1.0/1000*nodes['G'].get_preds_value(state)*(decision['G:R_1']+decision['G:L'])\r\n \r\n return R", "def get_reward(self):\n accuracy = self.state\n # Use accuracy as reward for now.\n reward = accuracy\n return reward", "def compute(self, observation=None):\n reward = -1\n\n if self.task.check_distance_threshold(observation):\n reward += 1.0\n\n self.rewards_history.append(reward)\n return reward", "def calculate_reward(self):\n if AT.REWARD not in self.attributes:\n return (0, 1)\n return self.attributes[AT.REWARD].calculate(self)", "def reward(self) -> Reward:\n return self.__reward", "def reward_calc(state, action, setpoint):\r\n return max(-np.square(state - setpoint) - action, -150)", "def forward(self, input, mask, reward):\n input = input.contiguous().view(-1)\n # reward = reward.repeat(mask.shape[1], 1).transpose(0, 1)\n reward = reward.contiguous().view(-1).unsqueeze(1)\n mask = mask.float()\n output = - input * (mask * reward).contiguous().view(-1)\n output = torch.sum(output) / torch.sum(mask)\n return output", "def _get_reward(self, normalized_state, normalized_unconstrained_action, normalized_constrained_action):\n denormalized_unconstrained_charge_rate_in_W = self.denormalize_network_output(normalized_unconstrained_action)\n denormalized_constrained_charge_rate_in_W = self.denormalize_network_output(normalized_constrained_action)\n denormalized_state = normalized_state * self.energy_system.stm_train_subsequent_states_stds + self.energy_system.stm_train_subsequent_states_means\n\n cost_of_net_drawn_electricity = self._get_cost_of_net_drawn_electricity_in_euros(denormalized_state, denormalized_constrained_charge_rate_in_W)\n charge_rate_punishment = self._get_punishment_for_excessive_charge_rate(denormalized_unconstrained_charge_rate_in_W)\n soc_punishment = self._get_punishment_for_impossible_resulting_soc(denormalized_state, denormalized_unconstrained_charge_rate_in_W) \n reward = - cost_of_net_drawn_electricity - charge_rate_punishment - soc_punishment\n #tf.summary.scalar('cost_of_net_drawn_electricity in euros', cost_of_net_drawn_electricity) \n #tf.summary.scalar('reward', reward)\n\n return reward, cost_of_net_drawn_electricity", "def _reward(self):\n\n return 1-self.step_count/ self.max_steps", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = 
self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n self.minmax = max(self.values) - min(self.values)\n \n \n pass", "def reward(self, observation, action, reward):\n self.iter[action] += 1\n self.expvalue[action] += 1/self.iter[action]*(reward - self.expvalue[action])", "def get_reward(self):\n return self.calc_reward(self.sim.pose[:3], self.sim.v)", "def _get_reward(self, action):\n HIRE_COST = 1 # TODO 7/29/20 - Determine significance of this value\n\n # Lookup the state representation using the cur_state index. Then we\n # can get the candidate productivity score.\n obs = self.observation_function[self.cur_state]\n prod_score = obs[1]\n r = action*(prod_score - HIRE_COST)\n return r", "def reward(self):\n return self._reward", "def intrinsic_reward(self, env_states, constraints, orderings, selected_actions):\n return self._critic_fn(\n controller_states, constraints, orderings, selected_actions)", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n \n pass", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n \n pass", "def reward(self, observation, action, reward):\n self.counts[action] = self.counts[action] + 1\n n = self.counts[action]\n value = self.values[action]\n \n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[action] = new_value\n \n pass", "def _calculate_single_step_reward(\n self,\n state: EnvState,\n action: int,\n transition_function_reward_output: chex.Array,\n params: EnvParams,\n ) -> int:\n cost = jnp.dot(transition_function_reward_output, params.cost_components)\n reward = -1 * cost\n return reward", "def get_reward(self):\n \n pos_error = np.sum(abs(self.sim.pose[:3] - self.target_pos[:3]))\n pos_error = np.log(pos_error)\n z_error = abs(self.sim.pose[2] - self.target_pos[2])\n velocity_error = np.dot(np.subtract(1, np.tanh(self.sim.pose[:3])), self.sim.v)\n reward = 1. 
- pos_error - 0.02 * z_error\n #reward = 1 - z_error - xy_erro, r/800 - ((1-z_error)*z_v/100) - angv/20\n reward = np.clip(reward, -2, None)\n\n #reward = np.maximum(np.minimum(reward, max_reward), min_reward)\n\n return reward", "def _add_intrinsic_reward(self, observation, extrinsic_reward):\n intrinsic_reward = self.intrinsic_model.compute_intrinsic_reward(\n observation, self.training_steps, self.eval_mode)\n reward = extrinsic_reward + intrinsic_reward\n\n if self._clip_reward:\n intrinsic_reward = np.clip(intrinsic_reward, -1., 1.)\n reward = np.clip(reward, -1., 1.)\n if (self.summary_writer is not None and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='Train/ExtrinsicReward',\n simple_value=extrinsic_reward),\n tf.Summary.Value(tag='Train/IntrinsicReward',\n simple_value=intrinsic_reward),\n tf.Summary.Value(tag='Train/TotalReward',\n simple_value=reward)\n ])\n self.summary_writer.add_summary(summary, self.training_steps)\n\n return float(reward)", "def reward(self, reward):\r\n return np.sign(reward)", "def get_reward_function(self):\n R_fn = np.zeros(self.n_states)\n R_fn[0] = 1.0\n\n return R_fn", "def reward(self, history_id, reward):\n pass", "def calculate_reward(self):\n vel = self.agent.get_linear_velocity()[:2]\n pos = self.agent.get_position()[:2]\n dist = np.linalg.norm(pos)\n # position vector and optimal velocity are orthogonal to each other:\n # optimal reward when position vector and orthogonal velocity\n # point into same direction\n vel_orthogonal = np.array([-vel[1], vel[0]])\n r = 0.1*np.dot(pos, vel_orthogonal)/(1+np.abs(dist-self.circle.radius))\n r += 0.01 * self.agent.specific_reward()\n return r", "def compute(self, observation):\n observation = observation[\"observation\"] if isinstance(observation, dict) else observation\n o1 = observation[0:3] if self.env.reward_type != \"2dvu\" else observation[0:int(len(observation[:-3])/2)]\n o2 = observation[3:6] if self.env.reward_type != \"2dvu\" else observation[int(len(observation[:-3])/2):-3]\n reward = self.calc_dist_diff(o1, o2)\n self.task.check_distance_threshold(observation=observation)\n self.rewards_history.append(reward)\n return reward", "def reward_func(self, state):\n if abs(state['theta']) < 1:\n return 2.0\n\n elif abs(state['theta']) < 3:\n return 0.0\n\n elif abs(state['theta']) > 30:\n return -100.0\n return -2.0", "def get_reward(self, state, action, next_state, absorbing):\n raise NotImplementedError", "def _reward(self, a):\r\n\r\n xrel = self._body_coord()[0] - self.goal\r\n dist = np.sum(xrel ** 2)\r\n return (\r\n - self.cx * dist / (np.sqrt(dist) + 1) - self.cu * np.sum(a ** 2)\r\n )", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def reward(self, reward):\n return np.sign(reward)", "def process_reward(self, reward):\n return reward", "def get_reward(self, i_state, action, is_execute_success, coverage_vector):\n pass", "def calculateReward(self, bonus=0):\n reward = -1 + bonus\n\n if ((self.last_system_action[\"action\"]==\"sugg-all\" and self.last_user_action[\"action\"]!=\"eli-sugg-all\") or \n (\"change-page\" in self.last_system_action[\"action\"] and self.last_user_action[\"action\"]!='change-page')):\n reward -= .3\n self.sugg_penalty-=.3\n\n if \"info-all\" in self.last_system_action[\"action\"] and \"info-all\" not in 
self.last_user_action[\"action\"]:\n reward -= .5\n self.info_penalty -=.5\n\n if self.training:\n new_position = self.dataset.getPosition(self.user.constraints['target_function'])\n if new_position < self.previous_position:\n reward += 5*((self.previous_position-new_position)/self.nb_items)\n self.previous_position = new_position\n\n elif self.wasCommandIgnored(self.last_user_action['action'],self.last_system_action['action']):\n reward -= 10\n\n if self.last_user_action[\"action\"]==\"dont-know\":\n reward -= 2\n\n if \"system_answered\" in self.last_user_action:\n reward += .7\n\n if self.last_system_action[\"action\"] == \"START\" :\n reward -= 20\n\n if self.last_system_action[\"action\"] in [\"info\", \"info-all\"] and self.current_function==None:\n reward -= 20\n\n\n return reward", "def get_reward(self, is_terminal_state, result, reward, is_agent_move):\n if is_terminal_state:\n reward += 0 if result == \"Tie\" else 10 if is_agent_move else -10\n #elif (not is_terminal_state and is_agent_move):\n else:\n reward += -1\n\n return reward", "def calculate_reward(self):\n cur_dist = self.get_xy_distance()\n reward = self.old_dist - cur_dist + 0.01 * self.agent.specific_reward()\n self.old_dist = cur_dist\n return reward", "def __generate_reward_function(self):\n K = -3\n self.reward = np.array([[10, 0, K],\n [0, 2, 0],\n [K, 0, 10]])", "def get_reward(self, observations, actions):\n\n #initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if(len(observations.shape)==1):\n observations = np.expand_dims(observations, axis = 0)\n actions = np.expand_dims(actions, axis = 0)\n batch_mode = False\n else:\n batch_mode = True\n\n #get vars\n xvel = observations[:, 9].copy()\n body_angle = observations[:, 2].copy()\n front_leg = observations[:, 6].copy()\n front_shin = observations[:, 7].copy()\n front_foot = observations[:, 8].copy()\n zeros = np.zeros((observations.shape[0],)).copy()\n\n # ranges\n leg_range = 0.2\n shin_range = 0\n foot_range = 0\n penalty_factor = 10\n\n #calc rew\n self.reward_dict['run'] = xvel\n\n front_leg_rew = zeros.copy()\n front_leg_rew[front_leg>leg_range] = -penalty_factor\n self.reward_dict['leg'] = front_leg_rew\n\n front_shin_rew = zeros.copy()\n front_shin_rew[front_shin>shin_range] = -penalty_factor\n self.reward_dict['shin'] = front_shin_rew\n\n front_foot_rew = zeros.copy()\n front_foot_rew[front_foot>foot_range] = -penalty_factor\n self.reward_dict['foot'] = front_foot_rew\n\n # total reward\n self.reward_dict['r_total'] = self.reward_dict['run'] + self.reward_dict['leg'] + self.reward_dict['shin'] + self.reward_dict['foot']\n\n #return\n dones = zeros.copy()\n if(not batch_mode):\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones", "def get_current_reward(self, state):\n if state == 1:\n return 1.0 + self.rng.normal(scale=self.terminal_reward_stdev)\n else:\n return 0.0", "def _get_reward(self):\n\n # compute reward depending on the radius and distance to target\n radius_reward = -(self.radius_sheep*0.9)/self.init_sheep_root\n target_reward = -(self.target_distance*0.9)/self.init_sheep_root\n\n reward = target_reward + radius_reward\n\n # ensure it is always an array\n if not type(reward) is np.ndarray:\n reward = np.array([reward])\n return reward[0]", "def step_reward(self) -> Tuple[float, dict]:\n unserved_reward = -self.unserved_penalty * self.state[\"real_power_unserved\"]**2\n peak_reward = -self.peak_penalty * \\\n max(0, self.state[\"real_power_consumed\"] - self.peak_threshold)**2\n 
reward = unserved_reward + peak_reward\n reward /= self.reward_scale\n return reward, {\"real_power_unserved\": unserved_reward, \"peak_reward\": peak_reward}", "def reward(self, reward):\n return float(np.sign(reward))", "def _reward(self, action):\n raise NotImplementedError", "def get_reward(self):\n\t\tdist = np.sqrt(np.sum(np.square(np.asarray(self.state) - np.asarray(self.goal))))\n\n\t\tdist_diff = self.prev_dist - dist\n\t\tself.reward = dist_diff * 10\n\n\t\tself.prev_dist = dist", "def reward_func(sample_solution=None):\r\n\r\n # make sample_solution of shape [sourceL x batch_size x input_dim]\r\n sample_solution = tf.stack(sample_solution,0)\r\n\r\n sample_solution_tilted = tf.concat((tf.expand_dims(sample_solution[-1],0),\r\n sample_solution[:-1]),0)\r\n # get the reward based on the route lengths\r\n\r\n\r\n route_lens_decoded = tf.reduce_sum(tf.pow(tf.reduce_sum(tf.pow(\\\r\n (sample_solution_tilted - sample_solution) ,2), 2) , .5), 0)\r\n return route_lens_decoded", "def learn(self, state, action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0", "def update(self, arm, reward, context):", "def reward_calc(self, reward_traj,V,V_end):\n r_all = np.concatenate((reward_traj,[V_end]),-1)\n V_all = V #np.concatenate((V,[V_end]),-1)\n delta = r_all[:-1] + self.gamma * V_all[1:] - V_all[:-1]\n \n adv = Generalized_Adv_Estimator.discounted_sum(delta,self.gamma*self.lam)\n rtg = adv + V_all[:-1]\n\n adv = adv.astype('float32')\n rtg = rtg.astype('float32')\n\n return adv, rtg", "def get_reward(self, done):\n reward = 0\n self.calc_pos_diff_ratio()\n reward = self.calc_base_reward_2(reward)\n\n return reward", "def calculate_reward(self):\n cur_dist = self.get_xy_distance()\n # reduce agent specific reward such that electricity costs are not\n # higher than moving towards the goal\n reward = self.old_dist - cur_dist + 0.01 * self.agent.specific_reward()\n self.old_dist = cur_dist\n\n return reward", "def _calculate_reward( self, old_state_vars, new_state_vars ):\n if 'total-wealth' == self.objective:\n x_old = old_state_vars.ptf_asset_vals.sum()\n x_new = new_state_vars.ptf_asset_vals.sum()\n elif 'relative-profit' == self.objective:\n x_old = old_state_vars.ptf_asset_vals.sum() - old_state_vars.bmk_asset_vals.sum()\n x_new = new_state_vars.ptf_asset_vals.sum() - new_state_vars.bmk_asset_vals.sum()\n else:\n raise ValueError( 'Unsupported objective: {}'.format(objective) )\n\n # Discount the objective before applying the utility function\n gamma = self.get_gamma()\n reward = self.utility_fun( gamma * x_new ) - self.utility_fun( x_old )\n return reward", "def _calculate_reward(self):\n\n reward = 0.0 # base reward\n\n if self.calc_lane_invasion:\n waypoint = self.world.get_map().get_waypoint(self.vehicle.get_location(), project_to_road=True, lane_type=(carla.LaneType.Driving | carla.LaneType.Shoulder | carla.LaneType.Sidewalk))\n change_allowed = waypoint.lane_change\n left_lane = waypoint.left_lane_marking\n right_lane = waypoint.right_lane_marking\n\n if change_allowed is not carla.LaneChange.Both:\n if (change_allowed is not carla.LaneChange.Right and right_lane in self.lanes_invaded) or (change_allowed is not carla.LaneChange.Left and left_lane in self.lanes_invaded):\n reward += -1\n \n if self._lane_type_invasion(self.lanes_invaded):\n # crossing a lane marker of a lane that shouldn't be crossed\n reward 
+= -1\n \n if self.calc_speed_limit:\n velocity = self.vehicle.get_velocity()\n\n if self.vehicle.get_speed_limit() < math.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2):\n # over speeding\n reward += -1\n\n if self.traffic_light:\n if self.vehicle.is_at_traffic_light() and (self.vehicle.get_control().brake < 0.2):\n # negative reward for not decelarating in a red light\n # perhaps a part should be added to check if the agent crossed the light\n # while it was red and give a more substantial negative reward\n reward += -1\n\n # TODO maybe add a positive reward for speed, some coefficient time speed (velocity below x)\n\n if self.calc_distance:\n # reward for distance traveled\n current_location = self.vehicle.get_location()\n distance = self.last_location.distance(current_location)\n reward += distance # adding a coefficient to the distance to increase importance of distance (make the car prefer moving)\n # if distance <= 0.3 and not self.vehicle.is_at_traffic_light():\n ## small negative reward for not moving, it's large enough to make the car move,\n ## but small enough to have it prioratize not breaking the law\n # reward += -1\n self.last_location = current_location\n\n if self.calc_lane_type:\n # reward for lane type\n current_w = self.map.get_waypoint(self.vehicle.get_location())\n if current_w.lane_type != carla.LaneType.Driving and current_w != carla.LaneType.Sidewalk:\n reward += -1\n elif current_w.lane_type == carla.LaneType.Sidewalk and not current_w.is_junction:\n reward += -1\n \n if self.calc_velocity:\n # reward for velocity\n velocity = self.vehicle.get_velocity()\n speed = 3.6 * math.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2) # speed in km/h\n if speed >= 50:\n reward += 1\n # reward += math.log(speed, 4) if speed <= 65 else 1\n \n if self.calc_rotation:\n vehicle_yaw = self.vehicle.get_transform().rotation.yaw\n lane_yaw = self.map.get_waypoint(self.vehicle.get_location()).transform.rotation.yaw\n yaw_diff = np.abs(vehicle_yaw - lane_yaw)\n if yaw_diff > 180:\n yaw_diff = 360 - yaw_diff # 180 is the max\n reward += -round(yaw_diff / 100, 3)\n \n\n \"\"\"# expirament: trying a very simple reward scheme based on speed:\n velocity_vector = self.vehicle.get_velocity()\n speed = 3.6 * math.sqrt(velocity_vector.x**2 + velocity_vector.y**2 + velocity_vector.z**2) # speed in km/h\n if speed >= 55:\n reward += 1.5\n elif speed >= 25:\n reward += 1.25\n elif speed >= 10:\n reward += 1\"\"\"\n \n return reward", "def get_reward(self):\n #original reward function: reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n thrusts = self.sim.get_propeler_thrust(self.sim.prop_wind_speed)\n linear_forces = self.sim.get_linear_forces(thrusts)\n distance = np.linalg.norm(self.target_pos - self.sim.pose[:3])\n #speed = math.sqrt(np.square(self.sim.find_body_velocity()).sum())\n #with 300x300x300m env, the max distance from one corner to another is 519\n max_distance = 519\n #Focus quadcopter on not crashing but first rewarding an upward linear force until at the height of the target\n if self.sim.pose[2] < self.target_pos[2]:\n #velocity_discount = 1/speed\n reward = np.tanh(linear_forces[2])\n #after getting to the correct z-coordinate, move to the correct y-coordinate\n elif self.sim.pose[1] < self.target_pos[1]:\n #velocity_discount = 1/speed\n reward = 1 + np.tanh(linear_forces[1])\n #finally, after getting rewards for the x and y coordinates, give reward for distance\n #at this stage, the drone will have overshot the x and y coordinates, but it would be in a 
better area to\n #start searching for the x coordinate\n elif distance > 1 and self.sim.pose[2] > self.target_pos[2] and self.sim.pose[1] > self.target_pos[1] :\n reward = 2 + (1-math.pow((distance/300),.04))\n elif distance < 1:\n self.success = True\n reward = 100\n #possible reward for hover: np.exp(-np.square(linear_forces[2]))\n return reward", "def reward(self, state):\n if state == (self.size-1, self.size-1):\n return 10\n elif state == (self.size-1, self.mid):\n return -1\n elif state == (self.size-1, 0):\n return 0\n elif state == (0, self.size-1):\n return -12\n else:\n return -1", "def _get_reward(self, terminal):\n if not terminal:\n return 0\n\n folded_design, _ = fold(self.design.primary)\n hamming_distance = hamming(folded_design, self.target.dot_bracket)\n if 0 < hamming_distance < self._env_config.mutation_threshold:\n hamming_distance = self._local_improvement(folded_design)\n\n normalized_hamming_distance = hamming_distance / len(self.target)\n\n # For hparam optimization\n episode_info = EpisodeInfo(\n target_id=self.target.id,\n time=time.time(),\n normalized_hamming_distance=normalized_hamming_distance,\n )\n self.episodes_info.append(episode_info)\n\n return (1 - normalized_hamming_distance) ** self._env_config.reward_exponent", "def get_reward(self):\n \n ##get total reward value from Q\n total_reward = 0.0\n for value in self.Q.itervalues():\n total_reward = total_reward + value\n \n ##return the average reward\n return total_reward", "def get_reward(self):\n \n ##get total reward value from Q\n total_reward = 0.0\n for value in self.Q.itervalues():\n total_reward = total_reward + value\n \n ##return the average reward\n return total_reward", "def get_reward(self):\n assert self.env.prev_obs_data is not None\n assert self.env.obs_data is not None\n reward, termination = self.reward_fn(self.env.prev_obs_data,\n self.env.obs_data)\n reward = float(reward)\n termination = bool(termination)\n\n return reward, termination", "def update_Q(self, reward):\n old_estimate = self.q_estimates[self.prev_action]\n self.q_estimates[self.prev_action] = old_estimate + 1/self.N[self.prev_action] * (reward - old_estimate)", "def get_reward(self):\n reward, self.reward = self.reward, 0\n return reward", "def reward(self):\n current_base_position = self.minitaur.GetBasePosition()\n dt = self.control_time_step\n self._current_vel = velocity = (current_base_position[0] - self._last_base_position[0]) / dt\n vel_clip = np.clip(velocity, -self._goal_limit, self._goal_limit)\n velocity_reward = self._goal_vel - np.abs(self._goal_vel - vel_clip)\n\n action = self._past_actions[self._counter - 1]\n prev_action = self._past_actions[max(self._counter - 2, 0)]\n prev_prev_action = self._past_actions[max(self._counter - 3, 0)]\n acc = action - 2 * prev_action + prev_prev_action\n action_acceleration_penalty = np.mean(np.abs(acc))\n\n reward = 0.0\n reward += 1.0 * velocity_reward\n reward -= 0.01 * action_acceleration_penalty # TODO(krshna): lowering acceleration penalty, try 0.01, 0.002\n\n if self._debug:\n self.pybullet_client.addUserDebugText('Current velocity: {:3.2f}'.format(\n self._current_vel), [0, 0, 1], [1, 0, 0])\n return reward", "def _get_reward(self):\n if self.is_game_done:\n return self.price - 1\n else:\n return 0.0", "def calculate_return(list_of_reward, gamma):\n G = 0\n for r in reversed(list_of_reward):\n G = gamma * G + r\n\n return G", "def update(self, reward):\n raise NotImplementedError", "def update_reward(state, reward, max_reward, alpha=1, c=100, gamma=0.9):\n\t\t# 
update number of actions done so far to this state\n\t\tactions[state] = actions.get(state, 0.0) + 1.0\n\t\t# compute learning rate\n\t\talpha *= c / (c + actions[state])\n\t\trewards[state] = rewards.get(state, 0.0) + alpha*(reward+gamma*max_reward-rewards.get(state, 0.0))", "def compute(self, observation):\n reward = self.calc_dist_diff(observation[0:3], observation[3:6], observation[6:9])\n self.task.check_distance_threshold(observation=observation)\n self.rewards_history.append(reward)\n return reward", "def get_reward(self, state, reward, done):\n\n # simple reward = position. Don't explicitly incentivise agent to take left hill as well as right hill\n # let it figure it out by itself\n if done and self.success:\n return 100\n else:\n return (state[0]-0.5)/3", "def reward(self, player, winning_state):\n if winning_state == \"Tie\":\n return 1\n elif winning_state == \"Resume\":\n return -1\n else:\n if player == \"agent\":\n return 10\n else:\n return -10", "def get_reward(self, state, action, next_state):\n assert action in self.get_possible_actions(\n state), \"cannot do action %s from state %s\" % (action, state)\n x, y = self.__state_to_xy(next_state)\n reward = -1\n if MAP[y][x] == 'G':\n reward = 1\n elif MAP[y][x] == 'H':\n reward = -100\n\n return reward", "def reward(self, env):\n del env\n return 1", "def discount_reward(reward, gamma):\n discount_r = np.zeros_like(reward)\n r_total = 0\n for _ in reversed(range(0, reward.size)):\n if reward[_] != 0:\n r_total = 0\n r_total = r_total * gamma + reward[_]\n discount_r[_] = r_total\n return discount_r", "def reward(self):\n if self.num_steps >= self.timeout:\n return self.fail_penalty\n\n return self.default_reward", "def immediate_reward(state_node):\n #DANIEL: edited the reward function to get rid of the parent argument and will just be a function of the current state\n return state_node.state.reward()", "def get_reward(self, done):\n return self.reward_function(done)", "def get_reward(self, dags, entropies,inputs,targets):\n if not isinstance(entropies, np.ndarray):\n entropies = entropies.data.cpu().numpy()\n\n score=self.get_score(inputs,targets,dags)\n #score=1-self.get_loss(inputs,targets,dags)\n print(score.item())\n R = utils.to_item(score.data)\n\n if self.args.entropy_mode == 'reward':\n rewards = R + self.args.entropy_coeff * entropies.mean()\n elif self.args.entropy_mode == 'regularizer':\n rewards = R * np.ones_like(entropies)\n else:\n raise NotImplementedError(f'Unkown entropy mode: {self.args.entropy_mode}')\n\n return rewards" ]
[ "0.83368236", "0.76844054", "0.7681588", "0.7373235", "0.72947526", "0.7241712", "0.72022116", "0.7160538", "0.7067619", "0.7058791", "0.70559245", "0.7047685", "0.70335954", "0.70237744", "0.70136887", "0.70136887", "0.70136887", "0.70136887", "0.70136887", "0.70136887", "0.6973559", "0.6955224", "0.69331723", "0.6895637", "0.6874078", "0.6863332", "0.6859877", "0.6851922", "0.6832945", "0.6823229", "0.68157864", "0.67605245", "0.67452484", "0.6741461", "0.6716172", "0.669558", "0.66867626", "0.66810906", "0.66810906", "0.66810906", "0.6670113", "0.6642092", "0.6629265", "0.66231436", "0.66171163", "0.66157573", "0.65990937", "0.6580491", "0.65672743", "0.65646285", "0.65305746", "0.6530326", "0.6530326", "0.6530326", "0.6530326", "0.6530326", "0.65288", "0.65213645", "0.65206873", "0.6485463", "0.6482084", "0.6478115", "0.64780104", "0.6456262", "0.64536405", "0.64363354", "0.6433227", "0.6427502", "0.6427426", "0.64186966", "0.64149696", "0.6414704", "0.6414283", "0.64137", "0.641099", "0.6403184", "0.63885397", "0.637653", "0.6366106", "0.63652045", "0.63545024", "0.63545024", "0.633437", "0.6324533", "0.6307949", "0.6302563", "0.6301828", "0.630116", "0.6281228", "0.62811", "0.6275956", "0.62686163", "0.6233884", "0.6222415", "0.6205588", "0.6203525", "0.6190591", "0.6188009", "0.6183564", "0.61685187" ]
0.6941467
22
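The row above pairs the query about computing an intrinsic reward with an RND (Random Network Distillation) style document: the reward is the prediction error of a trainable predictor network against a fixed, randomly initialized target network, optionally trained on the fly and scaled by eta. A minimal PyTorch sketch of that idea, with hypothetical network sizes and assumed default values for eta and the learning rate, could look like:

import torch
import torch.nn as nn

class RNDReward:
    def __init__(self, obs_dim, feat_dim=64, eta=0.1, lr=1e-3):
        # Fixed, randomly initialized target network (never trained).
        self.target = nn.Sequential(
            nn.Linear(obs_dim, feat_dim), nn.ReLU(), nn.Linear(feat_dim, feat_dim))
        for p in self.target.parameters():
            p.requires_grad_(False)
        # Trainable predictor tries to reproduce the target's features.
        self.predictor = nn.Sequential(
            nn.Linear(obs_dim, feat_dim), nn.ReLU(), nn.Linear(feat_dim, feat_dim))
        self.opt = torch.optim.Adam(self.predictor.parameters(), lr=lr)
        self.eta = eta

    def intrinsic_reward(self, next_obs, train=False):
        target_feat = self.target(next_obs)
        pred_feat = self.predictor(next_obs)
        # Prediction error is large for novel observations, small for familiar ones.
        loss = ((target_feat - pred_feat) ** 2).sum(-1).mean()
        if train:
            self.opt.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm_(self.predictor.parameters(), 0.5)
            self.opt.step()
        return self.eta * float(loss.detach())

# Usage: reward for a hypothetical batch of 32 observations with 8 features.
rnd = RNDReward(obs_dim=8)
r = rnd.intrinsic_reward(torch.randn(32, 8), train=True)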
Get losses of last computation if existing
def get_losses(self): if self.loss is not None: return [self.loss] else: return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def losses(self):\n pass", "def compute_loss(self):", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def compute_loss(self, obs, returns):", "def get_current_loss(self):\n return sum(self.recent_loss_array)/sum(self.recent_loss_bs_array)", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def _compute_loss(self):\n state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)\n\n state = torch.FloatTensor(state)\n next_state = torch.FloatTensor(next_state)\n action = torch.LongTensor(action)\n reward = torch.FloatTensor(reward)\n done = torch.FloatTensor(done)\n\n q_values = self.dqn(state)\n q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)\n\n next_q_values = self.target_dqn(next_state)\n next_q_value = next_q_values.max(1)[0]\n target = reward + self.discount_factor * next_q_value * (1 - done)\n\n # loss = F.smooth_l1_loss(q_value, target.detach())\n loss = F.mse_loss(q_value, target.detach())\n\n return loss", "def _get_loss(self):\n raise NotImplementedError", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def losses(self):\n losses = []\n for layer in self.layers:\n losses += layer.losses\n if context.in_eager_mode():\n return losses\n\n relevant_inputs = self.inputs or []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,\n losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))", "def get_loss(self):\n raise NotImplementedError", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def get_loss(self):\n return self.loss / self.cnt", "def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. 
regression loss for bbox \n\n return classification_loss, bbox_reg_loss", "def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)", "def compute_losses(self):\n cycle_consistency_loss_a = \\\n self._lambda_a * losses.cycle_consistency_loss(\n real_images=self.input_a, generated_images=self.cycle_images_a,\n )\n cycle_consistency_loss_b = \\\n self._lambda_b * losses.cycle_consistency_loss(\n real_images=self.input_b, generated_images=self.cycle_images_b,\n )\n\n lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)\n lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)\n\n g_loss_A = \\\n cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b\n g_loss_B = \\\n cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a\n\n d_loss_A = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_a_is_real,\n prob_fake_is_real=self.prob_fake_pool_a_is_real,\n )\n d_loss_B = losses.lsgan_loss_discriminator(\n prob_real_is_real=self.prob_real_b_is_real,\n prob_fake_is_real=self.prob_fake_pool_b_is_real,\n )\n\n optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)\n\n self.model_vars = tf.trainable_variables()\n\n d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]\n g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]\n d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]\n g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]\n\n self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)\n self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)\n self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)\n self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)\n\n for var in self.model_vars:\n print(var.name)\n\n # Summary variables for tensorboard\n self.g_A_loss_summ = tf.summary.scalar(\"g_A_loss\", g_loss_A)\n self.g_B_loss_summ = tf.summary.scalar(\"g_B_loss\", g_loss_B)\n self.d_A_loss_summ = tf.summary.scalar(\"d_A_loss\", d_loss_A)\n self.d_B_loss_summ = tf.summary.scalar(\"d_B_loss\", d_loss_B)", "def loss_op(self):\n return self.loss", "def loss(self):\n return self._loss", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) 
works for both scalar tensor and float number\n return errors_ret", "def calculate_loss(self, output, batch):\n\n detailed_loss = {}\n for loss_func_key, this_loss_func, weight in self.loss_funcs:\n this_loss = this_loss_func(output, batch) * weight\n detailed_loss[loss_func_key] = this_loss\n loss = sum(detailed_loss.values())\n return loss, detailed_loss", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_loss(self, batch, y_next_true):\n\n # Get the output of the gru layer for the input which serves as input to the reconstruction + forecasting model\n gru_output = self.model(batch, training=True)\n\n # Forecasting model loss calculation\n # Using mse yields the same result as RMSE and is more stable\n y_next_pred = self.model.forecasting_model(gru_output, training=True)\n y_next_pred = y_next_pred[:, -1, :] # only get the prediction for the last timestamp\n\n mse_for = tf.keras.losses.MeanSquaredError()\n loss_for = mse_for(y_next_true, y_next_pred)\n\n # Reconstruction model loss calculation\n # Like VAE based on: https://bit.ly/3oRMiQz\n mse_rec = tf.keras.losses.MeanSquaredError()\n reconstructed_output = self.model.reconstruction_model(gru_output)\n reconstruction_target = gru_output if 'reconstruct_gru' in self.hyper.variants else batch\n\n loss_rec = mse_rec(reconstruction_target, reconstructed_output)\n loss_rec += sum(self.model.reconstruction_model.losses) # Add KLD regularization loss\n\n # Overall loss\n loss = loss_for + loss_rec\n\n return loss", "def get_current_validation_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name+'_val'] = float(getattr(self, 'loss_' + name + '_val')) # float(...) 
works for both scalar tensor and float number\n return errors_ret", "def _build_loss(self, results, features, labels):\n losses, loss = getters.get_loss(\n self.loss.IDENTIFIER, results, labels, **self.loss.to_dict())\n self._loss = loss\n self._losses = losses\n\n other_losses = get_tracked(tf.GraphKeys.REGULARIZATION_LOSSES)\n if other_losses:\n loss = [loss] + other_losses\n loss = tf.add_n(loss, name=\"TotalLoss\")\n self._total_loss = loss\n return losses, loss", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()", "def get_loss(self, inputs, outputs, add_summary=True):\n cfg = self.cfg()\n torch.autograd.set_detect_anomaly(True)\n # g_loss = tf.zeros(dtype=tf.float32, shape=[])\n g_loss = self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n r_loss = self.regularization_loss(cfg)\n# print(g_loss, r_loss)\n g_loss += r_loss\n # if cfg.proj_weight:\n # g_loss += self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n\n # if cfg.drc_weight:\n # g_loss += add_drc_loss(cfg, inputs, outputs, cfg.drc_weight, add_summary)\n #\n # if cfg.pc_rgb:\n # g_loss += add_proj_rgb_loss(cfg, inputs, outputs, cfg.proj_rgb_weight, add_summary, self._sigma_rel)\n #\n # if cfg.proj_depth_weight:\n # g_loss += add_proj_depth_loss(cfg, inputs, outputs, cfg.proj_depth_weight, self._sigma_rel, add_summary)\n #\n # if add_summary:\n # tf.contrib.summary.scalar(\"losses/total_task_loss\", g_loss)\n\n return g_loss", "def log_losses(self, epoch):\n logger.info(\"At epoch {}. 
Train Loss: {}\".format(epoch, self.training_losses[-1]))", "def getLoss(self, x_test, t_test):\n x_t = Variable(x_test, requires_grad=False)\n #Feed inputes into neural network\n t_pred = self.model(x_t)\n #Now lets compute out loss\n loss = self.loss_fn(t_pred, t_test)\n return loss", "def compute_loss(self, targets, logits, seq_length):\n\n\t\twith tf.name_scope('evaluate_loss'):\n\t\t\tloss, norm = self.loss_computer(targets, logits, seq_length)\n\t\t\t\n\t\treturn loss, norm", "def calc_loss(self, x: np.ndarray, y: np.ndarray) -> float:\n return self.descent.calc_loss(x, y)", "def calculate_loss(self, output, batch, training_context, last_activation=None):\n if self._model_loss_key is None:\n return output\n else:\n return output[self._model_loss_key]", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def calculate_loss(self, X, y):\n probs = self.predict(X)\n\n num_examples = X.shape[0]\n\n sub = np.subtract(probs, y)\n abs_sum = np.abs(sub)\n sm = np.sum(abs_sum)\n loss = 1 - sm / num_examples\n print(\"Current loss: [ \" + str(\"{:6.5f}\").format(loss) + \" ]\")\n return loss", "def get_loss(self, Loss, results, inputs, device):\n return", "def record_losses_conflict(self, X,Y):\n _, _, dissimilar_features_dict = self.predict(X)\n decorrelation_loss = self.compute_decorrelation_loss(dissimilar_features_dict)\n decorrelation_loss.backward()\n grads_decorrelation = self.get_gradients()\n for p in self.parameters(): p.grad = None # erase gradients\n\n scores, _, _ = self.predict(X)\n CE_loss = self.criterion(scores, Y) # loss of the current batch\n CE_loss.backward()\n grads_classification = self.get_gradients()\n for p in self.parameters(): p.grad = None # erase gradients\n\n\n for signal in self.signals_list:\n l_class_norm = torch.norm(grads_classification[signal])\n self.losses_norms[signal]['classification'].append(l_class_norm)\n l_decor_norm = torch.norm(grads_decorrelation[signal])\n self.losses_norms[signal]['decorrelation'].append(l_decor_norm)\n\n dot_product = torch.dot(grads_decorrelation[signal], grads_classification[signal])\n self.cosine_sim[signal].append(dot_product/(l_class_norm*l_decor_norm))", "def get_loss(self, batch_x, batch_y, return_preds=False):\n preds, reprs = self.call(batch_x)\n err = preds - batch_y\n loss = 0.5 * tf.square(err)\n if return_preds:\n return loss, err, preds, reprs\n else:\n return loss, err", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def get_loss(y_):\n m_ = tf_lab_q.dequeue_many(BATCH_SIZE)\n y_m_ = tf.mul(y_, ones_)\n y_diff_ = tf.sub(y_m_, tf.transpose(y_m_))\n t_1_ = -tf.mul(0.95*ones_, y_diff_)\n t_2_ = tf.log(ones_ + tf.exp(y_diff_))\n sum_ = tf.add(t_1_, t_2_)\n mult_sum_ = tf.mul(m_, sum_)\n loss_ = tf.reduce_sum(mult_sum_) / tf.reduce_sum(m_)\n return loss_, m_", "def calculate_loss(model, t, logits, labels):\n model_para = model.get_paramaters_list_reshape()\n myTF.calculate_para_dependence_loss(model_para,t)\n\n 
myTF.calculate_cross_entropy_loss(logits, labels)\n\n return tf.add_n(tf.get_collection('losses'), name='loss_total')", "def loss(self) -> KernelLoss:\n return self._loss", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def training_losses(self):\r\n if self._training_losses is None:\r\n # Builds the per-task metrics and losses.\r\n # This the total summed training loss of tasks in the joint training.\r\n self._training_losses = dict(\r\n total_loss=tf.keras.metrics.Mean(\"training_loss\", dtype=tf.float32))\r\n for name in self.multi_task.tasks:\r\n self._training_losses[name] = tf.keras.metrics.Mean(\r\n \"training_loss\", dtype=tf.float32)\r\n return self._training_losses", "def __call__(self, *args, return_losses=False, **kwargs):\n self._losses_dict = {}\n results = super().__call__(*args, **kwargs)\n if not return_losses:\n return results\n else:\n self._losses_dict['total_loss'] = tf.reduce_sum(\n list(self._losses_dict.values()))\n return results, self._losses_dict", "def get_current_losses(self) -> Dict[str, float]:\n errors_ret = dict()\n for name, loss in self.losses.items():\n if isinstance(name, str):\n errors_ret[name] = float(loss)\n return errors_ret", "def backward_val(self):\n self.loss_similarity = [NCC(warped_img, self.batch_fixed) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [NCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:]) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def get_loss(self, combined_loss):\n loss_dict = {'iou_loss': combined_loss[0].item(),\n 'emotion_loss': combined_loss[1].item()}\n self.iou = 1 - combined_loss[0].item()\n return combined_loss[0] + combined_loss[1], loss_dict", "def _build_loss(self):\n self._build_loss_D()\n self._build_loss_G()\n tf.add_to_collection('losses', self.loss_D)\n tf.add_to_collection('losses', self.loss_G)", "def loss(self):\n return self._get(\"loss\")", "def get_loss(self, y_target, pred=None):\n last_model = self.models[-1]\n if not isinstance(last_model, ModelWithLoss):\n raise TypeError(f\"The last model in the pipeline has to e an instance of 'ceml.model.ModelWithLoss' but not of {type(last_model)}\")\n\n return last_model.get_loss(y_target, pred)", "def biggestRecentLoss(losses, memory=3):\n memory += 1\n if len(losses) < memory:\n lookback = len(losses)\n else:\n lookback = memory\n oldlosses = losses[-lookback:]\n oldloss = max(oldlosses)\n return oldloss", "def _compute_loss(self, state, action, reward, next_state, done):\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n q_value = 
q_values[action]\n\n next_state = torch.FloatTensor(next_state)\n next_q_values = self.dqn(next_state)\n next_q_value = next_q_values.max()\n\n if done:\n target = reward\n else:\n target = reward + self.discount_factor * next_q_value\n\n loss = (q_value - target).pow(2).mean()\n\n return loss", "def compute_loss(self, o, y):\n if self.regression:\n return (o - y)**2\n else:\n if y > 0:\n return -np.log(o)\n else:\n return -np.log(1-o)", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def _get_loss(self, states):\n states = self.normalization_layer(states)\n rnd_pred = self.rnd(states)\n\n with torch.no_grad():\n rnd_target = self.rnd_target(states)\n\n rnd_loss = self.rnd_loss_func(rnd_pred, rnd_target)\n\n return rnd_loss", "def compute_loss(self, model, inputs, return_outputs=False):\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss", "def get_loss(self, xb1, xb2):\n feed = {tbn('xb1:0'): xb1,\n tbn('xb2:0'): xb2,\n tbn('is_training:0'): False}\n\n losses = self.sess.run(tf.get_collection('losses'), feed_dict=feed)\n\n lstring = ' '.join(['{:.3f}'.format(loss) for loss in losses])\n\n return lstring", "def gather_loss(clone, regularization_losses):\n # The return value.\n sum_loss = None\n # Individual components of the loss that will need summaries.\n regularization_loss = None\n # Compute and aggregate losses on the clone device.\n # with tf.device('/device:CPU:0'):\n all_losses = []\n clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, scope=None)\n if clone_losses:\n clone_loss = tf.add_n(clone_losses, name='clone_loss')\n\n all_losses.append(clone_loss)\n if regularization_losses:\n regularization_loss = tf.add_n(regularization_losses,\n name='regularization_loss')\n all_losses.append(regularization_loss)\n if all_losses:\n sum_loss = tf.add_n(all_losses)\n # tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)\n if regularization_loss is not None:\n tf.summary.scalar('regularization_loss', regularization_loss)\n return sum_loss", "def loss(self, data):\n loss, smoothed, lr = data\n\n curves = []\n\n curve_keys = ['color', 'linestyle', 'linewidth', 'alpha']\n\n if loss is not None:\n loss_name = self.config.get('label', f\"loss #{self.index + 1}\")\n loss_label = f'{loss_name} ⟶ {loss[-1]:2.3f}'\n final_window = self.config.get('final_window', None)\n if final_window is not None:\n final_window = min(final_window, len(loss))\n final = np.mean(loss[-final_window:])\n loss_label += f\"\\nmean over last {final_window} iterations={final:2.3f}\"\n\n loss_config = self.config.filter(keys=curve_keys, prefix='curve_')\n loss_curve = self.ax.plot(loss, label=loss_label, **loss_config)\n curves.extend(loss_curve)\n\n if smoothed is not None:\n smoothed_color = scale_lightness(loss_config['color'], scale=.5)\n smooth_window = self.config.get('window')\n smoothed_label = self.config.get('smoothed_label', loss_name)\n smoothed_label = smoothed_label + '\\n' if smoothed_label else ''\n smoothed_label += f'smoothed with window 
{smooth_window}'\n smoothed_curve = self.ax.plot(smoothed, label=smoothed_label, color=smoothed_color, linestyle='--')\n curves.extend(smoothed_curve)\n\n if lr is not None:\n lr_ax = self.ax if loss is None else self.twin_ax\n lr_label = f'learning rate №{self.index + 1} ⟶ {lr[-1]:.0e}'\n lr_config = self.config.filter(keys=curve_keys, prefix='lr_')\n lr_curve = lr_ax.plot(lr, label=lr_label, **lr_config)\n lr_ax.set_ylabel('Learning rate', fontsize=12)\n curves.extend(lr_curve)\n\n return curves", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def compute_loss(self, sample):\n observations_batch, actions_batch, return_batch, masks_batch, \\\n old_action_log_probs_batch, adv_targ = sample\n\n assert old_action_log_probs_batch.shape == (self.mini_batch_size, 1)\n assert adv_targ.shape == (self.mini_batch_size, 1)\n assert return_batch.shape == (self.mini_batch_size, 1)\n\n values, action_log_probs, dist_entropy = self.evaluate_actions(\n observations_batch, actions_batch)\n\n assert values.shape == (self.mini_batch_size, 1)\n assert action_log_probs.shape == (self.mini_batch_size, 1)\n assert values.requires_grad\n assert action_log_probs.requires_grad\n assert dist_entropy.requires_grad\n\n # [TODO] Implement policy loss\n ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n surr1 = ratio * adv_targ\n surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # [TODO] Implement value loss\n value_loss = F.mse_loss(return_batch, values)\n\n # This is the total loss\n loss = policy_loss + self.config.value_loss_weight * value_loss - self.config.entropy_loss_weight * dist_entropy\n\n return loss, policy_loss, value_loss, dist_entropy", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def get_loss_fn():\n return reconstruction", "def loss_fn(model):\n with flax.deprecated.nn.stateful() as state:\n with flax.deprecated.nn.stochastic(dropout_rng):\n logits = model(example, train=True)\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets)\n mean_loss = loss / weight_sum\n return mean_loss, (logits, state)", "def get_loss(self, trajectories):\n # Use self.agent to replay the trajectories computation on the batch of trajectories\n replayed = replay_agent(self.agent, trajectories)\n\n info = trajectories.info\n trajectories = trajectories.trajectories\n\n # Compute the cumulated 
future reward\n reward = trajectories[\"_observation/reward\"]\n mask = trajectories.mask()\n reward = reward * mask\n max_length = trajectories.lengths.max().item()\n cumulated_reward = torch.zeros_like(reward)\n cumulated_reward[:, max_length - 1] = reward[:, max_length - 1]\n for t in range(max_length - 2, -1, -1):\n cumulated_reward[:, t] = (\n reward[:, t]\n + self.config[\"discount_factor\"] * cumulated_reward[:, t + 1]\n )\n\n # Compute reinforce loss\n action_probabilities = replayed[\"action_probabilities\"]\n action_distribution = torch.distributions.Categorical(action_probabilities)\n baseline = replayed[\"baseline\"].squeeze(-1)\n log_proba = action_distribution.log_prob(trajectories[\"action/action\"])\n reinforce_loss = log_proba * (cumulated_reward - baseline).detach()\n reinforce_loss = (reinforce_loss * mask).sum(1) / mask.sum(1)\n avg_reinforce_loss = reinforce_loss.mean()\n\n # Compute entropy loss\n entropy = action_distribution.entropy()\n entropy = (entropy * mask).sum(1) / mask.sum(1)\n avg_entropy = entropy.mean()\n\n # Compute baseline loss\n baseline_loss = (baseline - cumulated_reward) ** 2\n baseline_loss = (baseline_loss * mask).sum(1) / mask.sum(1)\n avg_baseline_loss = baseline_loss.mean()\n\n return DictTensor(\n {\n \"avg_reward\": cumulated_reward[:, 0].mean(),\n \"baseline_loss\": avg_baseline_loss,\n \"reinforce_loss\": avg_reinforce_loss,\n \"entropy_loss\": avg_entropy,\n }\n )", "def get_loss(\n self,\n inputs,\n outputs,\n annotations,\n cand_net,\n add_controller_regularization=True,\n add_evaluator_regularization=True,\n ):\n return sum(self._criterion(inputs, outputs, annotations, cand_net).values())", "def get_total_loss(add_regularization_losses=True, graph=None, name=\"total_loss\"):\n losses = tf.losses.get_losses()\n graph = graph or tf.get_default_graph()\n\n if add_regularization_losses:\n losses += [get_or_create_regularization_loss(graph)]\n return tf.add_n(losses, name=name)", "def get_loss(self, x):\n x = self.normalize(x)\n pernalty_func = 0\n if self.coincide_fun is not None:\n tmp_res = self.coincide_fun(x)\n for i in range(self.cons_num):\n cons = self.constrain[i]\n self.loss_list[i] = cons(x, tmp_res)\n pernalty_tmp = self.pow(self.relu(self.mul_k[i] / self.sigma_k + self.loss_list[i]), self.pow_rate)\\\n - self.pow((self.mul_k[i] / self.sigma_k), self.pow_rate)\n pernalty_func += pernalty_tmp\n objective_val = self.objective_func(x, tmp_res)\n else:\n for i in range(self.cons_num):\n cons = self.constrain[i]\n self.loss_list[i] = cons(x)\n pernalty_tmp = self.pow(self.relu(self.mul_k[i] / self.sigma_k + self.loss_list[i]), self.pow_rate)\\\n - self.pow((self.mul_k[i] / self.sigma_k), self.pow_rate)\n pernalty_func += pernalty_tmp\n objective_val = self.objective_func(x)\n loss1 = self.obj_weight * objective_val\n lagrangian_func = loss1 + self.sigma_k / 2 * pernalty_func\n res = [lagrangian_func, self.loss_list, objective_val, x]\n return res", "def learning_curve():\n loss = []\n val_loss = []\n data_size = []\n\n x_slid, y_slid = sliding_window_main(x, y)\n x_train, y_train, x_val, y_val, x_test, y_test = data_splitting_main(x_slid, y_slid)\n m_tot = x_train.shape[0]\n\n batch_step = 50\n try:\n for m in range(batch_size, m_tot, batch_step*batch_size):\n print(\"Training: \", m)\n net = create_network()\n history = trainer(net, x_train[:m], y_train[:m], x_val, y_val)\n loss.append(history.history[\"loss\"][-1])\n val_loss.append(history.history[\"val_loss\"][-1])\n data_size.append(m)\n\n print(\"Loss:\", loss[-1])\n 
print()\n\n finally:\n plt.plot(data_size, loss, label=\"Loss\", marker=\"o\")\n plt.plot(data_size, val_loss, label=\"Val Loss\", marker=\"o\")\n plt.xlabel(\"m\")\n plt.ylabel(\"Losses\")\n plt.title(\"Model Loss\")\n plt.legend()\n plt.savefig(\"img/\" + datetime.now().strftime(\"%y%m%d_%H%M\") + \"_learning_curve.png\")\n plt.show()\n plt.close()\n\n return loss, val_loss", "def get_loss(self, samples):\n return self.run_on_samples(self.loss.eval, samples)", "def reset_loss_sums():\n\tglobal decoder_nat_loss, decoder_syn_loss, KLD_syn_loss, KLD_nat_loss, regressor_nat, regressor_syn\n\tdecoder_nat_loss = decoder_syn_loss = KLD_syn_loss = KLD_nat_loss = regressor_nat = regressor_syn = 0", "def reconstruction_loss(self):\n return self._reconstruction_loss", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = self.calculate_average_loss(self.validation_dataloader)", "def _total_loss(self, inputs, logits, labels, image_size, is_training=None):\n if self.cfg.WITH_RECONSTRUCTION:\n loss, classifier_loss, reconstruct_loss, reconstructed_images = \\\n self._loss_with_rec(\n inputs, logits, labels, image_size, is_training=is_training)\n else:\n loss = self._loss_without_rec(logits, labels)\n classifier_loss, reconstruct_loss, reconstructed_images = \\\n None, None, None\n\n loss = tf.identity(loss, name='loss')\n\n return loss, classifier_loss, reconstruct_loss, reconstructed_images", "def get_last_lr(self) -> Tensor:\n\n return self.lr_scheduler.get_last_lr()", "def closure_with_history(x, give_history=give_history):\r\n result = loss_closure(x)\r\n\r\n if give_history:\r\n history.append(result['metrics'])\r\n\r\n return result['loss']", "def losses(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], Loss):\n yield name", "def get_loss(self, xs, y):\n \"*** YOUR CODE HERE question 4 ***\"\n return nn.SoftmaxLoss(self.run(xs), y)", "def get_last_fit(flag='L'):\n if flag == 'L':\n return l_coeff_queue[-1]\n else:\n return r_coeff_queue[-1]", "def _get_losses(self):\n # Fast-path already loaded\n if self.__losses is not None:\n return self.__losses\n # Initialize the dictionary\n self.__losses = dict()\n # Simply populate this dictionary\n for name in dir(torch.nn.modules.loss):\n if len(name) < 5 or name[0] == \"_\" or name[-4:] != \"Loss\": # Heuristically ignore non-loss members\n continue\n builder = getattr(torch.nn.modules.loss, name)\n if isinstance(builder, type): # Still an heuristic\n self.__losses[name[:-4].lower()] = self._make_drop_params(builder)\n # Add/replace the l1 and l2 losses\n self.__losses[\"l1\"] = self._l1loss_builder\n self.__losses[\"l2\"] = self._l2loss_builder\n # Return the dictionary\n return self.__losses", "def compute_loss(\n self,\n x: torch.Tensor,\n y: torch.Tensor,\n y_hat: torch.Tensor,\n extras: List[torch.Tensor],\n train_context: TrainContext,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:", "def update_losses(losses, l_loss, c_loss):\n losses[0] += l_loss\n losses[1] += c_loss\n losses[2] += l_loss\n losses[3] += c_loss", "def pseudo_loss(self, params, batches):\n loss = 0\n for batch in batches:\n states = batch[\"states\"]\n actions = batch[\"actions\"]\n returns = batch[\"returns\"]\n\n preds = self.predict_jax(params, states)\n\n baseline = jnp.mean(returns, axis=0)\n preds_select = jnp.take_along_axis(preds, jnp.expand_dims(actions, axis=2), axis=2).squeeze()\n loss += (-jnp.mean(jnp.sum(preds_select * (returns - baseline))))\n\n return loss + self.l2_regularizer(params, 0.001) # 
try to divide by len(batches)?", "def early_stopping(stats, curr_patience, prev_val_loss):\n # TODO implement early stopping\n curr_val_loss = stats[-1][1]\n if (curr_val_loss > prev_val_loss):\n curr_patience += 1\n else:\n prev_val_loss = curr_val_loss\n curr_patience = 0\n #\n return curr_patience, prev_val_loss", "def build_losses(self,\n labels,\n model_outputs,\n metrics,\n aux_losses=None) -> Tuple[tf.Tensor, tf.Tensor]:\n with tf.name_scope('MaskedLMTask/losses'):\n metrics = dict([(metric.name, metric) for metric in metrics])\n lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(\n labels['masked_lm_ids'],\n tf.cast(model_outputs['mlm_logits'], tf.float32),\n from_logits=True)\n lm_label_weights = labels['masked_lm_weights']\n lm_numerator_loss = tf.reduce_sum(lm_prediction_losses *\n lm_label_weights)\n lm_denominator_loss = tf.reduce_sum(lm_label_weights)\n mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)\n metrics['lm_example_loss'].update_state(mlm_loss)\n if 'next_sentence_labels' in labels:\n sentence_labels = labels['next_sentence_labels']\n sentence_outputs = tf.cast(\n model_outputs['next_sentence'], dtype=tf.float32)\n sentence_loss = tf.reduce_mean(\n tf.keras.losses.sparse_categorical_crossentropy(\n sentence_labels, sentence_outputs, from_logits=True))\n metrics['next_sentence_loss'].update_state(sentence_loss)\n total_loss = mlm_loss + sentence_loss\n else:\n total_loss = mlm_loss\n\n if aux_losses:\n total_loss += tf.add_n(aux_losses)\n return total_loss, lm_prediction_losses", "def backward(self):\n self.loss_similarity = [LNCC(warped_img, self.batch_fixed, self.corr_kernel) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [LNCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:], self.corr_kernel) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def kl_loss_batch(self):\n return sum([e for m in self.modules for e in m._kl_losses])", "def compute_loss(self, **kwargs):\n raise NotImplementedError", "def calculate_loss(self, a, label):\n if self.loss == 'mse':\n diff = a - label\n err = np.square(diff).mean(axis=0).mean()\n elif self.loss == 'ce':\n return sum(-np.log2(a[label > 0]))\n else:\n raise ValueError('loss function not implemented')\n return err", "def stats(self, epoch, loader, setname):\n losses = self.compute_stats(epoch, loader, setname)\n self.test_loss_hook(losses)\n logging.info(f'Epoch: {epoch}. 
losses: {losses}')\n return losses[\"NMSE\"]", "def current_losses(loss_name):\n loss = OrderedDict()\n for item in loss_name:\n loss[item] = []\n return loss", "def compute_loss(self, x, y):\n\n self.batch_size = x.shape[0]\n self.x = x\n self.y = y\n self.soft = self.softmax(x) + 10**(-11)\n out = np.zeros(self.batch_size)\n for i in range(self.batch_size):\n out[i] = -(y[i] @ np.log(self.soft[i]))\n\n return out", "def top_losses(self, n=4, val_data=None, preproc=None):\n\n # check validation data and arguments\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n if val is None:\n raise Exception(\"val_data must be supplied to get_learner or top_losses\")\n if type(n) == type(42):\n n = (0, n)\n\n # multilabel = True if U.is_multilabel(val) else False\n classification, multilabel = U.is_classifier(self.model)\n\n # get predicictions and ground truth\n y_pred = self.predict(val_data=val)\n y_true = self.ground_truth(val_data=val)\n y_true = y_true.astype(\"float32\")\n\n # adjust y_true for regression problems\n if (\n not classification\n and len(y_true.shape) == 1\n and (len(y_pred.shape) == 2 and y_pred.shape[1] == 1)\n ):\n y_true = np.expand_dims(y_true, -1)\n\n # compute loss\n # this doesn't work in tf.keras 1.14\n # losses = self.model.loss_functions[0](tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred))\n # if U.is_tf_keras():\n # L = self.model.loss_functions[0].fn\n # else:\n # L = self.model.loss_functions[0]\n L = U.loss_fn_from_model(self.model)\n losses = L(tf.convert_to_tensor(y_true), tf.convert_to_tensor(y_pred))\n if DISABLE_V2_BEHAVIOR:\n losses = tf.Session().run(losses)\n else:\n losses = losses.numpy()\n\n class_names = [] if preproc is None else preproc.get_classes()\n if preproc is None:\n class_fcn = lambda x: \"%s\" % (x)\n else:\n class_fcn = lambda x: class_names[x]\n\n # regression output modifications\n if not classification:\n if len(y_pred.shape) == 2 and y_pred.shape[1] == 1:\n y_pred = np.squeeze(y_pred)\n y_pred = np.around(y_pred, 2)\n if len(y_true.shape) == 2 and y_true.shape[1] == 1:\n y_true = np.squeeze(y_true)\n y_true = np.around(y_true, 2)\n\n # sort by loss and prune correct classifications, if necessary\n if classification and not multilabel:\n y_pred = np.squeeze(y_pred)\n y_true = np.squeeze(y_true)\n if len(y_pred.shape) == 1:\n y_p = np.where(y_pred > 0.5, 1, 0)\n y_t = np.where(y_true > 0.5, 1, 0)\n else:\n y_p = np.argmax(y_pred, axis=1)\n y_t = np.argmax(y_true, axis=1)\n tups = [\n (i, x, class_fcn(y_t[i]), class_fcn(y_p[i]))\n for i, x in enumerate(losses)\n if y_p[i] != y_t[i]\n ]\n else:\n tups = [\n (i, x, y_true[i], np.around(y_pred[i], 2)) for i, x in enumerate(losses)\n ]\n tups.sort(key=operator.itemgetter(1), reverse=True)\n\n # prune by given range\n tups = tups[n[0] : n[1]] if n is not None else tups\n return tups", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def get_loss(self, x, y):\n \"*** YOUR CODE HERE question 3 ***\"\n return nn.SoftmaxLoss(self.run(x), y)" ]
[ "0.7297349", "0.69542986", "0.6948095", "0.6948095", "0.68919253", "0.6775551", "0.665083", "0.66488665", "0.6633766", "0.6528974", "0.6520181", "0.65189093", "0.6494421", "0.64789355", "0.64417124", "0.64047754", "0.6394247", "0.639117", "0.6388315", "0.631335", "0.62982076", "0.62982076", "0.6297565", "0.6277467", "0.6260354", "0.6249968", "0.62422276", "0.6229282", "0.62292683", "0.62263453", "0.61967695", "0.61758083", "0.6171666", "0.6169488", "0.61640495", "0.6162833", "0.61547005", "0.61537474", "0.6147654", "0.61435544", "0.6141797", "0.61409223", "0.6134339", "0.612572", "0.61256766", "0.6101712", "0.6092262", "0.6090935", "0.6058237", "0.60455525", "0.6026139", "0.60044444", "0.5994804", "0.59645647", "0.5963608", "0.5956679", "0.59408975", "0.59280825", "0.59203655", "0.5916718", "0.5912077", "0.59023696", "0.58972895", "0.5895752", "0.5886038", "0.5886038", "0.587793", "0.58774614", "0.5873677", "0.586588", "0.5855802", "0.5852673", "0.58507854", "0.5846547", "0.584532", "0.5844267", "0.5842773", "0.5834787", "0.58326674", "0.5830279", "0.5829887", "0.58294636", "0.58220524", "0.5809637", "0.5808103", "0.5801551", "0.5791376", "0.57902706", "0.5785137", "0.5781665", "0.5775075", "0.57680225", "0.57640326", "0.5763404", "0.5763293", "0.5762454", "0.57591236", "0.5756835", "0.57515025", "0.57456267" ]
0.7003179
1
Compute any branch of the stable or unstable submanifolds of a saddle. Accepts fixed-point instances of class fixedpoint_2D.
def find_saddle_manifolds(fp, xname, ds=None, ds_gamma=None, ds_perp=None, tmax=None, max_arclen=None, ic=None, eps=None, ev_dirn=1, ic_ds=None, max_pts=1000, directions=(1,-1), which=('s', 'u'), other_pts=None, rel_scale=None, ds_perp_fac=0.75, verboselevel=0, fignum=None): if verboselevel > 1: figure_name, layer_name = plotter.active_layer _, layer_struct = plotter.active_layer_structs assert layer_struct is not None assert fp.classification == 'saddle' and not fp.degenerate if fp.evals[0] < 0: eval_s = fp.evals[0] eval_u = fp.evals[1] evec_s = fp.evecs[0] evec_u = fp.evecs[1] else: eval_s = fp.evals[1] eval_u = fp.evals[0] evec_s = fp.evecs[1] evec_u = fp.evecs[0] gen = fp.gen assert 'Gamma_out_plus' in gen.eventstruct, "Detection event surface(s) not present" assert 'Gamma_out_minus' in gen.eventstruct, "Detection event surface(s) not present" if eps is None: # Dividing fixed point's inherited epsilon tolerance by 100 eps = fp.eps / 100 ds_perp_eps = 1e-12 if ds_perp_fac >= 1 or ds_perp_fac <= 0: raise ValueError("ds_perp_fac must be between 0 and 1") normord = fp.normord if rel_scale is None: rel_scale = (1,1) dsscaled = dx_scaled_2D(ds, rel_scale) if isinstance(ds_gamma, dict): assert len(ds_gamma) == 2, "Invalid value for ds_gamma" assert remain(list(ds_gamma.keys()), [1,-1]) == [], \ "Invalid value for ds_gamma" else: try: ds_gamma = {1: ds_gamma, -1: ds_gamma} except: raise TypeError("Invalid type for ds_gamma") try: xcoord_ix = fp.point.coordnames.index(xname) except ValueError: raise ValueError("Invalid x coordinate name '%s'"%xname) else: # x coordinate index is either 0 or 1 for this 2D system # y coordinate index is therefore 1-xcoord_ix ycoord_ix = 1-xcoord_ix yname = fp.point.coordnames[ycoord_ix] if verboselevel>1: # validate coord names xn, yn = layer_struct.axes_vars if xname != xn and yname != yn: raise ValueError("x and y name mismatch with Plotter") def test_fn(x, dircode): if verboselevel>1: dm.log.msg("Integrate from test point", x=x[xname], y=x[yname], direction=dircode) gen.set(ics=x) try: test = gen.compute('test', dirn=dircode) except KeyboardInterrupt: raise except: raise RuntimeError("Integration failed") events = gen.getEvents() if verboselevel>1: pts=test.sample(coords=x.coordnames) # only show first 25 points unless Gamma bd not met plotter.add_data((pts[xname][:25],pts[yname][:25]), style='b-', layer=layer_name, name=dm.get_unique_name('test_traj_first25_')) if events['Gamma_out_plus'] is None: if events['Gamma_out_minus'] is None: if verboselevel>1: pts = test.sample(coords=x.coordnames) dm.log.msg("Error", err_msg="Did not reach Gamma surfaces", status="fail", last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) plotter.add_data((pts[xname],pts[yname]), style='b-', layer=layer_name, name=dm.get_unique_name('test_traj_full'), log=dm.log) raise RuntimeError("Did not reach Gamma surfaces") else: # hit Gamma_out_minus if verboselevel>1: dm.log.msg("Reached Gamma minus", t=events['Gamma_out_minus']['t'][0], last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) sgn = -1 else: if events['Gamma_out_minus'] is None: # hit Gamma_out_plus if verboselevel>1: dm.log.msg("Reached Gamma plus", t=events['Gamma_out_plus']['t'][0], last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) sgn = 1 else: # both were non-None, i.e. both events happened: impossibru! 
if verboselevel>1: pts = test.sample(coords=x.coordnames) dm.log.msg("Error", err_msg="Both Gamma surfaces reached", status="fail", last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) plotter.add_data((pts[xname],pts[yname]), style='b-', layer=layer_name, name=dm.get_unique_name('universe_fail'), log=dm.log) raise RuntimeError("Both Gamma surfaces reached, impossibly") return sgn def onto_manifold(x_ic, dn, normal_dir, dircode='f'): try: return bisection(test_fn, x_ic+dn*normal_dir, x_ic-dn*normal_dir, args=(dircode,), xtol=eps, maxiter=100, normord=normord) except AssertionError: if verboselevel>1: xp = x_ic+dn*normal_dir xm = x_ic-dn*normal_dir dm.log.msg("Error", err_msg="onto_manifold bisection fail", status="fail", point_p=xp, point_m=xm) plotter.add_data([xp[xname],xp[yname]], style='gx', layer=layer_name, name=dm.get_unique_name('xp'), log=dm.log) plotter.add_data([xm[xname],xm[yname]], style='gx', layer=layer_name, name=dm.get_unique_name('xm'), log=dm.log) plotter.show() raise RuntimeError("ds_perp too small? +/- initial displacement did not straddle manifold") except RuntimeError: if verboselevel>1: xp = x_ic+dn*normal_dir xm = x_ic-dn*normal_dir dm.log.msg("Error", err_msg="onto_manifold bisection fail", status="fail", point_p=xp, point_m=xm) plotter.add_data([xp[xname],xp[yname]], style='gx', layer=layer_struct.name, name=dm.get_unique_name('xp'), log=dm.log) plotter.add_data([xm[xname],xm[yname]], style='gx', layer=layer_struct.name, name=dm.get_unique_name('xm'), log=dm.log) plotter.show() raise gen.eventstruct['Gamma_out_plus'].activeFlag=True # terminal gen.eventstruct['Gamma_out_minus'].activeFlag=True # terminal assert tmax > 0 manifold = {'s': {1: None, -1: None}, 'u': {1: None, -1: None}} man_names = {'s': 'stable', 'u': 'unstable'} for w in which: # w = 's' => stable branch # w = 'u' => unstable branch if verboselevel>0: print("Starting %s branch" % man_names[w]) if w == 's': col = 'g' w_sgn = -1 integ_dircode = 'f' evec = evec_u evec_other = evec_s elif w == 'u': col = 'r' w_sgn = 1 integ_dircode = 'b' evec = evec_s evec_other = evec_u # set Gamma_out surfaces on "outgoing" branch # (polarity is arbitrary) p0_plus = fp.point + ds_gamma[1]*evec p0_minus = fp.point - ds_gamma[-1]*evec evec_perp = get_perp(evec) gen.eventstruct.setEventDir('Gamma_out_plus', ev_dirn) gen.eventstruct.setEventDir('Gamma_out_minus', -ev_dirn) gen.set(pars={'Gamma_out_plus_p_'+xname: p0_plus[xname], 'Gamma_out_plus_p_'+yname: p0_plus[yname], 'Gamma_out_plus_dp_'+xname: evec_perp[xname], 'Gamma_out_plus_dp_'+yname: evec_perp[yname], 'Gamma_out_minus_p_'+xname: p0_minus[xname], 'Gamma_out_minus_p_'+yname: p0_minus[yname], 'Gamma_out_minus_dp_'+xname: evec_perp[xname], 'Gamma_out_minus_dp_'+yname: evec_perp[yname], ## 'fp_'+xname: fp.point[xname], 'fp_'+yname: fp.point[yname] }, tdata = [0,tmax]) if verboselevel>1: if fignum is None: fignum=figure() else: figure(fignum) # plot event surfaces for gamma plus and minus exit events # ISSUE: Convert to plotter.add_data plot([p0_plus[xname]-dsscaled*evec_perp[xname],p0_plus[xname]+dsscaled*evec_perp[xname]], [p0_plus[yname]-dsscaled*evec_perp[yname],p0_plus[yname]+dsscaled*evec_perp[yname]], 'k-', linewidth=2) plot([p0_minus[xname]-dsscaled*evec_perp[xname],p0_minus[xname]+dsscaled*evec_perp[xname]], [p0_minus[yname]-dsscaled*evec_perp[yname],p0_minus[yname]+dsscaled*evec_perp[yname]], 'k-', linewidth=2) draw() check_other_pts = other_pts is not None if ic_ds is None: ic_ds = dsscaled else: ic_ds = dx_scaled_2D(ic_ds, rel_scale) if ic is 
None: ic = fp.point f_ic = -w_sgn * evec_other dirn_fix = 1 # not used for this case if verboselevel>0: # ISSUE: Convert to log entry print("f_ic from evec_other") print("evec_other " + str(evec_other)) print("f_ic = " + str(f_ic)) curve_len = 0 # initial estimate x0 = a point close to f.p. along manifold with # opposite stability else: # initial curve length from previous independent variable, if present # otherwise, assume zero if isinstance(ic, Pointset): assert len(ic) == 1, "Only pass a length-1 pointset" # (guarantee curve_len > 0) # BUG: for direction=-1 case, arc_len will be negative # and index 0 will have the smallest arc_len, not the # largest. Better not to use ic as Pointset option and # fix arc_len outside of call curve_len = abs(ic['arc_len'][0]) ic = ic[0] else: curve_len = 0 # ensure correct sign relative to starting point (if ic is None) sgns_orig = sign(-w_sgn * evec_other) f_ic_alpha = gen.Rhs(0, ic, gen.pars) # array in alpha order # f_ic here isn't normalized to length 1 like the case above that uses # evec_other (which is already normalized) f_ic = Point({xname: f_ic_alpha[xcoord_ix], yname: f_ic_alpha[ycoord_ix]}) sgns_f_ic = sign(f_ic) if any(sgns_orig != sgns_f_ic): dirn_fix = -1 f_ic = -f_ic else: dirn_fix = 1 if verboselevel>0: # ISSUE: Convert to log entry print("f_ic = " + str(f_ic)) for sgn in directions: piece = {} if verboselevel>0: # ISSUE: Convert to log entry print("Starting direction", sgn) # PREDICTION x0_ic = ic+w_sgn*sgn*ic_ds*f_ic/norm(f_ic, normord) if verboselevel>1: figure(fignum) # show starting point (initial estimate) as green circle # ISSUE: Convert to plotter.add_data plot(x0_ic[xname], x0_ic[yname], 'go', linewidth=1) # put x0 initial estimate onto stable manifold f_alpha = dirn_fix * gen.Rhs(0, x0_ic, gen.pars) # array in alpha order f = Point({xname: f_alpha[xcoord_ix], yname: f_alpha[ycoord_ix]}) normf = norm(f, normord) norm_to_flow = get_perp(f/normf) if verboselevel>1: # show flow direction from IC as solid red line plotter.add_data(([x0_ic[xname], x0_ic[xname]+dsscaled*f[xname]/normf], [x0_ic[yname], x0_ic[yname]+dsscaled*f[yname]/normf]), style='r-', name=dm.get_unique_name('flow_fwd'), log=dm.log) # show normal to flow direction from IC as dotted red line plotter.add_data(([x0_ic[xname], x0_ic[xname]+dsscaled*norm_to_flow[xname]], [x0_ic[yname], x0_ic[yname]+dsscaled*norm_to_flow[yname]]), style='r:', name=dm.get_unique_name('flow_perp'), log=dm.log) ds_perp_default = ds_perp # CORRECTION while ds_perp > ds_perp_eps: try: x = onto_manifold(x0_ic, ds_perp, norm_to_flow, dircode=integ_dircode) except RuntimeError as e: ds_perp *= ds_perp_fac else: break if ds_perp <= ds_perp_eps: # RuntimeError was raised and could not continue reducing ds_perp print("ds_perp reached lower tolerance =", ds_perp_eps) print(e) raise RuntimeError("Initial point did not converge") else: curve_len += norm(x-ic, normord) piece[sgn*curve_len] = x num_pts = 1 last_x = x if verboselevel>0: print("Initial point converged to (%.6f, %.6f)\n" % \ (x[xname], x[yname])) ds_perp = ds_perp_default last_f = f_ic # step backwards along local linear flow to predict next starting # position on manifold while curve_len < max_arclen and num_pts < max_pts: if verboselevel>0: # ISSUE: Convert to plotter.add_data figure(fignum) plot(last_x[xname], last_x[yname], col+'.', linewidth=1) if check_other_pts and sometrue([norm(last_x - pt, normord) < ds \ for pt in other_pts]): # we've hit a different fixed point (or other feature), so stop break f_alpha = dirn_fix * gen.Rhs(0, 
last_x, gen.pars) # array f = Point({xname: f_alpha[xcoord_ix], yname: f_alpha[ycoord_ix]}) if all(sign(f) != sign(last_f)): f = -f # on other side of manifold so must keep stepping in the # same direction, therefore switch signs! # PREDICTION x_ic = last_x + w_sgn*sgn*dsscaled*f/norm(f,normord) last_f = f if verboselevel>1: print("\nStarting from point ", last_x) delta = w_sgn*sgn*dsscaled*f/norm(f,normord) print("Trying point ", x_ic, "in direction (%.6f, %.6f)\n" % (delta[xname], delta[yname])) ds_perp = ds_perp_default # CORRECTION while ds_perp > ds_perp_eps: try: x = onto_manifold(x_ic, ds_perp, get_perp(f/norm(f,normord)), dircode=integ_dircode) except RuntimeError as e: ds_perp *= 0.75 else: break if ds_perp <= ds_perp_eps: # RuntimeError was raised and could not continue reducing ds_perp print("ds_perp reached lower tolerance =", ds_perp_eps) print(e) break # end while search else: curve_len += norm(x-last_x, normord) piece[sgn*curve_len] = x last_x = x num_pts += 1 if verboselevel>1: print("\nManifold has %i points" % num_pts) elif verboselevel>0: print(".", end=' ') sys.stdout.flush() indepvar, piece_sorted = sortedDictLists(piece, byvalue=False) manifold[w][sgn] = pointsToPointset(piece_sorted, indepvarname='arc_len', indepvararray=indepvar, norm=normord) if verboselevel>0: # finish the line on stdout print(" ") gen.eventstruct['Gamma_out_plus'].activeFlag=False gen.eventstruct['Gamma_out_minus'].activeFlag=False ## gen.eventstruct['fp_closest'].activeFlag=False return manifold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exact_saddle(V,X,Y,Z,dim,Z0=None):\n #from all_functions import find_saddle,sum_of_e_field\n if dim==3:\n print \"here\"\n print find_saddle(V,X,Y,Z,3)\n [I,J,K]=find_saddle(V,X,Y,Z,3) # guess saddle point; Z0 not needed\n print I,J,K\n r0=[X[I],Y[J],Z[K]]\n if I<2 or I>V.shape[0]-2: \n print('exact_saddle.py: Saddle point out of bounds in radial direction.')\n return r0\n if J<2 or J>V.shape[1]-2:\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.')\n return r0\n if K<2 or K>V.shape[2]-2:\n print('exact_saddle.py: Saddle point out of bounds in axial direction.')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # change grid vectors as well\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n #################################### Minimize\n r=spo.minimize(sum_of_e_field,r0,args=(Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],r[2] \n ################################################################################################# \n if dim==2: \n if len(V.shape)==3:\n K=0 # in case there is no saddle\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n K=i-1\n Vs = V.shape\n if K>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,K-1] # potential to left\n v2=V[:,:,K] # potential to right (actually right at estimate; K+1 to be actually to right)\n V2=v1+(v2-v1)*(Z0-Z[K-1])/(Z[K]-Z[K-1]) # averaged potential around given coordinate\n [I,J,K0]=find_saddle(V,X,Y,Z,2,Z0) \n r0=X[I],Y[J]\n print 1\n if (I<2 or I>V.shape[0]-2): \n print('exact_saddle.py: Saddle point out of bounds in radial direction.\\n')\n return r0\n if (J<2 or J>V.shape[1]-1):\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.\\n')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # Matlab 4, not 2\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n ################################## Minimize\n r=spo.minimize(sum_of_e_field_2d,r0,args=(Z0,Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],Z0\n print Xs\n print Ys\n print Zs\n return [Xs,Ys,Zs]", "def layer_sweep(self):\n for fixed_id, fixed_layer in enumerate(self.layers):\n if fixed_id + 1 == len(self.layers):\n break\n moving_layer = self.layers[fixed_id + 1]\n for node in moving_layer.nodes:\n self.find_neighbors(node)\n if len(node.neighbors) > 0:\n self.calculate_barycenter(node)\n else:\n node.barycenter = 0 #1000\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.barycenter, reverse=False)\n for slot, node in enumerate(sorted_nodes):\n node.slot = slot + 1\n barys = set([n.barycenter for n in sorted_nodes])\n bary_nodes = [list(filter(lambda x: x.barycenter == b, sorted_nodes)) for b in barys]\n for b in bary_nodes:\n if len(b) > 1:\n for node in b:\n if len(node.sl_neighbors) == 1:\n n_slot = node.sl_neighbors[0].slot\n if n_slot > node.slot:\n other_node = max(b, key=lambda s: s.slot)\n elif n_slot < node.slot:\n other_node = min(b, key=lambda s: s.slot)\n temp = node.slot\n node.slot = other_node.slot\n other_node.slot = temp\n sorted_nodes = 
sorted(moving_layer.nodes, key=lambda n: n.slot, reverse=False)\n moving_layer.nodes = sorted_nodes", "def cleanOpenBranches(skeleton, skelton_copy, points, radii, length, clean = True, verbose = False):\n \n assert np.isfortran(skeleton);\n assert np.isfortran(skelton_copy);\n \n timer = tmr.Timer();\n timer_all = tmr.Timer();\n \n # find branch and end points\n deg = cpl.convolve_3d_indices(skeleton, t3d.n26, points, out_dtype = 'uint8');\n branchpoints = points[deg >= 3];\n e_pts = points[deg == 1];\n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n #prepare temps\n #skel = skeleton.copy();\n skel_flat = np.reshape(skelton_copy, -1, order = 'A');\n strides = np.array(skelton_copy.strides);\n \n \n if verbose:\n timer.printElapsedTime('Detected %d branch and %d endpoints' % (branchpoints.shape[0], e_pts.shape[0]));\n timer.reset();\n \n label = np.arange(27);\n label = label.reshape([3,3,3]);\n label[1,1,1] = 0;\n \n critical_points = [e_pts];\n delete_points = [];\n \n for l in range(1, length + 1):\n #neighbours of end points\n e_pts_label = cpl.convolve_3d_indices(skelton_copy, label, e_pts);\n \n if verbose:\n timer.printElapsedTime('Done labeling %d / %d' % (l, length));\n timer.reset();\n \n #label zero points are non-critical short isolated branches\n e_pts_zero = e_pts_label == 0;\n #print 'zero length:', np.unravel_index(e_pts[e_pts_zero], skel.shape)\n if e_pts_zero.sum() > 0:\n keep = np.logical_not(e_pts_zero);\n for m in range(l):\n critical_points[m] = critical_points[m][keep];\n e_pts_label = e_pts_label[keep];\n e_pts = e_pts[keep];\n \n if verbose:\n timer.printElapsedTime('Ignored %d small branches' % (keep.sum()));\n timer.reset();\n \n e_pts_new = e_pts + np.sum((np.vstack(np.unravel_index(e_pts_label, label.shape)) - 1).T * strides, axis = 1)\n \n # did we hit a branch point\n delete = np.in1d(e_pts_new, branchpoints); #, assume_unique = True);\n keep = np.logical_not(delete);\n #print delete.shape, keep.shape, e_pts_new.shape\n \n #delete all path that hit a branch point\n if delete.sum() > 0:\n for m in range(l):\n delete_points.append(critical_points[m][delete]);\n #print 'deleting:', np.unravel_index(critical_points[m][delete], skel.shape)\n critical_points[m] = critical_points[m][keep];\n e_pts_new = e_pts_new[keep];\n \n if verbose:\n timer.printElapsedTime('Deleted %d points' % (delete.sum()));\n timer.reset();\n \n if l < length:\n skel_flat[e_pts] = False; # remove endpoints for new neighbour detection\n critical_points.append(e_pts_new);\n e_pts = e_pts_new;\n \n if verbose:\n timer.printElapsedTime('Cleanup iteration %d / %d done.' 
% (l, length));\n \n #gather all points\n if len(delete_points) > 0:\n delete_points = np.hstack(delete_points);\n delete_points = np.unique(delete_points);\n else:\n delete_points = np.zeros(0);\n \n if verbose:\n timer_all.printElapsedTime('Cleanup');\n \n if clean:\n skel_flat = np.reshape(skeleton, -1, order = 'F');\n skel_flat[delete_points] = False;\n keep_ids = np.logical_not(np.in1d(points, delete_points, assume_unique = True))\n points = points[keep_ids];\n radii = radii[keep_ids];\n return skeleton, points, radii\n \n return delete_points;", "def solve_step(ball_list, step,borders,obstacle=None):\n ball_list = step1(ball_list, step,borders,obstacle)\n ball_list = step2(ball_list, step)", "def branch_competetive(state, time, d):\n\n th0 = state[0] \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"])]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]):]\n \n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n ### calculate cytokine effect on rate\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2)\n \n ### differentiate effectors th1 \n alpha = d[\"alpha1\"]\n p = 1.\n dt_th1 = diff_effector2(th1, th0, alpha, beta1, d[\"beta1_p\"], p, d)\n ### differentiate effectors th2\n alpha = d[\"alpha2\"]\n p = 1.\n dt_th2 = diff_effector2(th2, th0, alpha, beta2, d[\"beta2_p\"], p, d)\n \n ### combine all cells\n dt_th0 = -(beta1+beta2)*th0\n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def __detect_branching_haghverdi16(\n self, Dseg: np.ndarray, tips: np.ndarray\n ) -> np.ndarray:\n # sort distance from first tip point\n # then the sequence of distances Dseg[tips[0]][idcs] increases\n idcs = np.argsort(Dseg[tips[0]])\n # consider now the sequence of distances from the other\n # two tip points, which only increase when being close to `tips[0]`\n # where they become correlated\n # at the point where this happens, we define a branching point\n if True:\n imax = self.kendall_tau_split(\n Dseg[tips[1]][idcs],\n Dseg[tips[2]][idcs],\n )\n if False:\n # if we were in euclidian space, the following should work\n # as well, but here, it doesn't because the scales in Dseg are\n # highly different, one would need to write the following equation\n # in terms of an ordering, such as exploited by the kendall\n # correlation method above\n imax = np.argmin(\n Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]\n )\n # init list to store new segments\n ssegs = [] # noqa: F841 # TODO Look into this\n # first new segment: all points until, but excluding the branching point\n # increasing the following slightly from imax is a more conservative choice\n # as the criterion based on normalized distances, which follows below,\n # is less stable\n if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift:\n # if \"everything\" is correlated (very large value of imax), a more\n # conservative choice amounts to reducing this\n logg.warning(\n 'shifting branching point away from maximal kendall-tau '\n 'correlation (suppress this with `allow_kendall_tau_shift=False`)'\n )\n ibranch = int(0.95 * imax)\n else:\n # otherwise, a more conservative choice is the following\n 
ibranch = imax + 1\n return idcs[:ibranch]", "def classify_fixedpoints(fps, scale):\n\n x_directions = []\n scale = scale\n for fp in fps:\n\n trace = np.matrix.trace(fp['jac'])\n det = np.linalg.det(fp['jac'])\n if det > 0 and trace == 0:\n print('center has been found. Watch out for limit cycles')\n elif trace**2 - 4 * det == 0:\n print(\"star nodes has been found.\")\n elif trace**2 - 4 * det < 0:\n print(\"spiral has been found\")\n e_val, e_vecs = np.linalg.eig(fp['jac'])\n ids = np.argwhere(np.real(e_val) > 0)\n countgreaterzero = np.sum(e_val > 0)\n if countgreaterzero == 0:\n print('stable fixed point was found.')\n fp['fp_stability'] = 'stable fixed point'\n elif countgreaterzero > 0:\n print('saddle point was found.')\n fp['fp_stability'] = 'saddle point'\n for id in ids:\n x_plus = fp['x'] + scale * e_val[id] * np.real(e_vecs[:, id].transpose())\n x_minus = fp['x'] - scale * e_val[id] * np.real(e_vecs[:, id].transpose())\n x_direction = np.vstack((x_plus, fp['x'], x_minus))\n x_directions.append(np.real(x_direction))\n\n return fps, x_directions", "def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = (p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * 
up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike", "def solve_step(particle_list, step, size):\r\n \r\n # Detect edge-hitting and collision of every particle\r\n for i in range(len(particle_list)):\r\n particle_list[i].compute_refl(step,size)\r\n for j in range(i+1,len(particle_list)):\r\n particle_list[i].compute_coll(particle_list[j],step) \r\n\r\n \r\n # Compute position of every particle \r\n for particle in particle_list:\r\n particle.compute_step(step)", "def optimize_cobra_model(sbml, bound=INF):\n\n cobra = convert_sbml_to_cobra(sbml, bound)\n\n N, L, U = cobra['S'], list(cobra['lb']), list(cobra['ub'])\n f, b = list(cobra['c']), list(cobra['b'])\n v_sol, f_opt, conv = easy_lp(f, N, b, L, U, one=True)\n\n return v_sol, f_opt", "def eval_top_down(\n root: Node,\n x: np.ndarray,\n lls: np.ndarray,\n leaf_func: Callable[[Leaf, np.ndarray, Any], np.ndarray],\n sum_func: Callable[[Sum, np.ndarray, Any], np.ndarray],\n leaf_func_kwargs: Optional[dict] = None,\n sum_func_kwargs: Optional[dict] = None,\n inplace: bool = False,\n n_jobs: int = 0\n) -> np.ndarray:\n if leaf_func_kwargs is None:\n leaf_func_kwargs = dict()\n if sum_func_kwargs is None:\n sum_func_kwargs = dict()\n\n # Check the SPN\n check_spn(root, labeled=True, smooth=True, decomposable=True)\n\n # Copy the input array, if not inplace mode\n if not inplace:\n x = np.copy(x)\n\n def eval_backward(n):\n if isinstance(n, Leaf):\n mask = np.ix_(masks[n.id], n.scope)\n x[mask] = leaf_func(n, x[mask], **leaf_func_kwargs)\n elif isinstance(n, Product):\n for c in n.children:\n masks[c.id] |= masks[n.id]\n elif isinstance(n, Sum):\n children_lls = np.stack([lls[c.id] for c in n.children], axis=1)\n branch = sum_func(n, children_lls, **sum_func_kwargs)\n for i, c in enumerate(n.children):\n masks[c.id] |= masks[n.id] & (branch == i)\n else:\n raise NotImplementedError(f\"Top down evaluation not implemented for node of type {n.__class__.__name__}\")\n\n if n_jobs == 0:\n # Compute the topological ordering\n ordering = topological_order(root)\n if ordering is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n n_nodes, n_samples = len(ordering), len(x)\n\n # Build the array consisting of top-down path masks\n masks = np.zeros(shape=(n_nodes, n_samples), dtype=np.bool_)\n masks[root.id] = True\n for node in ordering:\n eval_backward(node)\n else:\n # Compute the layered topological ordering\n layers = topological_order_layered(root)\n if layers is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n n_nodes, n_samples = sum(map(len, layers)), len(x)\n\n # Build the array consisting of top-down path masks\n masks = np.zeros(shape=(n_nodes, n_samples), dtype=np.bool_)\n masks[root.id] = True\n parallel_layerwise_eval(layers, eval_backward, reverse=False, n_jobs=n_jobs)\n\n return x", "def identify_leaflets_cluster(self,pts,vec,topologize_time_limit=30,max_count_asymmetry=0.05):\n\t\timport scipy\n\t\timport sklearn\n\t\timport sklearn.neighbors\n\t\timport sklearn.cluster\n\t\tnlipids = len(pts)\n\t\t#---time limit on the topologize function which joins broken bilayers e.g. 
a saddle that crosses PBCs\n\t\ttry:\n\t\t\twith time_limit(topologize_time_limit): \n\t\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t\t**({'tol':self.topologize_tolerance} if self.topologize_tolerance else {}))\n\t\texcept TimeoutException: \n\t\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\t\twrapper = np.zeros((len(pts),3))\n\t\tfindframe = pts + wrapper*np.array(vec)\n\t\t#---ensure that all points are in the box\n\t\tfindframe += vec*(findframe<0) - vec*(findframe>vec)\n\t\t#---previous calculation of connectivity was done manually\n\t\tif False:\n\t\t\t#---conservative cutoff gets lots of nearby points\n\t\t\tcutoff = 10.0\n\t\t\tcutoff_short = 2.0\n\t\t\t#---make a K-D tree from the points\n\t\t\ttree = scipy.spatial.ckdtree.cKDTree(findframe,boxsize=np.concatenate((vec,vec))+0.*eps)\n\t\t\t#---find the nearest reference points for each instantaneous point\n\t\t\tclose,nns = tree.query(findframe,distance_upper_bound=cutoff,k=20)\n\t\t\t#---construct the neighbor list\n\t\t\tsubjects = np.where(np.all((close<cutoff,close>0),axis=0))\n\t\t\t#---get the pairs of neighbors\n\t\t\tsubjects,neighbors = subjects[0],nns[subjects]\n\t\t\tpds = np.ones((nlipids,nlipids))*0.0\n\t\t\tpds[tuple((np.arange(nlipids),np.arange(nlipids)))] = 0.0\n\t\t\tnears = np.where(np.all((close>0,close<=cutoff_short),axis=0))\n\t\t\tpds[tuple((nears[0],nns[nears]))] = 1.0#close[nears]\n\t\t\tpds[tuple((nns[nears],nears[0]))] = 1.0#close[nears]\n\t\tconnectivity = sklearn.neighbors.kneighbors_graph(findframe,\n\t\t\tn_neighbors=self.cluster_neighbors,include_self=False)\n\t\tward = sklearn.cluster.AgglomerativeClustering(n_clusters=2,\n\t\t\tconnectivity=connectivity,linkage='complete').fit(findframe)\n\t\timono = ward.labels_\n\t\tif np.mean(imono)==0.5: \n\t\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\telif (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry):\n\t\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\t\traise Exception('[ERROR] failed to identify leaflets. '\n\t\t\t\t'DEVELOPMENT NOTE!? 
use legacy or a different cutoff?')\n\t\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\t\treturn np.array(imono)", "def findStableState(L, boundaryConditions, Minv = None):\n\tn = L.shape[0]\n\tm = len(boundaryConditions)\n\tVb = np.zeros(m)\n\tpositions = {}\n\tfor i in range(m):\n\t\tcondition = boundaryConditions[i]\n\t\tVb[i] = condition[0]\n\t\tpositions[condition[0]] = condition[1]\n\tVb = np.sort(Vb)\n\tBPrime = np.zeros((m, n))\n\tYPrime = np.zeros((m, 3))\n\tfor i in range(m):\n\t\tBPrime[i][int(Vb[i])] = 1\n\t\tYPrime[i] = positions[Vb[i]]\n\n\tif Minv is None:\n\t\tzeroCorner = np.zeros((m, m))\n\t\tM = np.array(np.bmat([[L, -BPrime.T], [BPrime, zeroCorner]]))\n\t\tMinv = np.linalg.inv(M)\n\n\tXT = np.zeros((3, n))\n\t# find x coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[0]\n\tx = np.dot(Minv, y)\n\tXT[0] = x[:n]\n\t# find y coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[1]\n\tx = np.dot(Minv, y)\n\tXT[1] = x[:n]\n\t# find z coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[2]\n\tx = np.dot(Minv, y)\n\tXT[2] = x[:n]\n\n\treturn XT.T", "def extract_1d_boundaries(xy, NL, KL, BL, PVx, PVy, check=False):\n if PVx is None and PVy is None:\n raise RuntimeError('Not designed to allow openBC networks.')\n # PVx = np.zeros_like(KL, dtype=float)\n # PVy = np.zeros_like(KL, dtype=float)\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n # If no dangling bonds, no need to translate indices at the end\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n boundaries = []\n for boundaryloc in ['top', 'bottom']:\n # Initialize the boundary list to be as long as possible (will truncate later)\n bb = np.zeros(2 * len(xy), dtype=int)\n if boundaryloc == 'top':\n # Start with the topmost point, which is guaranteed to be\n # at the convex hull and thus also at the top outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.max(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n else:\n # Start with the bottom most point, which is guaranteed to be\n # at the convex hull and thus also at the bottom outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.min(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_1d_boundaries(): Found extremal pt: ', rightIND\n print 'le.extract_1d_boundaries(): with neighbors: ', NL[rightIND]\n print 'le.extract_1d_boundaries(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n # print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n connect = np.argwhere(np.abs(KL[rightIND]).ravel()).ravel()\n neighbors = NL[rightIND, connect]\n if check:\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - 
xy[rightIND, 1] + PVy[rightIND, connect],\n xy[neighbors, 0] - xy[rightIND, 0] + PVx[rightIND, connect]).ravel(),\n 2 * np.pi)\n if check:\n print 'le.extract_1d_boundaries(): KL[rightIND] = ', KL[rightIND]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] = ', KL[rightIND, 0]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles = ', angles\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[rightIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[rightIND, connect[angles == min(angles)][0]]\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n connect = np.argwhere(np.abs(KL[nextIND]).ravel())\n n_tmp = NL[nextIND, connect]\n\n # Get position in row of NL where NL == bb[dmyi - 1] (the previous boundary particle/site)\n # and where the PVx and PVy are opposite of the last used PVx and PVy values (to make sure we\n # are looking backwards along the boundary). We will use this to get the 'backward angle' -- the\n # angle of the previous bond in the boundary\n # Note that bb[dmyi - 1] may have been index 0, so there could be multiple matches\n nlpos = np.where(np.logical_and(NL[nextIND] == bb[dmyi - 1],\n np.abs(KL[nextIND]).ravel().astype(bool)))[0]\n if len(nlpos) > 1:\n # There is more than one connection to the previous particle. 
Check for where PVx and PVy\n # values are opposite the previously used values.\n ind_nlpos = np.where(np.logical_and(PVx[nextIND, nlpos] == -pvx_prev,\n PVy[nextIND, nlpos] == -pvy_prev))[0]\n print 'ind_nlpos = ', ind_nlpos\n nlpos = nlpos[ind_nlpos]\n\n # Exclude previous boundary particle (the copy of that particle in the nlpos position)\n # from the neighbors array, UNLESS IT IS THE ONLY ONE,\n # since its angle with itself is zero!\n\n # Used to remove previous particle, but this assumes that boundary is more than 2\n # particles long, which might not be true for periodic_strip bcs\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle.'\n neighbors = n_tmp\n else:\n print 'n_tmp = ', n_tmp\n neighbors = np.delete(n_tmp, nlpos)\n connect = np.delete(connect, nlpos)\n print 'n_tmp = ', n_tmp\n print 'neighbors = ', neighbors\n\n # print 'le: nlpos = ', nlpos\n forward_angles = np.arctan2(xy[neighbors, 1] - xy[nextIND, 1] + PVy[nextIND, connect],\n xy[neighbors, 0] - xy[nextIND, 0] + PVx[nextIND, connect]).ravel()\n backward_angle = np.arctan2(xy[bb[dmyi - 1], 1] - xy[nextIND, 1] + PVy[nextIND, nlpos],\n xy[bb[dmyi - 1], 0] - xy[nextIND, 0] + PVx[nextIND, nlpos]).ravel()\n if check:\n print 'le: connect = ', connect\n print 'le: forward_angles = ', forward_angles\n print 'le: backward_angle = ', backward_angle\n\n angles = np.mod(forward_angles - backward_angle, 2 * np.pi)\n if check:\n print 'le: angles = ', angles\n print 'le: angles==min--> ', angles == min(angles)\n print 'le: neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles==min--> ', angles == min(angles)\n print 'le.extract_1d_boundaries(): neighbors[angles == min(angles)] --> ', neighbors[angles == min(angles)]\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[nextIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[nextIND, connect[angles == min(angles)][0]]\n # Redefine nextIND to be the new boundary index\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n boundaries.append(boundary)\n\n return tuple(boundaries)", "def see_what_its_doing_2d_comparison(length_scale, true_values=False):\n\n a = create_points_with_spatially_correlated_pollution_2d(10, 100, 10, length_scale, 1)\n b = pick_uniform_random_points_on_map_of_maps(a, 20)\n c1 = interpolate_unknown_points_of_a_map_of_maps_of_points(b, a, RBF(length_scale), fixed=True)\n c2 = interpolate_unknown_points_of_a_map_of_maps_of_points(b, a, RBF(np.random.randint(1, 10000)), fixed=False)\n\n x1 = []\n y1 = []\n z1 = []\n for point in b[0].values():\n x1.append(point.get_x_cord())\n y1.append(point.get_y_cord())\n z1.append(point.get_pollution_value())\n\n x2_fixed = []\n y2_fixed = 
[]\n z2_fixed = []\n\n for label, point in c1[0][0].items():\n if not label in b[0].keys():\n x2_fixed.append(point.get_x_cord())\n y2_fixed.append(point.get_y_cord())\n z2_fixed.append(point.get_pollution_value())\n\n x2_not_fixed = []\n y2_not_fixed = []\n z2_not_fixed = []\n\n for label, point in c2[0][0].items():\n if not label in b[0].keys():\n x2_not_fixed.append(point.get_x_cord())\n y2_not_fixed.append(point.get_y_cord())\n z2_not_fixed.append(point.get_pollution_value())\n\n if true_values:\n x3_true_values = []\n y3_true_values = []\n z3_true_values = []\n\n for label, point in a[0].items():\n if not label in b[0].keys():\n x3_true_values.append(point.get_x_cord())\n y3_true_values.append(point.get_y_cord())\n z3_true_values.append(point.get_actual_pollution_value())\n\n plot_numbers_3d_and_save(x3_true_values, y3_true_values, z3_true_values, x2_fixed, y2_fixed, z2_fixed,\n \"True Value Comparison Fixed Graph.gif\")\n plot_numbers_3d_and_save(x3_true_values, y3_true_values, z3_true_values, x2_not_fixed, y2_not_fixed,\n z2_not_fixed, \"True value Not Fixed Graph.gif\")\n\n plot_numbers_3d_and_save(x1, y1, z1, x2_fixed, y2_fixed, z2_fixed, \"Fixed Rotating Graph.gif\")\n plot_numbers_3d_and_save(x1, y1, z1, x2_not_fixed, y2_not_fixed, z2_not_fixed, \"Not Fixed Rotating Graph.gif\")", "def _detect_branching(\n self,\n Dseg: np.ndarray,\n tips: np.ndarray,\n seg_reference=None,\n ) -> Tuple[\n List[np.ndarray],\n List[np.ndarray],\n List[List[int]],\n List[List[int]],\n int,\n ]:\n if self.flavor == 'haghverdi16':\n ssegs = self._detect_branching_single_haghverdi16(Dseg, tips)\n elif self.flavor == 'wolf17_tri':\n ssegs = self._detect_branching_single_wolf17_tri(Dseg, tips)\n elif self.flavor == 'wolf17_bi' or self.flavor == 'wolf17_bi_un':\n ssegs = self._detect_branching_single_wolf17_bi(Dseg, tips)\n else:\n raise ValueError(\n '`flavor` needs to be in {\"haghverdi16\", \"wolf17_tri\", \"wolf17_bi\"}.'\n )\n # make sure that each data point has a unique association with a segment\n masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)\n for iseg, seg in enumerate(ssegs):\n masks[iseg][seg] = True\n nonunique = np.sum(masks, axis=0) > 1\n ssegs = []\n for iseg, mask in enumerate(masks):\n mask[nonunique] = False\n ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])\n # compute new tips within new segments\n ssegs_tips = []\n for inewseg, newseg in enumerate(ssegs):\n if len(np.flatnonzero(newseg)) <= 1:\n logg.warning(f'detected group with only {np.flatnonzero(newseg)} cells')\n secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]\n ssegs_tips.append([tips[inewseg], secondtip])\n undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]\n if len(undecided_cells) > 0:\n ssegs.append(undecided_cells)\n # establish the connecting points with the other segments\n ssegs_connects = [[], [], [], []]\n for inewseg, newseg_tips in enumerate(ssegs_tips):\n reference_point = newseg_tips[0]\n # closest cell to the new segment within undecided cells\n closest_cell = undecided_cells[\n np.argmin(Dseg[reference_point][undecided_cells])\n ]\n ssegs_connects[inewseg].append(closest_cell)\n # closest cell to the undecided cells within new segment\n closest_cell = ssegs[inewseg][\n np.argmin(Dseg[closest_cell][ssegs[inewseg]])\n ]\n ssegs_connects[-1].append(closest_cell)\n # also compute tips for the undecided cells\n tip_0 = undecided_cells[\n np.argmax(Dseg[undecided_cells[0]][undecided_cells])\n ]\n tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]\n 
ssegs_tips.append([tip_0, tip_1])\n ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]\n trunk = 3\n elif len(ssegs) == 3:\n reference_point = np.zeros(3, dtype=int)\n reference_point[0] = ssegs_tips[0][0]\n reference_point[1] = ssegs_tips[1][0]\n reference_point[2] = ssegs_tips[2][0]\n closest_points = np.zeros((3, 3), dtype=int)\n # this is another strategy than for the undecided_cells\n # here it's possible to use the more symmetric procedure\n # shouldn't make much of a difference\n closest_points[0, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[0]][ssegs[1]])\n ]\n closest_points[1, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[1]][ssegs[0]])\n ]\n closest_points[0, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[0]][ssegs[2]])\n ]\n closest_points[2, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[2]][ssegs[0]])\n ]\n closest_points[1, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[1]][ssegs[2]])\n ]\n closest_points[2, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[2]][ssegs[1]])\n ]\n added_dist = np.zeros(3)\n added_dist[0] = (\n Dseg[closest_points[1, 0], closest_points[0, 1]]\n + Dseg[closest_points[2, 0], closest_points[0, 2]]\n )\n added_dist[1] = (\n Dseg[closest_points[0, 1], closest_points[1, 0]]\n + Dseg[closest_points[2, 1], closest_points[1, 2]]\n )\n added_dist[2] = (\n Dseg[closest_points[1, 2], closest_points[2, 1]]\n + Dseg[closest_points[0, 2], closest_points[2, 0]]\n )\n trunk = np.argmin(added_dist)\n ssegs_adjacency = [\n [trunk] if i != trunk else [j for j in range(3) if j != trunk]\n for i in range(3)\n ]\n ssegs_connects = [\n [closest_points[i, trunk]]\n if i != trunk\n else [closest_points[trunk, j] for j in range(3) if j != trunk]\n for i in range(3)\n ]\n else:\n trunk = 0\n ssegs_adjacency = [[1], [0]]\n reference_point_in_0 = ssegs_tips[0][0]\n closest_point_in_1 = ssegs[1][\n np.argmin(Dseg[reference_point_in_0][ssegs[1]])\n ]\n reference_point_in_1 = closest_point_in_1 # ssegs_tips[1][0]\n closest_point_in_0 = ssegs[0][\n np.argmin(Dseg[reference_point_in_1][ssegs[0]])\n ]\n ssegs_connects = [[closest_point_in_1], [closest_point_in_0]]\n return ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk", "def computeB(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz = True\n \n def computeCMBY(d0):\n \"\"\"\n For CMB, y = S^1/2 A N^-1 d, where S is CMB signal covariance matrix (Cl's)\n \"\"\"\n # N.B. 
Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2\n \n def computeClusterY(d0):\n \"\"\"\n For cluster, y = F^T A^T N^-1 d, where F is TSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[0][ic][freq] * g_nu[freq])\n return d2\n \n def computeClusterKSZY(d0):\n \"\"\"\n For cluster, y = K^T A^T N^-1 d, where K is KSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2\n \n def computeMonopoleY(d0):\n \"\"\"\n Overall monopole amplitude.\n \"\"\"\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2 += numpy.sum(d1 * ninvs[freq])\n return(d2)\n \n \n # CMB realisation; convolve white noise map with beam and multiply by \n # signal covmat S^1/2 in harmonic space\n b0 = numpy.random.randn(ny,nx)\n a_l = numpy.fft.fft2(b0, b0.shape)\n a_l *= precond_2d * power_2d**(-0.5)\n b0 = numpy.fft.irfft2(a_l, b0.shape)\n \n # Calculate per-band noise realisation.\n # Multiply by pixel-space N^1/2, convolve with beam, and sum over \n # cluster pixels to get RHS\n b1 = 0; b4 = 0\n b2 = numpy.zeros(nCluster)\n if ksz: b3 = numpy.zeros(nCluster)\n \n for freq in range(nFreq):\n _b = numpy.random.randn(ny,nx) * ninvs[freq]**0.5\n a_l = numpy.fft.fft2(_b) * beams[freq] * precond_2d\n b1 += numpy.fft.irfft2(a_l, _b.shape)\n b4 += numpy.sum(_b)\n for ic in range(nCluster):\n b2[ic] += numpy.sum( _b * g_nu[freq] * clumaps[0][ic][freq] )\n if ksz: b3[ic] += numpy.sum( _b * clumaps[1][ic][freq] )\n\n b0 = numpy.reshape(b0,(nx*ny))\n b1 = numpy.reshape(b1,(nx*ny))\n \n\n # Compute CMB and cluster data parts of b\n b_CMB = computeCMBY(datamaps) + b0 + b1\n b_mono = computeMonopoleY(datamaps) + b4\n b_tsz = computeClusterY(datamaps) + b2\n if ksz: b_ksz = computeClusterKSZY(datamaps) + b3\n \n # Return total b vector (Ncmbpix + 1 + (1|2)*Ncluster elements in vector)\n b = numpy.append(b_CMB, b_mono)\n b = numpy.append(b, b_tsz)\n if ksz: b = numpy.append(b, b_ksz)\n return b", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = 
Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def identify_lipid_leaflets_legacy(pts,vec,monolayer_cutoff,\n\tmonolayer_cutoff_retry=True,max_count_asymmetry=0.05,pbc_rewrap=True,\n\ttopologize_tolerance=None,topologize_time_limit=30):\n\t#---previous default was somewhat high, but typically came in from specs, and we reduced it incrementally\n\tif monolayer_cutoff==None: monolayer_cutoff = 2.0\n\t#---time limit on the tolerance checker\n\ttry:\n\t\twith time_limit(topologize_time_limit): \n\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t**({'tol':topologize_tolerance} if topologize_tolerance else {}))\n\texcept TimeoutException: \n\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\twrapper = np.zeros((len(pts),3))\n\tfindframe = pts + wrapper*np.array(vec)\n\tstatus('this step is somewhat slow. 
it uses scipy.spatial.pdist.',tag='warning')\n\tpd = [scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(findframe[:,d:d+1])) \n\t\tfor d in range(3)]\n\tif pbc_rewrap:\n\t\tpd3pbc = np.sqrt(np.sum(np.array([pd[d]-(pd[d]>vec[d]/2.)*vec[d]+(pd[d]<-1*vec[d]/2.)*vec[d] \n\t\t\tfor d in range(3)])**2,axis=0))\n\telse: pd3pbc = pd\n\tnbors = np.transpose(np.where(pd3pbc<monolayer_cutoff))\n\tnlipids = len(pts)\n\timono = np.zeros(nlipids)\n\tnlist = []\n\tfor i in range(nlipids):\n\t\tstatus('cataloging lipids',i=i,looplen=nlipids,tag='compute')\n\t\tnlist.append(nbors[np.where(nbors[:,0]==i)[0],1])\n\tiref = 0\n\tmono = np.zeros(nlipids)\n\tsearched = np.zeros(nlipids)\n\timono[iref],searched[iref] = 1,1\n\timono[nlist[iref]] = 1\n\twhile np.any(np.all((imono==1,searched==0),axis=0)):\n\t\tfor iref in np.where(np.all((imono==1,searched==0),axis=0))[0]: \n\t\t\timono[nlist[iref]] = 1\n\t\t\tsearched[iref] = 1\n\t#---check that the leaflets were properly distinguished by looking at the number in each monolayer\n\tif np.mean(imono)==0.5: \n\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\treturn imono\n\telif (monolayer_cutoff_retry and (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry)):\n\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\tstatus('[COMPUTE] leaflets = '+str(np.sum(imono))+'/'+str(len(imono)))\n\t\tstatus('[WARNING] previous monolayer_cutoff = '+str(monolayer_cutoff))\n\t\traise Exception(\n\t\t\t'[ERROR] failed to identify leaflets so we are returning an exception to the LeafletFinder')\n\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\treturn imono", "def find_saddle(V,X,Y,Z,dim,Z0=None):\n debug=False # internal code only; typically False\n from project_parameters import scale\n if (dim==2 and Z0==None):\n return 'z0 needed for evaluation'\n if dim==3:\n if len(V.shape)!=3:\n return('Problem with find_saddle.m dimensionalities.')\n f=V/float(np.amax(V)) # Normalize field\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale) # grid spacing is automatically consistent thanks to BEM-solver\n E=np.sqrt(Ex**2+Ey**2+Ez**2) # magnitude of gradient (E field)\n m=E[1,1,1]\n origin=[1,1,1]\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n for k in range(E.shape[2]):\n if E[i,j,k]<m:\n m=E[i,j,k]\n origin=[i,j,k] \n if debug:\n print('DEBUGGING...')\n fig=plt.figure()\n e=np.reshape(E,(1,E.shape[0]*E.shape[1]*E.shape[2]))\n ind,e=np.argsort(e),np.sort(e)\n e=e[0]\n ind=ind[0] #Sort V by the same indexing.\n v=np.reshape(V,(1,V.shape[0]*V.shape[1]*V.shape[2]))\n v=v[0]\n plt.plot(e/float(np.amax(e)))\n def index_sort(v,e):\n \"\"\"Takes in two lists of the same length and returns the first sorted by the indexing of i sorted.\"\"\"\n es=np.sort(e)\n ix=np.argsort(e)\n vs=np.ones(len(v)) #Sorted by the sorting defined by f being sorted. 
\n # If v==e, this returns es.\n for i in range(len(v)):\n j=ix[i]\n vs[i]=v[j]\n return vs\n v=index_sort(v,e) # Is it supposed to look like this?\n plt.plot(v/float(np.amax(v)))\n plt.title('Debugging: blue is sorted gradient, green is potential sorted by gradient')\n plt.show() #f is blue and smooth, v is green and fuzzy.\n if origin[0]==(1 or V.shape[0]):\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[1]):\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[2]): \n print('find_saddle: Saddle out of bounds in z (k) direction.\\n')\n return origin\n #################################################################################################\n if dim==2: # Extrapolate to the values of A at z0.\n V2=V\n if len(V.shape)==3:\n Ks=0 # in case there is no saddle point\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n Ks=i-1\n if Z0<1:\n Ks+=1\n Vs=V.shape\n if Ks>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,Ks] \n v2=V[:,:,Ks+1]\n V2=v1+(v2-v1)*(Z0-Z[Ks])/(Z[Ks+1]-Z[Ks])\n V2s=V2.shape\n if len(V2s)!=2: # Old: What is this supposed to check? Matlab code: (size(size(A2),2) ~= 2)\n return('Problem with find_saddle.py dimensionalities. It is {}.'.format(V2s))\n f=V2/float(np.max(abs(V2)))\n [Ex,Ey]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]))\n E=np.sqrt(Ex**2+Ey**2)\n m=float(np.min(E))\n if m>1e-4: # This requires a grid with step size 0.01, not just 0.1.\n if debug:\n Is,Js=np.NaN,np.NaN\n print('Warning, there seems to be no saddle point.')\n mr=E[0,0]\n Is,Js=1,1 # in case there is no saddle\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n if E[i,j]<mr:\n mr=E[i,j]\n Is,Js=i,j\n origin=[Is,Js,Ks]\n if Is==1 or Is==V.shape[0]:\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if Js==1 or Js==V.shape[1]:\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n return origin", "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def test_solve_nestedcs_bce():\n import numpy as np\n from crpm.setup_nestedcs import setup_nestedcs\n from crpm.fwdprop import fwdprop\n from crpm.lossfunctions import loss\n from crpm.gradientdecent import gradientdecent\n\n 
#init numpy seed\n np.random.seed(40017)\n\n #setup model\n model, data = setup_nestedcs()\n\n #calculate initial binary cross entropy error\n pred, _ = fwdprop(data[0:2,], model)\n icost, _ = loss(\"bce\", pred, data[-1,])\n\n #train model\n pred, cost, _ = gradientdecent(model, data[0:2,], data[-1,], \"bce\")\n\n #print(model)\n #print(icost)\n #print(cost)\n assert icost > cost\n assert cost < .29", "def saddle_point(I):\n #--- FILL ME IN ---\n\n m, n = I.shape\n\n #compute the inputs to the function lstsq\n\n #get sci\n sci = I.reshape(m*n, 1)\n #get A\n A = []\n for y in range(n):\n for x in range(m):\n #print((x,y))\n #print([x*x, x*y, y*y, x, y, 1])\n A.append([x*x, x*y, y*y, x, y, 1])\n\n A = np.array(A)\n \n parms = np.linalg.lstsq(A,sci)[0]\n #print(parms)\n r1 = np.array([[2*parms[0][0], parms[1][0]], \n [parms[1][0], 2*parms[2][0]]])\n r1 = np.linalg.inv(r1)\n r2 = np.array([[parms[3][0]], \n [parms[4][0]]])\n\n pt = np.negative(np.matmul(r1, r2))\n\n #------------------\n\n return pt", "def between_vec(df, switch): \n gauss1_idx, gauss2_idx = gauss_idx_func(CC_scaled)\n nga_dict = {key:list() for key in NGAs}\n \n slopes = [] \n \n def slope(a, b):\n \"\"\" find slope given two points \"\"\"\n a1, a2 = PC_matrix[:, 0][a], PC_matrix[:, 1][a]\n b1, b2 = PC_matrix[:, 0][b], PC_matrix[:, 1][b]\n \n return b1-a1, b2-a2\n \n # compute flow vector for each nga \n for nga in NGAs:\n nga_idx = df.index[df['NGA'] == nga].tolist()\n\n gauss1 = [i for i in nga_idx if i in gauss1_idx]\n gauss2 = [j for j in nga_idx if j in gauss2_idx]\n\n # use the last point in the first cluster and the first point in the second cluster\n if switch == 1: \n \n try:\n a, b = gauss1[-1], gauss2[0]\n x, y = slope(a, b)\n slopes.append((x, y))\n\n except: # lies only in one of the two clusters \n pass \n \n # use the very first time points make a transition from the first to the second\n elif switch == 2:\n \n for idx in range(len(nga_idx)-1):\n \n if nga_idx[idx] in gauss1 and nga_idx[idx+1] in gauss2:\n \n a, b = nga_idx[idx], nga_idx[idx+1]\n x, y = slope(a, b)\n slopes.append((x, y))\n \n break \n \n # take all transitions\n elif switch == 3:\n \n for idx in range(len(nga_idx)-1):\n \n if nga_idx[idx] in gauss1 and nga_idx[idx+1] in gauss2:\n \n a, b = nga_idx[idx], nga_idx[idx+1]\n x, y = slope(a, b)\n slopes.append((x, y))\n \n return slopes", "def our_own_bvp_solve(f, a, b, n, y0, dim, bc, tol=1e-2):\n\n # interpolate the initial guess function y0 on Chebyshev points of the first kind\n cf0 = []\n for y0_i in y0:\n for thing in np.polynomial.chebyshev.Chebyshev(np.zeros(n), (a, b)).interpolate(y0_i, n, (a, b)):\n cf0.append(thing)\n\n solution = root(lambda u: fun(u, a, b, dim, n, f, bc), cf0, method='lm', tol=tol)\n if not solution.success:\n print('root finding failed')\n\n cf = solution.x\n cf = cf.reshape((dim, cf.size // dim))\n\n return [np.polynomial.chebyshev.Chebyshev(cf[i], (a, b)) for i in range(dim)]", "def g_solving_subproblem_of_LR(self,vehicle_id):\r\n global_LB=-10000\r\n global_UB=10000\r\n iteration_for_RSP=20\r\n optimal_solution_for_RSP=None\r\n optimal_value_y=0\r\n self.multiplier_v=0.5\r\n\r\n #solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 4)\r\n #obtain the variance\r\n y_=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB=0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n 
self.g_dynamic_programming_algorithm(vehicle_id, 2)\r\n LB+=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian_mean\r\n UB=Label_cost_for_lagrangian_mean+self.reliability*(variance)**0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector\r\n optimal_value_y = y\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n\r\n\r\n # step 3: update multipliers\r\n if variance-y!= 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB-global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, optimal_value_y,global_LB,global_UB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB", "def group_centers_phase1_and_2(self) -> None:\n self.rotate_U_to_U()\n self.rotate_F_to_F()\n\n if self.centers_staged():\n return\n\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # find multiple phase1 solutions\n phase1_solutions = self.lt_LR_centers_stage.solutions_via_c(solution_count=100)\n pt_state_indexes = []\n pt_state_indexes_LR_centers_special = []\n phase2_pt_state_indexes_to_phase1_solution = {}\n logger.info(f\"found {len(phase1_solutions)} phase1 solutions\")\n\n # find the phase2 solution for each phase1 solution\n for phase1_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) in phase1_solutions:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in phase1_solution:\n self.rotate(step)\n\n # stage the LR centers\n phase2_pt_state_indexes = tuple([pt.state_index() for pt in self.lt_FB_centers_stage.prune_tables])\n pt_state_indexes.append(phase2_pt_state_indexes)\n phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution\n\n # stage the LR centers and put them into one of 495 states solveable with L L' R R'\n phase2_pt_state_indexes = tuple(\n [pt.state_index() for pt in self.lt_FB_centers_stage_LR_centers_special.prune_tables]\n )\n pt_state_indexes_LR_centers_special.append(phase2_pt_state_indexes)\n phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # stage the FB centers\n phase2_solutions = 
self.lt_FB_centers_stage.solutions_via_c(pt_states=pt_state_indexes, solution_count=1)\n phase2_solution = phase2_solutions[0][0]\n\n # stage the FB centers and put LR centers into one of 495 states solveable with L L' R R'\n phase2_solutions_lr_centers_special = self.lt_FB_centers_stage_LR_centers_special.solutions_via_c(\n pt_states=pt_state_indexes_LR_centers_special, solution_count=1\n )\n phase2_solution_lr_centers_special = phase2_solutions_lr_centers_special[0][0]\n\n # if we can put the LR centers into one of 495 states without adding to the move count, make it so\n if len(phase2_solution_lr_centers_special) <= len(phase2_solution):\n min_phase2_solution, (\n pt0_state,\n pt1_state,\n pt2_state,\n pt3_state,\n pt4_state,\n ) = phase2_solutions_lr_centers_special[0]\n min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state, pt2_state]\n else:\n min_phase2_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) = phase2_solutions[0]\n min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state]\n\n logger.info(\n f\"phase2 solution length {len(phase2_solution)}, phase2_lr_centers_special solution length {len(phase2_solution_lr_centers_special)}\"\n )\n\n for step in min_phase1_solution:\n self.rotate(step)\n\n self.print_cube_add_comment(\"LR centers staged\", tmp_solution_len)\n\n tmp_solution_len = len(self.solution)\n for step in min_phase2_solution:\n self.rotate(step)\n\n self.print_cube_add_comment(\"UD FB centers staged\", tmp_solution_len)", "def create_bcs(dim, H, Hmin, HZ, HminZ, XYZ, inlet_velocity,\n V_0, solutes, subdomains_file,\n enable_NS, enable_PF, enable_EC, \n mesh, boundaries_Facet, **namespace):\n mvc = df.MeshValueCollection(\"size_t\", mesh, dim-1) \n with df.XDMFFile(subdomains_file) as infile:\n infile.read(mvc, \"name_to_read\")\n facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)\n\n # Re-create boundaries with facet_domain for mesh relevance\n\n boundaries = dict(\n inlet = [facet_domains, boundaries_Facet[\"inlet\"]],\n outletL = [facet_domains, boundaries_Facet[\"outletL\"]],\n outletR = [facet_domains, boundaries_Facet[\"outletR\"]],\n wall = [facet_domains, boundaries_Facet[\"wall\"]],\n )\n\n # Alocating the boundary dicts\n bcs = dict()\n bcs_pointwise = dict()\n for boundary in boundaries:\n bcs[boundary] = dict()\n\n ## Velocity Phase Flow In (Retrieve expression)\n #\n #length inlet, water inflow, X/Y/Z, Positive/neg flow along axis\n velocity_expr = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ) \n velocity_in = Fixed(velocity_expr)\n\n # Pressure set to 0 at outlet\n pressure_out = Pressure(0.0)\n # Create NoSlip function for walls\n noslip = Fixed((0., 0., 0.)) # Unlike 2D \"NoSlip()\", need 3 dimensions\n\n ## Define boundaries\n # Note we have two outlets\n if enable_NS:\n bcs[\"inlet\"][\"u\"] = velocity_in\n bcs[\"outletL\"][\"p\"] = pressure_out\n bcs[\"outletR\"][\"p\"] = pressure_out\n bcs[\"wall\"][\"u\"] = noslip\n\n # Ensure all processes have completed (Might be redundant) \n mpi_barrier()\n return boundaries, bcs, bcs_pointwise", "def branches_library(point, dict_v, dict_e, list_v, kind_v, tuple_e):\n branches=[[point]] #unfinished branches\n lista=[] # finished branches, possible branches started in endpoint and ended in trunkpoint\n next_points = []\n while branches != []:\n for branch in branches:\n next_points = detect_next_points(branch[-1], branch[:-1], tuple_e)\n temp_list=list()\n \n if len(next_points)==0:\n branches.remove(branch)\n 
continue\n for pointn in next_points:\n temp_list.append(branch+[pointn])\n \n branches.remove(branch)\n\n for br in temp_list:\n if control_endpoint(br[-1],list_v, kind_v)==1:\n lista.append(br)\n else:\n branches.append(br)\n if len(lista)>10:\n return lista\n return lista", "def detect_branchings(self):\n logg.debug(\n f' detect {self.n_branchings} '\n f'branching{\"\" if self.n_branchings == 1 else \"s\"}',\n )\n # a segment is a subset of points of the data set (defined by the\n # indices of the points in the segment)\n # initialize the search for branchings with a single segment,\n # that is, get the indices of the whole data set\n indices_all = np.arange(self._adata.shape[0], dtype=int)\n # let's keep a list of segments, the first segment to add is the\n # whole data set\n segs = [indices_all]\n # a segment can as well be defined by the two points that have maximal\n # distance in the segment, the \"tips\" of the segment\n #\n # the rest of the points in the segment is then defined by demanding\n # them to \"be close to the line segment that connects the tips\", that\n # is, for such a point, the normalized added distance to both tips is\n # smaller than one:\n # (D[tips[0],i] + D[tips[1],i])/D[tips[0],tips[1] < 1\n # of course, this condition is fulfilled by the full cylindrical\n # subspace surrounding that line segment, where the radius of the\n # cylinder can be infinite\n #\n # if D denotes a euclidian distance matrix, a line segment is a linear\n # object, and the name \"line\" is justified. if we take the\n # diffusion-based distance matrix Dchosen, which approximates geodesic\n # distance, with \"line\", we mean the shortest path between two points,\n # which can be highly non-linear in the original space\n #\n # let us define the tips of the whole data set\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(\n np.unravel_index(\n np.argmax(self.distances_dpt), self.distances_dpt.shape\n )\n )\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.distances_dpt[self.iroot])\n else:\n tip_0 = np.argmax(self.distances_dpt[0])\n tips_all = np.array([tip_0, np.argmax(self.distances_dpt[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n segs_connects = [[]]\n segs_undecided = [True]\n segs_adjacency = [[]]\n logg.debug(\n ' do not consider groups with less than '\n f'{self.min_group_size} points for splitting'\n )\n for ibranch in range(self.n_branchings):\n iseg, tips3 = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.debug(' partitioning converged')\n break\n logg.debug(\n f' branching {ibranch + 1}: split group {iseg}',\n ) # [third start end]\n # detect branching and update segs and segs_tips\n self.detect_branching(\n segs,\n segs_tips,\n segs_connects,\n segs_undecided,\n segs_adjacency,\n iseg,\n tips3,\n )\n # store as class members\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_undecided = segs_undecided\n # the following is a bit too much, but this allows easy storage\n self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float)\n self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int)\n for i, seg_adjacency in enumerate(segs_adjacency):\n self.segs_connects[i, seg_adjacency] = segs_connects[i]\n for i in range(len(segs)):\n for j in range(len(segs)):\n self.segs_adjacency[i, j] = self.distances_dpt[\n self.segs_connects[i, j], self.segs_connects[j, i]\n ]\n self.segs_adjacency = 
self.segs_adjacency.tocsr()\n self.segs_connects = self.segs_connects.tocsr()", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def solve_steady_state(self):\n # optimization has to be done on the reduced system\n # TODO: implement different comp. sizes\n s0 = self.model.get_initial_conc()\n [L_inv, L, _] = self.model.N_partitioned\n si = numpy.dot(L_inv, s0)\n t = s0 - numpy.dot(L, si)\n f = lambda x: numpy.linalg.norm(\n self.dS_dt(numpy.dot(L, x) + t, 1))\n ss_i = scipy.optimize.fmin_bfgs(f, si)\n ss = numpy.dot(L, ss_i) + t\n return ss", "def shapley(self, subgraph_nodes):\n num_nodes = self.graph.num_nodes()\n subgraph_nodes = subgraph_nodes.tolist()\n\n # Obtain neighboring nodes of the subgraph g_i, P'.\n local_region = subgraph_nodes\n for _ in range(self.num_hops - 1):\n in_neighbors, _ = self.graph.in_edges(local_region)\n _, out_neighbors = self.graph.out_edges(local_region)\n neighbors = torch.cat([in_neighbors, out_neighbors]).tolist()\n local_region = list(set(local_region + neighbors))\n\n split_point = num_nodes\n coalition_space = list(set(local_region) - set(subgraph_nodes)) + [\n split_point\n ]\n\n marginal_contributions = []\n device = self.feat.device\n for _ in range(self.shapley_steps):\n permuted_space = np.random.permutation(coalition_space)\n split_idx = int(np.where(permuted_space == split_point)[0])\n\n selected_nodes = permuted_space[:split_idx]\n\n # Mask for coalition set S_i\n exclude_mask = torch.ones(num_nodes)\n exclude_mask[local_region] = 0.0\n exclude_mask[selected_nodes] = 1.0\n\n # Mask for set S_i and g_i\n include_mask = exclude_mask.clone()\n include_mask[subgraph_nodes] = 1.0\n\n exclude_feat = self.feat * exclude_mask.unsqueeze(1).to(device)\n include_feat = self.feat * include_mask.unsqueeze(1).to(device)\n\n with torch.no_grad():\n exclude_probs = self.model(\n self.graph, exclude_feat, **self.kwargs\n ).softmax(dim=-1)\n exclude_value = exclude_probs[:, self.target_class]\n include_probs = self.model(\n self.graph, include_feat, **self.kwargs\n ).softmax(dim=-1)\n include_value = include_probs[:, self.target_class]\n marginal_contributions.append(include_value - exclude_value)\n\n return torch.cat(marginal_contributions).mean().item()", "def 
residual_bottleneck_unit(n,bottom, nout, s, newdepth = False, use_global_stats=False):\n \n# bottom = n.__dict__['tops'][n.__dict__['tops'].keys()[-1]]\n \n ns=dict()\n stride = newdepth if newdepth else 1\n\n ns[s + '_branch2conv1'], ns[s + '_branch2bn1'], ns[s + '_branch2scale1'] = conv_bn_scale(bottom, ks = 1, \n stride = stride, nout = nout, pad = 0,\n use_global_stats=use_global_stats)\n ns[s + '_branch2relu1'] = L.ReLU(ns[s + '_branch2scale1'], in_place=True)\n ns[s + '_branch2conv2'], ns[s + '_branch2bn2'], ns[s + '_branch2scale2'] = conv_bn_scale(ns[s + '_branch2relu1'], ks = 3,\n stride = 1, nout = nout, pad = 1,\n use_global_stats=use_global_stats)\n ns[s + '_branch2relu2'] = L.ReLU(ns[s + '_branch2scale2'], in_place=True)\n ns[s + '_branch2conv3'], ns[s + '_branch2bn3'], ns[s + '_branch2scale3'] = conv_bn_scale(ns[s + '_branch2relu2'], ks = 1,\n stride = 1, nout = nout*4, pad = 0,\n use_global_stats=use_global_stats)\n \n if newdepth:\n ns[s + '_branch1conv'], ns[s + '_branch1bn1'], ns[s + '_branch1scale1'] = conv_bn_scale(bottom, ks = 1, \n stride = stride, nout = nout*4, pad = 0,\n use_global_stats=use_global_stats)\n ns[s] = L.Eltwise(ns[s + '_branch1scale1'],ns[s + '_branch2scale3'])\n else:\n ns[s] = L.Eltwise(bottom, ns[s + '_branch2scale3'])\n\n ns[s + '_relu'] = L.ReLU(ns[s], in_place=True)\n \n dict2net(n,ns)\n return ns[s + '_relu']", "def refine_Hessian(self, kpx, kpy, kps):\n curr = self.dogs[(kps, kpy, kpx)]\n nx = self.dogs[(kps, kpy, kpx + 1)]\n px = self.dogs[(kps, kpy, kpx - 1)]\n ny = self.dogs[(kps, kpy + 1, kpx)]\n py = self.dogs[(kps, kpy - 1, kpx)]\n ns = self.dogs[(kps + 1, kpy, kpx)]\n ps = self.dogs[(kps - 1, kpy, kpx)]\n\n nxny = self.dogs[(kps, kpy + 1, kpx + 1)]\n nxpy = self.dogs[(kps, kpy - 1, kpx + 1)]\n pxny = self.dogs[(kps, kpy + 1, kpx - 1)]\n pxpy = self.dogs[(kps, kpy - 1, kpx - 1)]\n\n nsny = self.dogs[(kps + 1, kpy + 1, kpx)]\n nspy = self.dogs[(kps + 1, kpy - 1, kpx)]\n psny = self.dogs[(kps - 1, kpy + 1, kpx)]\n pspy = self.dogs[(kps - 1, kpy - 1, kpx)]\n\n nxns = self.dogs[(kps + 1, kpy, kpx + 1)]\n nxps = self.dogs[(kps - 1, kpy, kpx + 1)]\n pxns = self.dogs[(kps + 1, kpy, kpx - 1)]\n pxps = self.dogs[(kps - 1, kpy, kpx - 1)]\n\n dx = (nx - px) / 2.0\n dy = (ny - py) / 2.0\n ds = (ns - ps) / 2.0\n dxx = (nx - 2.0 * curr + px)\n dyy = (ny - 2.0 * curr + py)\n dss = (ns - 2.0 * curr + ps)\n dxy = (nxny - nxpy - pxny + pxpy) / 4.0\n dxs = (nxns - nxps - pxns + pxps) / 4.0\n dsy = (nsny - nspy - psny + pspy) / 4.0\n det = -(dxs * dyy * dxs) + dsy * dxy * dxs + dxs * dsy * dxy - dss * dxy * dxy - dsy * dsy * dxx + dss * dyy * dxx\n K00 = dyy * dxx - dxy * dxy\n K01 = dxs * dxy - dsy * dxx\n K02 = dsy * dxy - dxs * dyy\n K10 = dxy * dxs - dsy * dxx\n K11 = dss * dxx - dxs * dxs\n K12 = dxs * dsy - dss * dxy\n K20 = dsy * dxy - dyy * dxs\n K21 = dsy * dxs - dss * dxy\n K22 = dss * dyy - dsy * dsy\n\n delta_s = -(ds * K00 + dy * K01 + dx * K02) / det\n delta_y = -(ds * K10 + dy * K11 + dx * K12) / det\n delta_x = -(ds * K20 + dy * K21 + dx * K22) / det\n peakval = curr + 0.5 * (delta_s * ds + delta_y * dy + delta_x * dx)\n mask = numpy.logical_and(numpy.logical_and(abs(delta_x) < self.tresh, abs(delta_y) < self.tresh), abs(delta_s) < self.tresh)\n return kpx + delta_x, kpy + delta_y, kps + delta_s, peakval, mask", "def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = 
np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n 
hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##", "def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv 
direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = [i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n 
localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = 
np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))", "def solve():\n\n s, g, e = make_lattice(21)\n stack = deque([[e]])\n vals = {s: 1}\n max_n = 0\n\n while stack:\n max_n = max(max_n, len(stack))\n n, *p = stack.pop()\n for c in g.get_connected(n):\n if c > n:\n continue\n if c in vals:\n propagate(c, [n] + p, vals)\n else:\n stack.append([c, n] + p)\n return vals[e]", "def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts", "def _updateCollisions(self):\n if self._paddle.collision(self._ball) and self._ball.getVY() <= 0:\n self._points -= PADDLE_POINTS\n self._ball.bouncePaddle(self._paddle)\n # increment _ball._bounces\n self._ball.setBounces(self._ball.getBounces()+1)\n # change speed\n if (self._ball.getBounces() >= BOUNCES_FOR_INCREASE and\n self._ball.findSpeed() <= SPEED_MAX):\n self._ball.incrementSpeed()\n self._paddle.incrementSpeed()\n self._ball.setBounces(0)\n \n stop = False\n for r in range(len(self._bricks)):\n if not stop:\n if self._bricks[r].collision(self._ball):\n self._ball.bounceTopBottom()\n self.addPointsBrick(self._bricks[r])\n if self._determine_new_fp():\n 
self.createFP(self._bricks[r])\n del self._bricks[r]\n stop = True\n \n for j in self._FP_list:\n if j.collision(self._paddle):\n self._points_FP = j.getValue()", "def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")", "def scatterSearch(graph, alpha, b):\n\n\t#Generate P\n\tP = []\n\trefSet = []\n\trefSet_pos = []\n\tc = 0\n\tpowb2 = pow(b,2)\n\twhile c < powb2:\n\t\tx = diversification(graph, alpha)\n\t\tS,Sp,cut_V = improvement(graph, x)\n\t\tif (not (S,Sp,cut_V) in P) and (not (Sp,S,cut_V) in P):\n\t\t\tP.append((S,Sp,cut_V))\n\t\t\tc = c + 1\n\n\t\t\t#Build RefSet\n\t\t\tif len(refSet) < 
b/2:\n\t\t\t\trefSet.append((S,Sp,cut_V))\n\t\t\t\trefSet_pos.append(c)\n\t\t\t\tif len(refSet) == b/2:\n\t\t\t\t\t#Get new min\n\t\t\t\t\tminrefSet, minrefSet_pos = getMinRS(refSet, b/2)\n\n\t\t\telif cut_V > minrefSet:\n\t\t\t\trefSet[minrefSet_pos] = (S,Sp,cut_V)\n\t\t\t\trefSet_pos[minrefSet_pos] = c\n\n\t\t\t\t#Get new min\n\t\t\t\tminrefSet, minrefSet_pos = getMinRS(refSet, b/2)\n\n\t#Build Diverse RefSet\n\tpos = randint(0,(c-1))\n\tfor i in range(0,(b/2)):\n\t\twhile (pos in refSet_pos):\n\t\t\t pos =randint(0,(c-1))\n\n\t\trefSet.append(P[pos])\n\t\trefSet_pos.append(pos)\n\n\t#Get new min\n\tminrefSet, minrefSet_pos = getMinRS(refSet, b)\t\t\n\n\n\tnewSolutions = True\n\twhile newSolutions:\n\t\tnewSolutions = False\n\t\t#Generate subset\n\t\tnewSubSet = generateSubSet(refSet,b)\n\t\tfor subSet in newSubSet:\n\t\t\ttrial_sol = combination(graph, subSet)\n\t\t\t#Improvement\n\t\t\tfor sol in trial_sol:\n\t\t\t\tI,Ip,cut_I = improvement(graph, sol)\n\n\t\t\t\t#Update RefSet\n\t\t\t\tif cut_I > minrefSet:\n\t\t\t\t\tif (not (I,Ip,cut_I) in refSet) and (not (Ip,I,cut_I) in refSet):\n\t\t\t\t\t\trefSet[minrefSet_pos] = (I,Ip,cut_I)\n\t\t\t\t\t\tnewSolutions = True\n\t\t\t\t\t\tminrefSet, minrefSet_pos = getMinRS(refSet, b)\n\n\treturn getMax(refSet,b)", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def _detect_branching_single_haghverdi16(self, Dseg, tips):\n # compute branchings using different starting points the first index of\n # tips is the starting point for the other two, the order does not\n # matter\n ssegs = []\n # permutations of tip cells\n ps = [\n [0, 1, 2], # start by computing distances from the first tip\n [1, 2, 0], # -\"- second tip\n [2, 0, 1], # -\"- third tip\n ]\n for i, p in enumerate(ps):\n ssegs.append(self.__detect_branching_haghverdi16(Dseg, tips[p]))\n return ssegs", "def get_bounding_ball(S, epsilon=1e-7, rng=numpy.random.default_rng()):\n\n # Iterative implementation of Welzl's algorithm, see\n # \"Smallest enclosing disks (balls and ellipsoids)\" Emo Welzl 1991\n\n def circle_contains(D, p):\n c, r2 = D\n return numpy.square(p - c).sum() <= r2\n\n def get_boundary(R):\n if len(R) == 0:\n return numpy.zeros(S.shape[1]), 0.0\n\n if len(R) <= S.shape[1] + 1:\n return get_circumsphere(S[R])\n\n c, r2 = get_circumsphere(S[R[: S.shape[1] + 1]])\n if numpy.all(\n numpy.fabs(numpy.square(S[R] - c).sum(axis=1) - r2) < epsilon\n ):\n return c, r2\n\n class Node(object):\n def __init__(self, P, R):\n self.P = P\n self.R = R\n self.D = None\n self.pivot = None\n self.left = None\n self.right = None\n\n def traverse(node):\n stack = [node]\n while len(stack) > 0:\n node = stack.pop()\n\n if len(node.P) == 0 or len(node.R) >= S.shape[1] + 1:\n node.D = 
get_boundary(node.R)\n elif node.left is None:\n pivot_index = rng.integers(len(node.P))\n node.pivot = node.P[pivot_index]\n node.left = Node(\n node.P[:pivot_index] + node.P[pivot_index + 1:],\n node.R\n )\n stack.extend((node, node.left))\n elif node.right is None:\n if circle_contains(node.left.D, S[node.pivot]):\n node.D = node.left.D\n else:\n node.right = Node(node.left.P, node.R + [node.pivot])\n stack.extend((node, node.right))\n else:\n node.D = node.right.D\n node.left, node.right = None, None\n\n S = S.astype(float, copy=False)\n root = Node(list(range(S.shape[0])), [])\n traverse(root)\n return root.D", "def V_checkers_global(s_grid, s_n, g_n, s_others, f1=2, k1=[3,5],\n n_h1_1=128, n_h1_2=32, n_h2=32, stage=1, bias=True):\n conv = convnet_1(s_grid, f1=f1, k1=k1, s1=[1,1], scope='conv')\n concated = tf.concat( [conv, s_n, g_n], axis=1 )\n branch1 = tf.layers.dense(inputs=concated, units=n_h1_1, activation=tf.nn.relu, use_bias=bias, name='V_branch1')\n W_branch1_h2 = get_variable(\"W_branch1_h2\", [n_h1_1, n_h2])\n\n list_mult = []\n list_mult.append( tf.matmul(branch1, W_branch1_h2) )\n\n if stage > 1:\n with tf.variable_scope(\"stage-2\"):\n others = tf.layers.dense(inputs=s_others, units=n_h1_2, activation=tf.nn.relu, use_bias=bias, name='V_branch2')\n W_branch2_h2 = get_variable('W_branch2_h2', [n_h1_2, n_h2])\n list_mult.append(tf.matmul(others, W_branch2_h2))\n\n h2 = tf.nn.relu(tf.add_n(list_mult, name='V_h2'))\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=bias, name='V_out')\n \n return out", "def bridge_problem3(here):\r\n\r\n def all_over(state):\r\n here, _ = state\r\n return not here or here == set([\"light\"])\r\n\r\n start = (frozenset(here) | frozenset([\"light\"]), frozenset())\r\n return lowest_cost_search(start, bsuccessors2, all_over, bcost)", "def step1(ball_list, step,borders,obstacle=None):\n\n index_list = range(len(ball_list))\n for i in index_list:\n ball_list[i].compute_refl(step,borders,obstacle)\n for j in index_list:\n if i!=j:\n ball_list[i].compute_coll(ball_list[j],step)\n return ball_list", "def get_bc_parts(mesh, lst):\n if len(lst) > 0:\n shift = max(0, -min(e.value for e in lst))\n else:\n return [], [], 0, FacetFunction(\"size_t\", mesh, 0)\n # values must be shifted by smallest Steklov value since size_t is unsigned\n fun = FacetFunction(\"size_t\", mesh, shift)\n for bc in lst:\n sub = OnBoundary()\n # overwrite inside function with the one from bc\n sub.inside = bc.getTest()\n sub.mark(fun, bc.value + shift)\n # some conditions may cancel eachother\n exist = set(np.unique(fun.array()))\n lst = [e for e in lst if e.value+shift in exist]\n # separate Robin and Steklov, Dirichlet and Neumann are irrelevant\n Robin = [e for e in lst if e.value > 1 and e.parValue != 0]\n Steklov = [e for e in lst if e.value < 0 and e.parValue != 0]\n return Robin, Steklov, shift, fun", "def ball_pair_collision_update(self, b1, b2):\r\n n_vec = tu.Vec2D(b2.x-b1.x,b2.y-b1.y)\r\n n_vec_mag = abs(n_vec)\r\n if n_vec_mag == 0:\r\n return # too close\r\n\r\n sep_dist = n_vec_mag - b1.r - b2.r\r\n if sep_dist < -b1.r*.001:\r\n if BallCollision2D.track_collision:\r\n print(f\"\\nsep_dist:{sep_dist:.3f} {sep_dist/b1.r:.3f}*r\"\r\n f\" {b1.color}:{b2.color}\")\r\n print(f\" n_vec:{n_vec}\")\r\n b1_vec = tu.Vec2D(b1.x, b1.y)\r\n b2_vec = tu.Vec2D(b2.x, b2.y)\r\n un_vec = n_vec * (1 / n_vec_mag)\r\n if BallCollision2D.track_collision:\r\n print(f\" un_vec:{un_vec}\")\r\n sep_vec = (sep_dist/2)*un_vec # Split the diff\r\n if 
BallCollision2D.track_collision:\r\n print(f\" sep_vec:{sep_vec}\")\r\n b1_backoff_vec = b1_vec + sep_vec\r\n b2_backoff_vec = b2_vec - sep_vec\r\n b1.x, b1.y = b1_backoff_vec\r\n b2.x, b2.y = b2_backoff_vec\r\n new_dist = sqrt((b2.x-b1.x)**2 + (b2.y-b1.y)**2)\r\n new_sep = new_dist - b1.r - b2.r\r\n if BallCollision2D.track_collision:\r\n print(f\" new_sep:{new_sep/b1.r:.3f}*r: {b1.color}:{b2.color}\")\r\n\r\n # Setup based on adjusted positioning\r\n n_vec = tu.Vec2D(b2.x - b1.x, b2.y - b1.y)\r\n n_vec_mag = abs(n_vec)\r\n if n_vec_mag == 0:\r\n return # too close\r\n\r\n un_vec = n_vec*(1/n_vec_mag)\r\n ut_vec = un_vec.rotate(-90)\r\n v1_vec = tu.Vec2D(b1.vx, b1.vy)\r\n v1n = un_vec*v1_vec\r\n v1t = ut_vec*v1_vec\r\n \r\n v2_vec = tu.Vec2D(b2.vx, b2.vy)\r\n v2n = un_vec*v2_vec\r\n v2t = ut_vec*v2_vec\r\n\r\n vp1t = v1t # No change to tangential velocity\r\n vp2t = v2t\r\n\r\n # given m1 == m2\r\n # vp1n = v2n, vp2n = v1n\r\n # Exchanging normal velocities\r\n # If too overlapping don't change normal vel\r\n\r\n if sep_dist < -(b1.r+b2.r)*.2:\r\n vp1n = v1n\r\n vp2n = v2n\r\n else:\r\n vp1n = v2n\r\n vp2n = v1n\r\n\r\n vp1n_vec = vp1n*un_vec\r\n vp1t_vec = vp1t*ut_vec\r\n \r\n vp2n_vec = vp2n*un_vec\r\n vp2t_vec = vp2t*ut_vec\r\n\r\n vp1_vec = vp1n_vec + vp1t_vec\r\n vp2_vec = vp2n_vec + vp2t_vec\r\n\r\n # Update each ball's vx,vy\r\n b1.vx, b1.vy = vp1_vec\r\n b2.vx, b2.vy = vp2_vec", "def generate_velocity_all_balls():\n for ball in balls:\n generate_velocity(balls, balls.index(ball))\n for super_ball in super_balls:\n generate_velocity(super_balls, super_balls.index(super_ball))", "def computePairBetweenness(self,s0,s1=None):\r\n if s1==None:s1=s0[1];s0=s0[0]; #if given only one argument it should be a two tuple\r\n \r\n #calculate PB\r\n #Note that PB(u,v)=PB(v,u)\r\n #Every path is counted only once for s0!=s1 because :\r\n #delta(v,s0,s1)>0 => delta(v,s1,s0)=0\r\n #delta(v,s1,s0)>0 => delta(v,s0,s1)=0\r\n result = 0.0\r\n for u in range(self._n):\r\n #calculate Delta(u,{s0,s1},*)\r\n dd1 = self._deltaDot[u,s1] * self.getDelta(u,s0,s1)\r\n #paths from s0 and s1 already counted in deltaDot matrix\r\n result += dd1\r\n #when s0=s1 every path is counted twice (in both directions)\r\n #if s0==s1: self._PB[s0][s1] /= 2\r\n return result", "def find_basin(self, s):\n \n assert s.size==self.n\n atMin = False\n thisState = s.astype(np.int8)\n\n while not atMin: \n dE = self.neighbor_dE(thisState)\n if np.any( dE<0 ):\n ix = dE.argmin()\n thisState[ix] *= -1\n else:\n atMin = True\n return thisState", "def phase_seperation_detection(g_x_func, s, p, P, T, n=100, LLE_only=False,\n VLE_only=False): \n # TODO: Update this documentation\n \n # Generate sampling points.\n import numpy\n from UQToolbox.sobol_lib import i4_sobol_generate\n from tgo import tgo\n m = p.m['n'] - 1\n skip = 4\n Points = i4_sobol_generate(m, n, skip)\n Points = numpy.column_stack([Points[i] for i in range(m)])\n Points = Points[numpy.sum(Points, axis=1) <= 1.0]\n S = numpy.empty(n, dtype=bool)\n\n # Update P, T to specified value\n print Points[0]\n s = s.update_state(s, p, P=P, T=T, X = Points[0], Force_Update=True)\n \n def subset_eqp(Points, X_I, X_II):\n # Retunrs a subset of \"Points\" outside EQP\n import numpy\n for i in range(p.m['n']-1):\n P_new_low = Points[Points[:,i] < \n min(X_I[i], X_II[i])]\n \n P_new_high = Points[Points[:,i] > \n max(X_I[i], X_II[i])] \n \n return numpy.append(P_new_low, P_new_high, axis=0)\n \n \n # Detect instability in a same volume root phase:\n if not VLE_only:\n # define LLE 
instability func\n def instability_point_calc(Points, g_x_func, s, p, n, k, P=P, T=T):\n # Find an instability point, calculated equilibrium and return\n # new feasible subset.\n Stop = False # Boolean to run main while loop\n for i, X in zip(range(n), Points):\n # Test for instability at current equilibrium point.\n S[i] = stability(X, g_x_func, s, p, k=ph)\n if not S[i]: # If point is unstable find equilibrium point.\n s = phase_equilibrium_calculation(s, p, g_x_func, X, k=k,\n P=P, T=T, \n tol=1e-9, \n Print_Results=False, \n Plot_Results=False) \n \n s.m['ph equil P'] = [s.m['X_I'], s.m['X_II']]\n # TODO: Improve finding feasible subspace of points.\n P_new = Points[(i+1):]\n\n P_new = subset_eqp(P_new, s.m['X_I'], s.m['X_II'])\n \n # Stop if no values in subset\n if numpy.shape(P_new)[0] == 0: \n Stop = True\n \n return P_new, s.m['ph equil P'], Stop\n \n # If no instability was found, stop the main for loop and set eq.\n # point to None.\n s.m['ph equil P'] = None\n Stop = True\n return Points, s.m['ph equil P'], Stop\n \n # Main looping\n s.m['ph equil'] = {} # Range of equilibrium points.\n for ph in p.m['Valid phases']:\n Stop = False\n s.m['ph equil'][ph] = []\n while not Stop:\n Points, s.m['ph equil P'], Stop = instability_point_calc(\n Points, \n g_x_func, \n s, p, n, ph)\n \n # Save an equilibrium point to the range of points in the\n # current phase if found.\n if s.m['ph equil P'] is not None: \n s.m['ph equil'][ph].append(s.m['ph equil P'])\n \n \n # Detect phase seperation accross volume root phases:\n if not LLE_only:\n # Define difference function\n def g_diff(X, g_x_func, s, p, ph1, ph2, ref):\n # Returns difference between Gibbs energy of phases 'ph1' & 'ph2'\n # Note, all phases must be at same composition for meaningful \n # comparison\n s = s.update_state(s, p, X = X, Force_Update=True)\n return (g_x_func(s, p, k=ph1, ref=ref).m['g_mix'][ph1]\n - g_x_func(s, p, k=ph2, ref=ref).m['g_mix'][ph2])\n \n \n # Define objective function for feed search\n def g_diff_obj(X, g_x_func, s, p, ph1, ph2, ref):\n # Returns difference between Gibbs energy of phases 'ph1' & 'ph2'\n # Note, all phases must be at same composition for meaningful \n # comparison\n s = s.update_state(s, p, X = X, Force_Update=True)\n return abs(g_x_func(s, p, k=ph1, ref=ref).m['g_mix'][ph1]\n - g_x_func(s, p, k=ph2, ref=ref).m['g_mix'][ph2])\n \n # Calculated difference of Gibbs energies between all phases at all\n # sampling points.\n s.m['mph equil'] = []\n s.m['mph phase'] = []\n for i in range(len(p.m['Valid phases'])):\n for j in range(i + 1, len(p.m['Valid phases'])):\n ph1 = p.m['Valid phases'][i]\n ph2 = p.m['Valid phases'][j]\n print ph1\n print ph2\n Fd = numpy.empty(n)\n for l, X in zip(range(n), Points):\n Fd[l] = g_diff(X, g_x_func, s, p, ph1, ph2, ph1)\n \n # Look for sign cross phase seperation\n if not numpy.all(Fd > 0) or numpy.all(Fd < 0):\n # (if all values are not greater than or less than zero)\n Bounds = [(1e-6, 0.99999)]\n Args=(g_x_func, s, p, ph1, ph2, ph1)\n Z_0 = tgo(g_diff_obj, Bounds, args=Args, n=1000, k_t = 5)\n print Z_0\n \n s = phase_equilibrium_calculation(s, p, g_x_func, Z_0,\n P=P, T=T, \n tol=1e-2, \n Print_Results=True, \n Plot_Results=True) \n \n s.m['mph equil P'] = [s.m['X_I'], s.m['X_II']]\n s.m['mph equil'].append(s.m['mph equil P'])\n s.m['mph phase'].append([s.m['Phase eq. I'], \n s.m['Phase eq. 
II']])\n return s", "def method_2d(knots,y_n,num):\n cv_iter = 10 # number of iteration for cross-validation \n GSV = np.zeros((cv_iter,cv_iter))\n# tr = np.zeros((cv_iter,cv_iter))\n# fun =np.zeros((cv_iter,cv_iter))\n lam_x = np.linspace(0,0.2,cv_iter)\n lam_y = np.linspace(0,0.2,cv_iter)\n num_knots = len(knots)\n linear_knots = knots[1:num_knots-1]\n num_knots = num_knots-4\n znam = np.zeros((num_knots))\n basis = np.zeros((num,num_knots))\n basis_1 = np.zeros((num,num_knots))\n basis_deriative = np.zeros((num,num_knots))\n basis_deriative_1 = np.zeros((num,num_knots))\n S = np.zeros((num_knots,num_knots,num))\n vs = BsplineVectorSpace(2, knots)\n vs_1 = BsplineVectorSpace(1, linear_knots)\n I_i = np.eye(num_knots)\n for i in xrange(0,num_knots):\n basis[:,i] = vs.basis_der(i,0)(np.linspace(0,1,num))\n basis_deriative[:,i] = vs.basis_der(i,1)(np.linspace(0,1,num))/num\n basis_1[:,i] = vs_1.basis_der(i,0)(np.linspace(0,1,num))\n basis_deriative_1[:,i] = vs_1.basis_der(i,1)(np.linspace(0,1,num))/num\n B = abs(basis_deriative-basis_1)\n S = np.zeros((num_knots,num_knots,num))\n k = np.zeros((num_knots,num_knots,num))\n for i in xrange(num_knots):\n for j in xrange(num_knots):\n S[i,j,:] = B[:,i]*B[:,j]\n k[i,j,:] =basis_deriative_1[:,i] * basis_deriative_1[:,j]\n S_int = np.zeros((num_knots,num_knots))\n k_int = np.zeros((num_knots,num_knots))\n for i in xrange(num_knots):\n for j in xrange(num_knots):\n S_int[i,j] = integrate.trapz(S[i,j,:])\n k_int[i,j] = integrate.trapz(k[i,j,:])\n basis_product = np.kron(basis,basis)\n S_x = np.kron(S_int,I_i)\n S_y = np.kron(I_i,S_int)\n K_x = np.kron(k_int,I_i)\n K_y = np.kron(I_i,k_int)\n for i in xrange(cv_iter):\n for j in xrange(cv_iter):\n influence_matrix = np.dot(np.dot(basis_product,(np.linalg.inv(np.dot(np.transpose(\n basis_product),basis_product)+lam_x[i]*S_x+lam_y[j]*S_y+lam_x[i]* K_x+lam_y[j]*K_y))),np.transpose(basis_product))\n for k in xrange(num_knots):\n znam[k] =(1-influence_matrix[k,k])**2\n tr = np.sum(znam)\n fun = np.sum((y_n-np.dot(influence_matrix,y_n))**2)\n GSV[i,j] =fun/(num*tr)\n print i,j\n a,b = np.unravel_index(GSV.argmin(), GSV.shape)\n# a = np.argmin(np.argmin(GSV,axis = 0))\n# b = np.argmin(np.argmin(GSV,axis = 1))\n lamb_x = lam_x[a]\n lamb_y = lam_y[b]\n print lamb_x,lamb_y\n model_fit = np.dot(np.dot(np.dot(basis_product,(np.linalg.inv(np.dot(np.transpose(\n basis_product),basis_product)+lamb_x*S_x+lamb_y*S_y+lamb_x* K_x+lamb_y*K_y))),np.transpose(basis_product)),y_n)\n return model_fit,GSV", "def SolveSCP(self):\n\n t0 = time()\n\n # Some predicates\n Lu_min = 0.\n niters_max = self._maxiters\n maxfracchange = self._maxfracchange\n\n # initialization, resetting ...\n self.reset_all() # including _u_naught(), first application\n scp_min = self.greedy()\n\n # re-initialization iteration; col fixing ignored for the moment\n niters = 0\n f_change = _largenumber\n while (f_change>maxfracchange) and (niters<niters_max):\n # re-initialize u\n if (np.mod(niters, 2)==0): \n self.reset_u(random=True)\n else:\n self.reset_u()\n u_tmp, Lu_tmp = self.subgradient() # find a near-optimal solution \n u, Lu = self.subgradient() # rerun subgradient to get a set of Lagrangian multipliers\n\n scp_all = np.zeros(self._subg_nsteps)\n for i in np.arange(self._subg_nsteps):\n #self.reset_s()\n self.s = np.copy(self.f)\n scp_all[i] = self.greedy(u=u[:,i])\n\n # check if the solution is gettting better\n imin_tmp = (np.where(scp_all==np.amin(scp_all)))[0]\n imin = imin_tmp[np.argmax(Lu[imin_tmp])]\n imax = np.argmax(Lu)\n if 
(np.mod(niters, 5)==0):\n print(\"This Best solution: UB={0}, LB={1}, UB1={2}, LB1={3}\".format(scp_all[imin], Lu[imin], scp_all[imax], Lu[imax]))\n if (niters==0) or ((scp_all[imin]<=scp_min) and ((Lu[imin]-Lu_min)>-(np.fabs(Lu_min)*self._LB_maxfracchange))):\n scp_min = scp_all[imin]\n u_min = np.copy(u[:,imin])\n Lu_min = Lu[imin]\n self.stepsize = _stepsize\n\n LB = Lu_min\n\n # final step, needs to get u_min back\n self.u = np.copy(u_min)\n self.s = np.copy(self.f)\n UB = self.greedy()\n\n # Which is better? absolute change or fractional change? \n # Both are fine, but cost should be normalized over the mean/median.\n GAP = (UB-LB)/np.fabs(UB)\n f_change = GAP\n if (np.mod(niters, 5)==0):\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n niters = niters + 1\n if (niters == niters_max): \n #warnings.warn(\"Iteration reaches maximum = {0}\".format(niters))\n print(\"Iteration in re-initialization reaches maximum number = {0}\".format(niters))\n\n # Need to remove redundant columns\n # self.remove_redundant() # this itself is NP-hard ...\n\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n print(\"Final Best solution: {0}\".format(UB))\n time_used = (time()-t0)/60.\n print(\"Took {0:.3f} minutes to reach current solution.\".format(time_used))\n\n return (UB,time_used)", "def nearest_neighbors_isolation(all_pcs, all_labels, this_unit_id, max_spikes_for_nn, n_neighbors, seed):\n \n rng = np.random.default_rng(seed=seed)\n \n all_units_ids = np.unique(all_labels)\n other_units_ids = np.setdiff1d(all_units_ids, this_unit_id)\n\n isolation = np.zeros(len(other_units_ids),)\n # compute isolation with each cluster\n for other_unit_id in other_units_ids:\n n_spikes_target_unit = np.sum(all_labels==this_unit_id)\n pcs_target_unit = all_pcs[all_labels==this_unit_id, :]\n\n n_spikes_other_unit = np.sum(all_labels==other_unit_id)\n pcs_other_unit = all_pcs[all_labels==other_unit_id]\n\n spikes_for_nn_actual = np.min([n_spikes_target_unit, n_spikes_other_unit, max_spikes_for_nn])\n\n if spikes_for_nn_actual < n_spikes_target_unit:\n pcs_target_unit_idx = rng.choice(np.arange(n_spikes_target_unit), size=spikes_for_nn_actual)\n pcs_target_unit = pcs_target_unit[pcs_target_unit_idx]\n\n if spikes_for_nn_actual < n_spikes_other_unit:\n pcs_other_unit_idx = rng.choice(np.arange(n_spikes_other_unit), size=spikes_for_nn_actual)\n pcs_other_unit = pcs_other_unit[pcs_other_unit_idx]\n\n pcs_concat = np.concatenate((pcs_target_unit, pcs_other_unit), axis=0)\n label_concat = np.concatenate((np.zeros(spikes_for_nn_actual),np.ones(spikes_for_nn_actual)))\n \n # if n_neighbors is greater than the number of spikes in both clusters, then set it to max possible\n if n_neighbors > len(label_concat):\n n_neighbors_adjusted = len(label_concat)-1\n else:\n n_neighbors_adjusted = n_neighbors\n \n _, membership_ind = NearestNeighbors(n_neighbors=n_neighbors_adjusted, algorithm='auto').fit(pcs_concat).kneighbors()\n \n target_nn_in_target = np.sum(label_concat[membership_ind[:spikes_for_nn_actual]]==0)\n other_nn_in_other = np.sum(label_concat[membership_ind[spikes_for_nn_actual:]]==1) \n\n isolation[other_unit_id==other_units_ids] = (target_nn_in_target + other_nn_in_other) / (2*spikes_for_nn_actual) / n_neighbors_adjusted\n \n nearest_neighbor_isolation = np.min(isolation)\n return nearest_neighbor_isolation", "def take_closer_bd(x, y, cls_bd, dis2cls_bd, boundary_points, boundary_labels):\n if 
cls_bd is None:\n cls_bd = boundary_points\n dis2cls_bd = np.linalg.norm(np.reshape((boundary_points - x),\n (x.shape[0], -1)),\n axis=-1)\n return cls_bd, dis2cls_bd\n else:\n d = np.linalg.norm(np.reshape((boundary_points - x), (x.shape[0], -1)),\n axis=-1)\n for i in range(cls_bd.shape[0]):\n if d[i] < dis2cls_bd[i] and y[i] != boundary_labels[i]:\n dis2cls_bd[i] = d[i]\n cls_bd[i] = boundary_points[i]\n return cls_bd, dis2cls_bd", "def detect_branching(\n self,\n segs: Sequence[np.ndarray],\n segs_tips: Sequence[np.ndarray],\n segs_connects,\n segs_undecided,\n segs_adjacency,\n iseg: int,\n tips3: np.ndarray,\n ):\n seg = segs[iseg]\n # restrict distance matrix to points in segment\n if not isinstance(self.distances_dpt, OnFlySymMatrix):\n Dseg = self.distances_dpt[np.ix_(seg, seg)]\n else:\n Dseg = self.distances_dpt.restrict(seg)\n # given the three tip points and the distance matrix detect the\n # branching on the segment, return the list ssegs of segments that\n # are defined by splitting this segment\n result = self._detect_branching(Dseg, tips3, seg)\n ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk = result\n # map back to global indices\n for iseg_new, seg_new in enumerate(ssegs):\n ssegs[iseg_new] = seg[seg_new]\n ssegs_tips[iseg_new] = seg[ssegs_tips[iseg_new]]\n ssegs_connects[iseg_new] = list(seg[ssegs_connects[iseg_new]])\n # remove previous segment\n segs.pop(iseg)\n segs_tips.pop(iseg)\n # insert trunk/undecided_cells at same position\n segs.insert(iseg, ssegs[trunk])\n segs_tips.insert(iseg, ssegs_tips[trunk])\n # append other segments\n segs += [seg for iseg, seg in enumerate(ssegs) if iseg != trunk]\n segs_tips += [\n seg_tips for iseg, seg_tips in enumerate(ssegs_tips) if iseg != trunk\n ]\n if len(ssegs) == 4:\n # insert undecided cells at same position\n segs_undecided.pop(iseg)\n segs_undecided.insert(iseg, True)\n # correct edges in adjacency matrix\n n_add = len(ssegs) - 1\n prev_connecting_segments = segs_adjacency[iseg].copy()\n if self.flavor == 'haghverdi16':\n segs_adjacency += [[iseg] for i in range(n_add)]\n segs_connects += [\n seg_connects\n for iseg, seg_connects in enumerate(ssegs_connects)\n if iseg != trunk\n ]\n prev_connecting_points = segs_connects[ # noqa: F841 TODO Evaluate whether to assign the variable or not\n iseg\n ]\n for jseg_cnt, jseg in enumerate(prev_connecting_segments):\n iseg_cnt = 0\n for iseg_new, seg_new in enumerate(ssegs):\n if iseg_new != trunk:\n pos = segs_adjacency[jseg].index(iseg)\n connection_to_iseg = segs_connects[jseg][pos]\n if connection_to_iseg in seg_new:\n kseg = len(segs) - n_add + iseg_cnt\n segs_adjacency[jseg][pos] = kseg\n pos_2 = segs_adjacency[iseg].index(jseg)\n segs_adjacency[iseg].pop(pos_2)\n idx = segs_connects[iseg].pop(pos_2)\n segs_adjacency[kseg].append(jseg)\n segs_connects[kseg].append(idx)\n break\n iseg_cnt += 1\n segs_adjacency[iseg] += list(\n range(len(segs_adjacency) - n_add, len(segs_adjacency))\n )\n segs_connects[iseg] += ssegs_connects[trunk]\n else:\n import networkx as nx\n\n segs_adjacency += [[] for i in range(n_add)]\n segs_connects += [[] for i in range(n_add)]\n kseg_list = [iseg] + list(range(len(segs) - n_add, len(segs)))\n for jseg in prev_connecting_segments:\n pos = segs_adjacency[jseg].index(iseg)\n distances = []\n closest_points_in_jseg = []\n closest_points_in_kseg = []\n for kseg in kseg_list:\n reference_point_in_k = segs_tips[kseg][0]\n closest_points_in_jseg.append(\n segs[jseg][\n np.argmin(\n self.distances_dpt[reference_point_in_k, segs[jseg]]\n )\n 
]\n )\n # do not use the tip in the large segment j, instead, use the closest point\n reference_point_in_j = closest_points_in_jseg[\n -1\n ] # segs_tips[jseg][0]\n closest_points_in_kseg.append(\n segs[kseg][\n np.argmin(\n self.distances_dpt[reference_point_in_j, segs[kseg]]\n )\n ]\n )\n distances.append(\n self.distances_dpt[\n closest_points_in_jseg[-1], closest_points_in_kseg[-1]\n ]\n )\n # print(jseg, '(', segs_tips[jseg][0], closest_points_in_jseg[-1], ')',\n # kseg, '(', segs_tips[kseg][0], closest_points_in_kseg[-1], ') :', distances[-1])\n idx = np.argmin(distances)\n kseg_min = kseg_list[idx]\n segs_adjacency[jseg][pos] = kseg_min\n segs_connects[jseg][pos] = closest_points_in_kseg[idx]\n pos_2 = segs_adjacency[iseg].index(jseg)\n segs_adjacency[iseg].pop(pos_2)\n segs_connects[iseg].pop(pos_2)\n segs_adjacency[kseg_min].append(jseg)\n segs_connects[kseg_min].append(closest_points_in_jseg[idx])\n # if we split two clusters, we need to check whether the new segments connect to any of the other\n # old segments\n # if not, we add a link between the new segments, if yes, we add two links to connect them at the\n # correct old segments\n do_not_attach_kseg = False\n for kseg in kseg_list:\n distances = []\n closest_points_in_jseg = []\n closest_points_in_kseg = []\n jseg_list = [\n jseg\n for jseg in range(len(segs))\n if jseg != kseg and jseg not in prev_connecting_segments\n ]\n for jseg in jseg_list:\n reference_point_in_k = segs_tips[kseg][0]\n closest_points_in_jseg.append(\n segs[jseg][\n np.argmin(\n self.distances_dpt[reference_point_in_k, segs[jseg]]\n )\n ]\n )\n # do not use the tip in the large segment j, instead, use the closest point\n reference_point_in_j = closest_points_in_jseg[\n -1\n ] # segs_tips[jseg][0]\n closest_points_in_kseg.append(\n segs[kseg][\n np.argmin(\n self.distances_dpt[reference_point_in_j, segs[kseg]]\n )\n ]\n )\n distances.append(\n self.distances_dpt[\n closest_points_in_jseg[-1], closest_points_in_kseg[-1]\n ]\n )\n idx = np.argmin(distances)\n jseg_min = jseg_list[idx]\n if jseg_min not in kseg_list:\n segs_adjacency_sparse = sp.sparse.lil_matrix(\n (len(segs), len(segs)), dtype=float\n )\n for i, seg_adjacency in enumerate(segs_adjacency):\n segs_adjacency_sparse[i, seg_adjacency] = 1\n G = nx.Graph(segs_adjacency_sparse)\n paths_all = nx.single_source_dijkstra_path(G, source=kseg)\n if jseg_min not in paths_all:\n segs_adjacency[jseg_min].append(kseg)\n segs_connects[jseg_min].append(closest_points_in_kseg[idx])\n segs_adjacency[kseg].append(jseg_min)\n segs_connects[kseg].append(closest_points_in_jseg[idx])\n logg.debug(f' attaching new segment {kseg} at {jseg_min}')\n # if we split the cluster, we should not attach kseg\n do_not_attach_kseg = True\n else:\n logg.debug(\n f' cannot attach new segment {kseg} at {jseg_min} '\n '(would produce cycle)'\n )\n if kseg != kseg_list[-1]:\n logg.debug(' continue')\n continue\n else:\n logg.debug(' do not add another link')\n break\n if jseg_min in kseg_list and not do_not_attach_kseg:\n segs_adjacency[jseg_min].append(kseg)\n segs_connects[jseg_min].append(closest_points_in_kseg[idx])\n segs_adjacency[kseg].append(jseg_min)\n segs_connects[kseg].append(closest_points_in_jseg[idx])\n break\n segs_undecided += [False for i in range(n_add)]", "def find_p_hat(boxes, predictions, bk, lk, S, num_iterations, epsilon):\r\n\r\n num_boxes = predictions.shape[0]\r\n num_classes = predictions.shape[1]\r\n\r\n if num_boxes <= 1:\r\n return predictions\r\n\r\n if num_boxes <= bk:\r\n bk = num_boxes - 
1\r\n\r\n if num_classes <= lk:\r\n lk = num_classes\r\n\r\n box_centers = torch.empty(size=(num_boxes, 2), dtype=torch.double).to(device)\r\n box_centers[:, 0] = ((boxes[:, 2] - boxes[:, 0]) / 2) + boxes[:, 0]\r\n box_centers[:, 1] = ((boxes[:, 3] - boxes[:, 1]) / 2) + boxes[:, 1]\r\n\r\n box_nearest = torch.empty(size=(num_boxes, bk), dtype=torch.long).to(device)\r\n for i in range(len(boxes)):\r\n box_center = box_centers[i]\r\n distances = torch.sqrt((box_center[0] - box_centers[:, 0]) ** 2 + (box_center[1] - box_centers[:, 1]) ** 2)\r\n distances[i] = float('inf')\r\n box_nearest[i] = torch.argsort(distances)[0:bk]\r\n\r\n S_highest = torch.zeros(size=(num_classes, num_classes), dtype=torch.double).to(device)\r\n for i in range(len(S)):\r\n S_args = torch.argsort(S[i])[-lk:]\r\n S_highest[i, S_args] = S[i, S_args]\r\n\r\n p_hat_init = torch.full(size=(num_boxes, num_classes), fill_value=(1 / num_classes), dtype=torch.double).to(device)\r\n p_hat = p_hat_init\r\n for i in range(num_iterations):\r\n p_hat_temp = torch.clone(p_hat)\r\n for b in range(num_boxes):\r\n p = predictions[b]\r\n num = torch.sum(torch.mm(S_highest, torch.transpose(p_hat_temp[box_nearest[b]], 0, 1)), 1)\r\n denom = torch.sum(S_highest, dim=1) * bk\r\n p_hat[b] = (1 - epsilon) * torch.squeeze(torch.div(num, denom)) + epsilon * p\r\n p_hat[b] = torch.nan_to_num(p_hat[b])\r\n\r\n return p_hat", "def _objective_car(z, p, n):\n speed_limit = p[p_idx.SpeedLimit]\n target_speed = p[p_idx.TargetSpeed]\n kReg_dDelta = p[p_idx.kReg_dDelta]\n kLag = p[p_idx.kLag]\n kLat = p[p_idx.kLat]\n pLeftLane = p[p_idx.pLeftLane]\n kReg_dAb = p[p_idx.kReg_dAb]\n kAboveTargetSpeedCost = p[p_idx.kAboveTargetSpeedCost]\n kBelowTargetSpeedCost = p[p_idx.kBelowTargetSpeedCost]\n kAboveSpeedLimit = p[p_idx.kAboveSpeedLimit]\n kSlack = p[p_idx.kSlack]\n pointsO = params.n_opt_param\n pointsN = params.n_bspline_points\n obj = 0\n\n for k in range(n):\n upd_x_idx = k * params.n_states + (n - 1) * params.n_inputs\n upd_u_idx = k * params.n_inputs\n\n points = getPointsFromParameters(p, pointsO + k * pointsN * 3, pointsN) # todo check indices\n radii = getRadiiFromParameters(p, pointsO + k * pointsN * 3, pointsN) # todo check indices\n\n # get the fancy spline\n splx, sply = casadiDynamicBSPLINE(z[x_idx.S + upd_x_idx], points)\n spldx, spldy = casadiDynamicBSPLINEforward(z[x_idx.S + upd_x_idx], points)\n splsx, splsy = casadiDynamicBSPLINEsidewards(z[x_idx.S + upd_x_idx], points)\n r = casadiDynamicBSPLINERadius(z[x_idx.S + upd_x_idx], radii)\n\n forward = vertcat(spldx, spldy)\n sidewards = vertcat(splsx, splsy)\n\n realPos = vertcat(z[x_idx.X + upd_x_idx], z[x_idx.Y + upd_x_idx])\n centerPos = realPos\n\n wantedpos = vertcat(splx, sply)\n wantedpos_CL = vertcat(splx, sply) + r / 2 * sidewards\n # todo clarify what is this cost function\n error = centerPos - wantedpos\n error_CL = centerPos - wantedpos_CL\n lagerror = mtimes(forward.T, error)\n laterror = mtimes(sidewards.T, error)\n laterror_CL = mtimes(sidewards.T, error_CL)\n speedcostA = speedPunisherMax(z[x_idx.Vx + upd_x_idx], target_speed) * kAboveTargetSpeedCost\n speedcostB = speedPunisherMin(z[x_idx.Vx + upd_x_idx], target_speed) * kBelowTargetSpeedCost\n speedcostM = speedPunisherMax(z[x_idx.Vx + upd_x_idx], speed_limit) * kAboveSpeedLimit\n slack = z[u_idx.Slack_Lat + upd_u_idx] ** 2\n slackcoll = z[u_idx.Slack_Coll + upd_u_idx] ** 2\n slackobs = z[u_idx.Slack_Obs + upd_u_idx] ** 2\n lagcost = kLag * lagerror ** 2\n leftLaneCost = pLeftLane * latErrorPunisher(laterror, 0)\n latcostCL 
= kLat * laterror_CL ** 2\n regAB = z[u_idx.dAcc + upd_u_idx] ** 2 * kReg_dAb\n regBeta = z[u_idx.dDelta + upd_u_idx] ** 2 * kReg_dDelta\n obj = obj + lagcost + leftLaneCost + latcostCL + regAB + regBeta + speedcostA + speedcostB + speedcostM + \\\n kSlack * slack + kSlack * slackcoll + kSlack * slackobs\n\n return obj", "def hypergraph_two_sided_sweep(x, hypergraph):\n # Get useful data about the hypergraph that we will use repeatedly in the algorithm\n all_vertices = hypergraph.nodes\n all_edges = hypergraph.edges\n dict_vertices_to_adjacent_edges = {v: [] for v in all_vertices}\n hyplogging.logger.debug(\"Constructing dictionary of adjacent edges.\")\n for edge in all_edges:\n for vertex in edge:\n dict_vertices_to_adjacent_edges[vertex].append(set(edge))\n\n best_l = set()\n best_r = set()\n best_bipartiteness = 1\n current_l = set()\n current_r = set()\n\n # We will update the computation of the bipartiteness as we go along.\n current_vol = 0\n current_numerator = 0\n\n # Get the sorted indices of x, in order of highest absolute value to lowest\n hyplogging.logger.debug(\"Sorting the vertices according to given vector.\")\n ordering = reversed(np.argsort(abs(x)))\n\n # Perform the sweep\n hyplogging.logger.debug(\"Checking each sweep set.\")\n for i, vertex_index in enumerate(ordering):\n if i % 1000 == 0:\n hyplogging.logger.debug(f\"Checking sweep set number {i}/{hypergraph.num_vertices}.\")\n\n # Add the next vertex to the candidate set\n added_to_l = False\n if x[vertex_index] >= 0:\n current_r.add(all_vertices[vertex_index])\n else:\n added_to_l = True\n current_l.add(all_vertices[vertex_index])\n\n # Update the bipartiteness values\n current_vol += hypergraph.degree(all_vertices[vertex_index])\n for edge in dict_vertices_to_adjacent_edges[all_vertices[vertex_index]]:\n edge_l_intersection = len(current_l.intersection(edge))\n edge_r_intersection = len(current_r.intersection(edge))\n edge_entirely_inside_l = edge_l_intersection == len(edge)\n edge_entirely_inside_r = edge_r_intersection == len(edge)\n\n if edge_entirely_inside_l:\n current_numerator += 1\n if edge_entirely_inside_r:\n current_numerator += 1\n if edge_l_intersection > 0 and edge_r_intersection == 0:\n current_numerator += 1\n if edge_r_intersection > 0 and edge_l_intersection == 0:\n current_numerator += 1\n\n # Remove 1 from the numerator if we were already counting one.\n if added_to_l:\n if edge_l_intersection >= 2 and edge_r_intersection == 0:\n current_numerator -= 1\n if edge_l_intersection == 1 and edge_r_intersection > 0:\n current_numerator -= 1\n else:\n if edge_r_intersection >= 2 and edge_l_intersection == 0:\n current_numerator -= 1\n if edge_r_intersection == 1 and edge_l_intersection > 0:\n current_numerator -= 1\n\n # Get the bipartiteness and check if it is best so far\n beta = current_numerator / current_vol if current_vol != 0 else 1\n if beta < best_bipartiteness:\n best_bipartiteness = beta\n best_l = current_l.copy()\n best_r = current_r.copy()\n\n hyplogging.logger.debug(f\"Best bipartiteness in sweep set: {best_bipartiteness}\")\n return list(best_l), list(best_r)", "def test_batch_vector_substitutions(\n free_alg, full_balance, simplify\n):\n\n dr = free_alg\n p = dr.names\n\n a = IndexedBase('a')\n x = IndexedBase('x')\n y = IndexedBase('y')\n i, j = p.i, p.j\n v = p.v\n v_dag = Vec('v', indices=(CR,))\n\n #\n # Spin flipping\n #\n\n orig1 = dr.sum((i, p.R), (j, p.R), a[i, j] * v[i, UP] * v[j, DOWN])\n defs1 = [\n dr.define(v[i, UP], v[i, DOWN]), dr.define(v[i, DOWN], v[i, UP])\n ]\n\n # 
Sequentially apply the definitions of the substitutions\n expected_sequential = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * v[i, UP] * v[j, UP]\n )\n res = orig1.subst_all(\n defs1, simult_all=False, full_balance=full_balance, simplify=simplify\n )\n assert res == expected_sequential\n\n # Simultaneously apply the definitions of the substitutions\n expected_simutaneous = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * v[i, DOWN] * v[j, UP]\n )\n res = orig1.subst_all(\n defs1, simult_all=True, full_balance=full_balance, simplify=simplify\n )\n assert res == expected_simutaneous\n\n #\n # In-place BCS transformation\n #\n\n orig2 = dr.einst(\n a[i, j] * v_dag[i, UP] * v[j, UP] +\n a[i, j] * v_dag[i, DOWN] * v[j, DOWN]\n )\n defs2 = [\n dr.define(v_dag[i, UP], x[i] * v_dag[i, UP] - y[i] * v[i, DOWN]),\n dr.define(v_dag[i, DOWN], x[i] * v_dag[i, DOWN] + y[i] * v[i, UP]),\n dr.define(v[i, UP], x[i] * v[i, UP] - y[i] * v_dag[i, DOWN]),\n dr.define(v[i, DOWN], x[i] * v[i, DOWN] + y[i] * v_dag[i, UP]),\n ]\n\n # Sequentially apply the definitions of the substitutions\n expected_sequential = orig2\n for def_ in defs2:\n expected_sequential = def_.act(expected_sequential)\n expected_sequential = expected_sequential.simplify()\n res = orig2.subst_all(\n defs2, simult_all=False, full_balance=full_balance, simplify=simplify\n ).simplify()\n assert res == expected_sequential\n\n # Simultaneously apply the definitions of the substitutions\n expected_simutaneous = dr.sum(\n (i, p.R), (j, p.R), a[i, j] * (\n (x[i] * v_dag[i, UP] - y[i] * v[i, DOWN])\n * (x[j] * v[j, UP] - y[j] * v_dag[j, DOWN])\n + (x[i] * v_dag[i, DOWN] + y[i] * v[i, UP])\n * (x[j] * v[j, DOWN] + y[j] * v_dag[j, UP])\n )\n ).simplify()\n res = orig2.subst_all(\n defs2, simult_all=True, full_balance=full_balance, simplify=simplify\n ).simplify()\n assert res == expected_simutaneous", "def ball_collision_update(self):\r\n ball_pairs = self.balls_colliding()\r\n for ball_pair in ball_pairs:\r\n b1,b2 = ball_pair\r\n self.ball_pair_collision_update(b1,b2)", "def check_correctness_bc01(f):\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False", "def _compute_shared(self, x=None, y=None):\n with tf.name_scope(\"comput_shared\") as name:\n assert (x is None) != (y is None)\n is_x = x is not None\n\n with tf.name_scope(\"piece0\"):\n range_min = tf.convert_to_tensor(self.range_min, name='range_min')\n kx = _knot_positions(self.bin_widths, range_min)\n ky = _knot_positions(self.bin_heights, range_min)\n kd = _padded(_ensure_at_least_1d(self.knot_slopes), lhs=1, rhs=1)\n kx_or_ky = kx if is_x else ky\n kx_or_ky_min = kx_or_ky[..., 0]\n kx_or_ky_max = kx_or_ky[..., -1]\n x_or_y = x if is_x else y\n out_of_bounds = tf.zeros_like(x_or_y) #; (x_or_y <= kx_or_ky_min) | (x_or_y >= kx_or_ky_max)\n x_or_y = x_or_y # tf.where(out_of_bounds, kx_or_ky_min, x_or_y)\n\n with tf.name_scope(\"piece0b\"):\n shape = functools.reduce(\n tf.broadcast_dynamic_shape,\n (\n tf.shape(x_or_y[..., tf.newaxis]), # Add a n_knots dim.\n tf.shape(kx),\n tf.shape(ky),\n tf.shape(kd)))\n\n with tf.name_scope(\"piece1\"):\n\n bc_x_or_y = tf.broadcast_to(x_or_y, shape[:-1])\n bc_kx = tf.broadcast_to(kx, shape)\n bc_ky = tf.broadcast_to(ky, shape)\n bc_kd = tf.broadcast_to(kd, shape)\n bc_kx_or_ky = bc_kx if is_x else bc_ky\n\n with tf.name_scope(\"piece1b\"):\n indices = tf.clip_by_value(\n tf.searchsorted(\n bc_kx_or_ky[..., :-1],\n bc_x_or_y[..., tf.newaxis],\n side='right',\n out_type=tf.int32) - 1, 0, 1000)\n\n def gather_squeeze(params, indices):\n rank = tensorshape_util.rank(indices.shape)\n if rank is None:\n raise ValueError('`indices` must have statically known rank.')\n return tf.gather(params, indices, axis=-1, batch_dims=rank - 1)[..., 0]\n\n with tf.name_scope(\"piece2\"):\n x_k = gather_squeeze(bc_kx, indices)\n x_kp1 = gather_squeeze(bc_kx, indices + 1)\n y_k = gather_squeeze(bc_ky, indices)\n y_kp1 = gather_squeeze(bc_ky, indices + 1)\n d_k = gather_squeeze(bc_kd, indices)\n d_kp1 = gather_squeeze(bc_kd, indices + 1)\n h_k = y_kp1 - y_k\n w_k = x_kp1 - x_k\n s_k = h_k / w_k\n\n return _SplineShared(\n out_of_bounds=out_of_bounds,\n x_k=x_k,\n y_k=y_k,\n d_k=d_k,\n d_kp1=d_kp1,\n h_k=h_k,\n w_k=w_k,\n s_k=s_k)", "def skel_flow(point, skeleton):\n \n def dist_pt2ln(pt, ln):\n \"\"\"\n finds euclidean distance between point and line\n Args:\n pt (3x, array)\n ln (2x3 array)\n \"\"\"\n p1 = ln[0,:]\n p2 = ln[1,:]\n return np.linalg.norm(np.cross(p2-p1, p1-pt))/np.linalg.norm(p2-p1)\n \n try:\n assert type(point) is list and len(point)==3\n except:\n print point\n nodes = skeleton.get_nodes()\n dists = 
np.array([np.linalg.norm(np.array(node-point)) for node in nodes])\n nodes = [nodes[i] for i in np.argsort(dists)]\n #print nodes\n edges = skeleton.get_edges()\n edge_vecs = [edge[0]-edge[1] for edge in edges]\n edge_vecs = [np.divide(edge_vec, float(edge_vec[0])) for edge_vec in edge_vecs] # normalize such that z-comp is 1\n # iterate over nodes till you find a non-horizontal edge at a node closest to point\n found_flow = False\n check_edges = []\n while not found_flow:\n try:\n check_node = nodes.pop(0) # closest node to input point\n #print check_node\n except:\n print \"Error! Could not find non-horizontal flow vector at this point\"\n for i, (edge, edge_vec) in enumerate(zip(edges, edge_vecs)):\n allow_edge = ((np.all(check_node==edge[0]) or np.all(check_node==edge[1])) \n and not np.any(np.isnan(edge_vec)) and not np.any(np.isinf(edge_vec)))\n if allow_edge:\n check_edges.append(i)\n found_flow = (len(check_edges)>0)\n\n #print edges[check_edges]\n #print check_node\n # find edges for which the point is z-midway\n midway_edges = []\n for i in check_edges:\n z_lower = min(edges[i][0,0], edges[i][1,0])\n z_higher = max(edges[i][0,0], edges[i][1,0])\n if z_lower > point[0] or z_higher < point[0]:\n midway_edges.append(i)\n if len(midway_edges)>0: \n check_edges = midway_edges\n # find edge closest to point\n check_dists = [dist_pt2ln(np.array(point), edges[i]) for i in check_edges]\n flow_edge_id = check_edges[np.argmin(check_dists)]\n #print edges[flow_edge_id]\n flow_vec = edge_vecs[flow_edge_id]\n return flow_vec", "def _computeSurfJacobian(self, fd=True):\n\n # timing stuff:\n t1 = time.time()\n tesp = 0\n teval = 0\n tcomm = 0\n\n # counts\n nDV = self.getNDV()\n if self.maxproc is None:\n nproc = self.comm.size\n else:\n if self.maxproc <= self.comm.size:\n nproc = self.maxproc\n else:\n nproc = self.comm.size\n rank = self.comm.rank\n\n # arrays to collect local pointset info\n ul = np.zeros(0) # local u coordinates\n vl = np.zeros(0) # local v coordinates\n tl = np.zeros(0) # local t coordinates\n faceIDl = np.zeros(0, dtype=\"intc\") # surface index\n bodyIDl = np.zeros(0, dtype=\"intc\") # body index\n edgeIDl = np.zeros(0, dtype=\"intc\") # edge index\n uvlimitsl = np.zeros((0, 4))\n tlimitsl = np.zeros((0, 2))\n any_ptset_nondistributed = False\n any_ptset_distributed = False\n for ptSetName in self.pointSets:\n # initialize the Jacobians\n self.pointSets[ptSetName].jac = np.zeros((3 * self.pointSets[ptSetName].nPts, nDV))\n if self.pointSets[ptSetName].distributed:\n any_ptset_distributed = True\n else:\n any_ptset_nondistributed = True\n\n # first, we need to vstack all the point set info we have\n # counts of these are also important, saved in ptSet.nPts\n ul = np.concatenate((ul, self.pointSets[ptSetName].u))\n vl = np.concatenate((vl, self.pointSets[ptSetName].v))\n tl = np.concatenate((tl, self.pointSets[ptSetName].t))\n faceIDl = np.concatenate((faceIDl, self.pointSets[ptSetName].faceID))\n bodyIDl = np.concatenate((bodyIDl, self.pointSets[ptSetName].bodyID))\n edgeIDl = np.concatenate((edgeIDl, self.pointSets[ptSetName].edgeID))\n uvlimitsl = np.concatenate((uvlimitsl, self.pointSets[ptSetName].uvlimits0))\n tlimitsl = np.concatenate((tlimitsl, self.pointSets[ptSetName].tlimits0))\n if any_ptset_distributed and any_ptset_nondistributed:\n raise ValueError(\n \"Both nondistributed and distributed pointsets were added to this DVGeoESP which is not yet supported\"\n )\n\n if any_ptset_distributed:\n # need to get ALL the coordinates from every proc on every proc to do 
the parallel FD\n if self.maxproc is not None:\n raise ValueError(\"Max processor limit is not usable with distributed pointsets\")\n # now figure out which proc has how many points.\n sizes = np.array(self.comm.allgather(len(ul)), dtype=\"intc\")\n # displacements for allgather\n disp = np.array([np.sum(sizes[:i]) for i in range(nproc)], dtype=\"intc\")\n # global number of points\n nptsg = np.sum(sizes)\n ug, vg, tg, faceIDg, bodyIDg, edgeIDg, uvlimitsg, tlimitsg, sizes = self._allgatherCoordinates(\n ul, vl, tl, faceIDl, bodyIDl, edgeIDl, uvlimitsl, tlimitsl\n )\n else:\n nptsg = len(ul)\n ug = ul\n vg = vl\n tg = tl\n faceIDg = faceIDl\n bodyIDg = bodyIDl\n edgeIDg = edgeIDl\n uvlimitsg = uvlimitsl\n tlimitsg = tlimitsl\n # create a local new point array. We will use this to get the new\n # coordinates as we perturb DVs. We just need one (instead of nDV times the size)\n # because we get the new points, calculate the jacobian and save it right after\n ptsNewL = np.zeros(len(ul) * 3)\n\n # we now have all the point info on all procs.\n tcomm += time.time() - t1\n\n # We need to evaluate all the points on respective procs for FD computations\n\n # determine how many DVs this proc will perturb.\n n = 0\n for iDV in range(self.getNDV()):\n # I have to do this one.\n if iDV % nproc == rank:\n n += 1\n if fd:\n # evaluate all the points\n pts0 = self._evaluatePoints(ug, vg, tg, uvlimitsg, tlimitsg, bodyIDg, faceIDg, edgeIDg, nptsg)\n # allocate the approriate sized numpy array for the perturbed points\n ptsNew = np.zeros((n, nptsg, 3))\n\n # perturb the DVs on different procs and compute the new point coordinates.\n i = 0 # Counter on local Jac\n\n for iDV in range(self.getNDV()):\n # I have to do this one.\n if iDV % nproc == rank:\n # Get the DV object for this variable\n dvName = self.globalDVList[iDV][0]\n dvLocalIndex = self.globalDVList[iDV][1]\n dvObj = self.DVs[dvName]\n # Step size for this particular DV\n dh = dvObj.dh\n\n # Perturb the DV\n dvSave = dvObj.value.copy()\n dvObj.value[dvLocalIndex] += dh\n\n # update the esp model\n t11 = time.time()\n self._updateModel()\n t12 = time.time()\n tesp += t12 - t11\n\n t11 = time.time()\n # evaluate the points\n\n ptsNew[i, :, :] = self._evaluatePoints(\n ug, vg, tg, uvlimitsg, tlimitsg, bodyIDg, faceIDg, edgeIDg, nptsg\n )\n t12 = time.time()\n teval += t12 - t11\n # now we can calculate the jac and put it back in ptsNew\n ptsNew[i, :, :] = (ptsNew[i, :, :] - pts0[:, :]) / dh\n\n # Reset the DV\n dvObj.value = dvSave.copy()\n\n # increment the counter\n i += 1\n\n # Now, we have perturbed points on each proc that perturbed a DV\n\n # reset the model.\n t11 = time.time()\n self._updateModel()\n t12 = time.time()\n tesp += t12 - t11\n\n else:\n raise NotImplementedError(\"ESP analytic derivatives are not implemented\")\n\n ii = 0\n # loop over the DVs and scatter the perturbed points to original procs\n for iDV in range(self.getNDV()):\n # Get the DV object for this variable\n dvName = self.globalDVList[iDV][0]\n dvLocalIndex = self.globalDVList[iDV][1]\n dvObj = self.DVs[dvName]\n # Step size for this particular DV\n dh = dvObj.dh\n\n t11 = time.time()\n root_proc = iDV % nproc\n if any_ptset_distributed:\n # create the send/recv buffers for the scatter\n if root_proc == rank:\n sendbuf = [ptsNew[ii, :, :].flatten(), sizes * 3, disp * 3, MPI.DOUBLE]\n else:\n sendbuf = [np.zeros((0, 3)), sizes * 3, disp * 3, MPI.DOUBLE]\n recvbuf = [ptsNewL, MPI.DOUBLE]\n # scatter the info from the proc that perturbed this DV to all procs\n 
self.comm.Scatterv(sendbuf, recvbuf, root=root_proc)\n else:\n # create the send/recv buffers for the bcast\n if root_proc == rank:\n bcastbuf = [ptsNew[ii, :, :].flatten(), MPI.DOUBLE]\n ptsNewL[:] = ptsNew[ii, :, :].flatten()\n else:\n bcastbuf = [ptsNewL, MPI.DOUBLE]\n # bcast the info from the proc that perturbed this DV to all procs\n self.comm.Bcast(bcastbuf, root=root_proc)\n self.comm.Barrier()\n\n t12 = time.time()\n tcomm += t12 - t11\n\n # calculate the jacobian here for the pointsets\n offset = 0\n for ptSet in self.pointSets:\n # number of points in this pointset\n nPts = self.pointSets[ptSet].nPts\n\n # indices to extract correct points from the long pointset array\n ibeg = offset * 3\n iend = ibeg + nPts * 3\n\n # ptsNewL has the jacobian itself...\n self.pointSets[ptSet].jac[:, iDV] = ptsNewL[ibeg:iend].copy()\n\n # increment the offset\n offset += nPts\n\n # pertrub the local counter on this proc.\n # This loops over the DVs that this proc perturbed\n if iDV % nproc == rank:\n ii += 1\n\n t2 = time.time()\n if rank == 0:\n print(\"FD jacobian calcs with DVGeoESP took\", (t2 - t1), \"seconds in total\")\n print(\"updating the esp model took\", tesp, \"seconds\")\n print(\"evaluating the new points took\", teval, \"seconds\")\n print(\"communication took\", tcomm, \"seconds\")\n\n # set the update flags\n for ptSet in self.pointSets:\n self.updatedJac[ptSet] = True", "def boundary_op_n(v):\r\n h = list(v.dic.keys())[0]\r\n p = len(h) - 1\r\n s = P_chains([],[])\r\n if (p != 0) and (isinstance(h, str) != True) and (isinstance(h, frozenset) != True) and (isinstance(h, ImmutableMatrix) != True):\r\n if (is_int(list(v.dic.keys())) == True):\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n return s\r\n else:\r\n aux = P_chains([],[])\r\n D = {}\r\n ct = 0\r\n st = []\r\n for u in v.dic.keys():\r\n for x in u:\r\n if x not in st:\r\n st.append(x)\r\n for i in st:\r\n D[tuple([ct])] = i\r\n ct = ct + 1\r\n for u in v.dic.keys():\r\n w2 = []\r\n for x in u:\r\n for y in list(D.keys()):\r\n if (x == D[y]):\r\n w2.append(y)\r\n aux = aux + P_chains([tuple(w2)],[v.dic[u]]) \r\n v = aux\r\n for u in v.dic.keys():\r\n c = 0\r\n for i in u: \r\n w = list(u)[:]\r\n w.remove(i)\r\n if (orientation_function(tuple(tuple_sorted(tuple(w))),tuple(w),p) == True):\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**c) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n else:\r\n s1 = P_chains([tuple(tuple_sorted(tuple(w)))],[abs(v.dic[u])])\r\n if (np.sign((v.dic[u])*(-1)**(c+1)) < 0):\r\n s = s - s1\r\n else:\r\n s = s + s1\r\n c = c+1\r\n s2 = P_chains([],[])\r\n for u in s.dic.keys():\r\n w2=[]\r\n for i in u:\r\n w2.append(D[i])\r\n s2 = s2 + P_chains([tuple(w2)],[s.dic[u]])\r\n \r\n return s2\r\n else:\r\n return s", "def makeSlopeMap():\n a=numpy.zeros((ncents/2,2),numpy.int32)\n subFlag=makeSubapMap()#subapFlag.copy()\n # for i in range(7):#ngs 1-3, truth, lgs, lofs, hofs\n # tmp=subFlag[nsub[:i].sum():nsub[:i+1].sum()]\n # tmp.shape=nsuby[i],nsubx[i]\n # if i==5:#lofs\n # 
tmp[:]=sfNoObs*(i+1)\n # elif i==6:#hofs\n # tmp[:]=sf14NoObs*(i+1)\n # else:\n # tmp[:]=individualSubapFlag*(i+1)\n pos=0\n for i in range(subFlag.size):\n if subFlag[i]!=0:\n a[pos]=subFlag[i]\n pos+=1\n return a", "def sweep(B,s,chi,H_mpo,Lp,Rp):\n \n d = B[0].shape[0]\n for i_bond in [0,1]:\n ia = np.mod(i_bond-1,2); ib = np.mod(i_bond,2); ic = np.mod(i_bond+1,2)\n chia = B[ib].shape[1]; chic = B[ic].shape[2]\n\n # Construct theta matrix #\n H = H_mixed(Lp,Rp,H_mpo[i_bond],H_mpo[i_bond])\n theta,e0 = diag(B,s,H,ia,ib,ic,chia,chic)\n \n # Schmidt deomposition #\n X, Y, Z = np.linalg.svd(theta); Z = Z.T\n\n chib = np.min([np.sum(Y>10.**(-8)), chi])\n X=np.reshape(X[:d*chia,:chib],(d,chia,chib))\n Z=np.transpose(np.reshape(Z[:d*chic,:chib],(d,chic,chib)),(0,2,1))\n \n # Update Environment #\n Lp = np.tensordot(Lp, H_mpo[i_bond], axes=(2,0))\n Lp = np.tensordot(Lp, X, axes=([0,3],[1,0]))\n Lp = np.tensordot(Lp, np.conj(X), axes=([0,2],[1,0]))\n Lp = np.transpose(Lp,(1,2,0))\n\n Rp = np.tensordot(H_mpo[i_bond], Rp, axes=(1,2))\n Rp = np.tensordot(np.conj(Z),Rp, axes=([0,2],[2,4]))\n Rp = np.tensordot(Z,Rp, axes=([0,2],[2,3]))\n\n # Obtain the new values for B and s #\n s[ib] = Y[:chib]/np.sqrt(sum(Y[:chib]**2))\n B[ib] = np.transpose(np.tensordot(np.diag(s[ia]**(-1)),X,axes=(1,1)),(1,0,2))\n B[ib] = np.tensordot(B[ib], np.diag(s[ib]),axes=(2,1))\n\n B[ic] = Z\n return Lp,Rp,e0", "def GetBoundaryLoops(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.InferBoundaryElementType() != \"line\":\n raise NotImplementedError(\"Computing boundary loops is only supported for tri/quad meshes for now\")\n\n self.GetEdges()\n\n # First create a node to neighbour map i.e. node as key and its two neighbouring nodes as value\n nodeToNeighboursMap = dict()\n for i in range(self.edges.shape[0]):\n\n if self.edges[i,0] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,0]] = [self.edges[i,1],-1]\n else:\n nodeToNeighboursMap[self.edges[i,0]][1] = self.edges[i,1]\n\n if self.edges[i,1] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,1]] = [self.edges[i,0],-1]\n else:\n nodeToNeighboursMap[self.edges[i,1]][1] = self.edges[i,0]\n\n # Now create a vector of face loops\n faceLoops = []\n while nodeToNeighboursMap:\n # Insert the first node from node to edge map and its two neighbours in order and erase it from the map\n faceLoop = []\n mapBegin = next(iter(nodeToNeighboursMap))\n faceLoop.append(nodeToNeighboursMap[mapBegin][0])\n faceLoop.append(mapBegin)\n faceLoop.append(nodeToNeighboursMap[mapBegin][1])\n nodeToNeighboursMap.pop(mapBegin, None)\n\n while True:\n # Pick the last node in the current face loop and find its neighbours\n if faceLoop[-1] in nodeToNeighboursMap:\n tmp = faceLoop[-1]\n mapIter = nodeToNeighboursMap[faceLoop[-1]]\n # Check if we have not reached the end of the loop i.e. 
the first element\n if mapIter[0] != faceLoop[0] and mapIter[1] != faceLoop[0]:\n if mapIter[0] == faceLoop[-2]:\n faceLoop.append(mapIter[1])\n elif mapIter[1] == faceLoop[-2]:\n faceLoop.append(mapIter[0])\n else:\n nodeToNeighboursMap.pop(faceLoop[0], None)\n\n nodeToNeighboursMap.pop(tmp, None)\n else:\n faceLoop = np.array(faceLoop)\n faceLoops.append(faceLoop)\n break\n\n return faceLoops", "def saddle_point(self):\n\n maxmin_value, maxmin_strategy_set = self.maxmin(0)\n minmax_value, minmax_strategy_set = self.minmax(1)\n\n if maxmin_value == minmax_value:\n return maxmin_strategy_set.intersection(minmax_strategy_set)\n return None", "def df_search(grid, level):\n states_we_have_seen_before = Set(grid)\n\n def recur(inner_grid, itter, level):\n counter = 0\n next_states = Set()\n\n for gg in legal_moves(inner_grid):\n if gg not in states_we_have_seen_before:\n states_we_have_seen_before.add(gg)\n next_states.add(gg)\n\n for t in next_states:\n if match_level(t, level):\n return (size * size * size - itter, t)\n\n if itter > 0:\n for t in next_states:\n r = recur(t, itter - 1, level)\n if r:\n return r\n return None\n\n return recur(grid, size * size * size, level)", "def calc_refl(velocity, shotloc_x, shotloc_z, layer_idxs):\n solver_dg = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_dg.vv.min_coords = velocity.min_coords\n solver_dg.vv.node_intervals = velocity.node_intervals\n solver_dg.vv.npts = velocity.npts\n solver_dg.vv.values = velocity.values\n\n #shotloc = 2.56 # km\n src_idx = (int((shotloc_x - velocity.min_coords[0])/velocity.node_intervals[0]), int(shotloc_z/velocity.node_intervals[1]), 0)\n solver_dg.tt.values[src_idx] = 0\n solver_dg.unknown[src_idx] = False\n solver_dg.trial.push(*src_idx)\n solver_dg.solve()\n\n solver_ug = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_ug.vv.min_coords = solver_dg.vv.min_coords\n solver_ug.vv.node_intervals = solver_dg.vv.node_intervals\n solver_ug.vv.npts = solver_dg.vv.npts\n solver_ug.vv.values = solver_dg.vv.values\n\n for ix in range(solver_ug.tt.npts[0]):\n #idx = (ix, solver_ug.tt.npts[1]-1, 0)\n idx = (ix, layer_idxs[ix], 0)\n solver_ug.tt.values[idx] = solver_dg.tt.values[idx]\n #print(idx, solver_dg.tt.values[idx])\n solver_ug.unknown[idx] = False\n solver_ug.trial.push(*idx)\n solver_ug.solve()\n \n return solver_ug.tt.values[:,0,0]", "def branching(self, data, linked_clusters, cluster, re = False):\n\n if re is False:\n # Form the branch\n self.clusters[cluster.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n linked_cluster = form_a_branch(cluster, data, descendants = linked_clusters)\n else:\n # If the conditions have been relaxed, we need to ensure we don't\n # create any redundant branches.\n _linked_clusters = [_cluster.antecessor for _cluster in linked_clusters]\n _linked_clusters = remove_branch(self, data, _linked_clusters, cluster)\n\n # Slot the cluster in at the correct level in the hierarchy.\n # Find the closest matching linked cluster\n var = []\n for link in _linked_clusters:\n var = get_var(self, data, cluster, link, var)\n # Keep the closest matching cluster\n keepidx = np.squeeze(np.where(np.asarray(var) == min(np.asarray(var))))\n if keepidx.size != 1:\n keepidx = keepidx[0]\n _linked_cluster_ = _linked_clusters[keepidx]\n _cluster_ = cluster\n _linked_cluster = _linked_cluster_\n\n # Merge downwards until loedis reaches the antecessor.\n\n if _linked_cluster.antecedent is None:\n pass\n else:\n while _linked_cluster.antecedent is not None:\n 
_linked_cluster = _linked_cluster.antecedent\n _linked_cluster = merge_clusters(_linked_cluster, _cluster_, data, branching=True)\n\n _cluster_members = cluster.cluster_members\n _cluster_indices = []\n for j in range(np.size(_cluster_members)):\n _cluster_indices.append(cluster.cluster_indices[j])\n\n # Form the branch\n self.clusters[cluster.cluster_idx] = cluster\n update_cluster_array(self, cluster, cluster.cluster_idx)\n linked_cluster = form_a_branch(cluster, data, descendants = _linked_clusters)\n\n # Merge data into correct _linked_cluster_ afterwards - otherwise points\n # will be linked more than once\n if np.size(_cluster_indices)==1:\n _linked_cluster_ = merge_data(_linked_cluster_, _cluster_members[0], _cluster_indices[0], data)\n else:\n for j in range(np.size(_cluster_indices)):\n _linked_cluster_ = merge_data(_linked_cluster_, _cluster_members[j], _cluster_indices[j], data)\n\n return linked_cluster", "def test_nearest_boundary_even():\n assert _nearest_boundary(10, 20, 14, 0) == 0\n assert _nearest_boundary(10, 20, 14, 1) == 0\n assert _nearest_boundary(10, 20, 15, 0) == 1\n assert _nearest_boundary(10, 20, 15, 1) == 1", "def build_bonds(self):\n shape_prime = np.array([self.shape[0]-1,self.shape[1]-1,self.shape[2]-1])\n zeros = np.array([0,0,0])\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for b,bond in enumerate(self.cell.bonds):\n newbond = copy.deepcopy(bond)\n newbond.cell1 += [i,j,k]\n newbond.cell2 += [i,j,k]\n #ToDo make a function to shorten those lines\n if np.prod(newbond.cell1 <= shape_prime) and np.prod(newbond.cell2<=shape_prime) and np.prod(zeros <=newbond.cell1) and np.prod(zeros <= newbond.cell2):\n newbond.coordinate1 = self.sites[newbond.cell1[0],newbond.cell1[1],newbond.cell1[2],newbond.site1].coordinate\n newbond.coordinate2 = self.sites[newbond.cell2[0],newbond.cell2[1],newbond.cell2[2],newbond.site2].coordinate\n self.bonds.append(newbond)", "def cut_bonds_z_highest(xy, NL, KL, BL, target_z, check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n BLbulk = BL[binds]\n BLboun = BL[Binds]\n\n # bBinds bonds connect bulk to boundary\n # Treat these as is connecting bulk(z) to bulk(z)\n bBinds = np.where(np.logical_xor(is_a, is_b))[0]\n BLbB = BL[bBinds]\n\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # number of bonds to cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n # Can write in terms of bonds? 
2have = zt\n # nbulk2cut = int(max([0,round((z_start - target_z)*0.5*float(NP_bulk))]))\n # nbulk2have = len(binds) - nbulk2cut\n # print 'nboun2have = ', nboun2have\n # print 'nbulk2have = ', nbulk2have\n\n # CUT BONDS FROM HIGHEST Z NODES (sum of endpts)\n # Unfortunately, this has to be done iteratively.\n # Algorithm: find zvals of all bonds. For all bonds with zval = max(zval),\n # cut all the bonds that don't share endpts with any of the other bonds.\n # Find these by going through in-place-randomized B2cut and cross off if later bonds share indices.\n # Let boundary bonds be cut, or not, and pay no attention to them, since lattice will be cropped.\n\n # First cut most coordinated, whether on bulk or boundary, but keep track of which.\n # Get bonds with highest z pairs of nodes\n NN = np.shape(KL)[1]\n zz = np.sum(KL, axis=1)\n # print 'zz = ', zz\n zbulk = float(np.sum(zz[bulk])) / float(len(bulk))\n print 'zbulk so far = ', zbulk\n\n # As long as we haven't cut enough bonds, cut some more\n while zbulk > target_z:\n print 'zbulk = ', zbulk\n zb = zz[BL[:, 0]] + zz[BL[:, 1]]\n zcut = np.where(zb == max(zb))[0]\n np.random.shuffle(zcut)\n B2cut = BL[zcut]\n # print 'B2cut = ', B2cut\n\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n # for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.title('Initial counting marks these')\n plt.pause(0.01)\n plt.clf()\n\n # print 'B2cut = ', B2cut\n # Cross off if later bonds share indices\n keep = np.ones(len(B2cut), dtype=bool)\n for ii in range(len(B2cut)):\n row = B2cut[ii]\n if row[0] in B2cut[ii + 1:, :].ravel():\n # print 'found ', row[0], 'in rest of array '\n # print ' --> len BL[ii+1:,:] = ', len(B2cut[ii+1:,:] )\n keep[ii] = False\n elif row[1] in B2cut[ii + 1:, :].ravel():\n keep[ii] = False\n\n # print 'keep = ', keep\n # print 'keep.any() = ', keep.any()\n if keep.any():\n B2cut = B2cut[keep]\n else:\n print 'The highest nodes are all connected to at least one other. Killing one bond...'\n B2cut = B2cut[0:1]\n\n # Only interested in the bulk bonds for measurement, but cutting boundary\n # bonds will get us out of a situation where bulk is less coordinated than\n # boundary so don't do --> B2cut = intersect2d(B2cut,BLbulk)\n\n N2cut = len(B2cut)\n\n # See what would happen if we cut all of these\n BLt = dh.setdiff2d(BL, B2cut)\n NLt, KLt = BL2NLandKL(BLt, NP=NP, NN=NN)\n zzt = np.sum(KLt, axis=1)\n zbulk = np.float(np.sum(zzt[bulk])) / float(len(bulk))\n\n # If we can cut all of these, do that. Otherwise, cut only as many as needed after shuffling.\n if len(np.where(zzt == 0)[0]) > 0:\n print 'There are dangling points. 
Removing bonds2cut that would make these...'\n # There are dangling points.\n # Remove the bonds that make zzt elems zero from the bonds to cut list\n # and recalculate.\n dangle_pts = np.where(zzt == 0)[0]\n # protect dangle points --> there is only one bond to find since we have run a \"keep\" search on B2cut\n inb0 = np.where(np.in1d(B2cut[:, 0], dangle_pts))[0]\n inb1 = np.where(np.in1d(B2cut[:, 1], dangle_pts))[0]\n keep = np.setdiff1d(np.arange(len(B2cut)), inb0)\n keep = np.setdiff1d(keep, inb1)\n print 'Protecting dangling bond: keep for dangle =', keep\n\n # Check --> show bond numbers and bond to cut and protect (dangles)\n if check:\n display_lattice_2D(xy, BL, close=False)\n for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n plt.text((xy[BL[ii, 0], 0] + xy[BL[ii, 1], 0]) * 0.5, (xy[BL[ii, 0], 1] + xy[BL[ii, 1], 1]) * 0.5,\n str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.plot([xy[B2cut[keep, 0], 0], xy[B2cut[keep, 1], 0]], [xy[B2cut[keep, 0], 1], xy[B2cut[keep, 1], 1]],\n 'b-', lw=5)\n plt.show()\n plt.clf()\n\n B2cut = B2cut[keep]\n N2cut = len(B2cut)\n\n BLt = dh.setdiff2d(BL, B2cut)\n NLt, KLt = BL2NLandKL(BLt, NP=NP, NN=NN)\n zzt = np.sum(KLt, axis=1)\n zbulk = np.float(np.sum(zzt[bulk])) / float(len(bulk))\n\n # If we end up in a place where these are the only bonds to cut, raise exception\n # --> means target_z is just too low for our given lattice.\n if np.size(B2cut) == 0:\n raise RuntimeError('target_z is too low for the given lattice! Cutting bonds led to dangling points.')\n\n if zbulk > target_z:\n print 'Still above: zbulk = ', zbulk\n\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n # for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n\n plt.pause(0.01)\n plt.clf()\n\n # move pointers\n BL, BLt = BLt, BL\n NL, NLt = NLt, NL\n KL, KLt = KLt, KL\n zz, zzt = zzt, zz\n else:\n print 'Approaching z = ', target_z, ' tuning one bond at a time...'\n # Cut a bond unless there is only one to cut\n # (in which case we are within threshold)\n if N2cut == 1:\n zbulk = 0.\n # move pointers\n BL, BLt = BLt, BL\n NL, NLt = NLt, NL\n KL, KLt = KLt, KL\n zz, zzt = zzt, zz\n else:\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n plt.text((xy[BL[ii, 0], 0] + xy[BL[ii, 1], 0]) * 0.5,\n (xy[BL[ii, 0], 1] + xy[BL[ii, 1], 1]) * 0.5, str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.pause(0.01)\n plt.clf()\n\n BL = dh.setdiff2d(BL, B2cut[0:1])\n NL, KL = BL2NLandKL(BL, NP=NP, NN=NN)\n zz = np.sum(KLt, axis=1)\n print 'zz = ', zz\n zbulk = np.float(np.sum(zz[bulk])) / float(len(bulk))\n\n # IGNORE BOUNDARY: MUST CUT OUT DESIRED REGION. 
OTHERWISE, IT'S JUST TOO HARD TO MAKE IT RIGHT.\n # Only interested in the boundary bonds now\n # number of bonds to cut in the boundary = nbulkcut * (# boundary bonds)/(#bulk bonds)\n # nB2cut = int(round(nbulk2cut * float(len(Binds))/float(len(binds))))\n # nboun2have = len(Binds) - nB2cut\n #\n # while nboun > nboun2have:\n # zz = np.sum(KL, axis=1)\n # zb = zz[BL[:,0]] + zz[BL[:,1]]\n # zcut = np.where(zb== max(zb))[0]\n # np.random.shuffle(zcut)\n # B2cut = BL[zcut]\n # # Only interested in the boundary bonds now\n # B2cut = intersect2d(B2cut,BLboun)\n # # Cross off if later bonds share indices\n # keep = np.ones(len(B2cut),dtype = bool)\n # for ii in range(len(B2cut)):\n # row = B2cut[ii]\n # if row[0] in BL[ii+1,:].ravel():\n # keep[ii] = False\n # B2cut = B2cut[keep]\n # # Cut only as many as needed\n # nboun2cut = min([nboun - nboun2have, len(B2cut)])\n # BL = dh.setdiff2d(BL,B2cut[0:nboun2cut])\n # nboun = len(intersect2d(BL,BLboun))\n # print 'nbound so far =', nboun\n # NL, KL = BL2NLandKL(BL,NP=NP,NN=NN)\n\n zz = np.sum(KL, axis=1)\n zbulk = np.float(np.sum(zz[bulk])) / float(len(bulk))\n print 'Tuned to zbulk = ', zbulk\n\n if check:\n display_lattice_2D(xy, BL, close=False)\n plt.show()\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n\n return NL, KL, BL", "def branching(self, df, layer, side):\n min_err = 1\n # Search for the best cut\n for i in range(self.dim):\n ddf = df.sort_values(i)\n Y = ddf.y.values\n\n for j in range(1, len(ddf)):\n err = self.impurity(Y, j)\n if err < min_err:\n best_d, best_val, min_err = i, ddf.iloc[j][i], err\n\n # Record the best branching parameters at this node\n self.Branch[(layer, side)] = best_d, best_val\n return best_d, best_val", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def fn(i, s0, s1, c0, c1):\n if s0 > n or s1 > n: return 0 # impossible \n if i == len(balls): return int(c0 == c1)\n ans = 0 \n for x in range(balls[i]+1): \n ans += fn(i+1, s0+x, s1+balls[i]-x, c0+(x > 0), c1+(x < balls[i])) * comb(balls[i], x)\n return ans", "def get_centroids_for_bisection(keypoints: Sequence[Keypoint], fallback: bool) -> Tuple[Keypoint, Keypoint, Keypoint]:\n global body_direction_counter\n global normal_body_direction_counter\n global fallback_body_direction_counter\n body_direction_counter = body_direction_counter + 1\n\n keypoints_np = np.array(keypoints)\n left_kp = keypoints_np[config[\"bisection\"][\"left_pose_points\"]]\n right_kp = keypoints_np[config[\"bisection\"][\"right_pose_points\"]]\n\n left_shoulder_kp = keypoints_np[config[\"bisection\"][\"fallback\"][\"left_shoulder_kp\"]]\n right_shoulder_kp = keypoints_np[config[\"bisection\"][\"fallback\"][\"right_shoulder_kp\"]]\n left_eye_kp = keypoints_np[config[\"bisection\"][\"fallback\"][\"left_eye_kp\"]]\n right_eye_kp = keypoints_np[config[\"bisection\"][\"fallback\"][\"right_eye_kp\"]]\n with_fallback_kps: Sequence[Tuple[Keypoint,Keypoint]] = []\n if fallback:\n fb_used = False\n for idx, t in enumerate(zip(left_kp, right_kp)): # 0,1,8 & 11\n if t[0].isNone or t[1].isNone:\n fb_used = True\n if idx == 0: # top point fallback\n with_fallback_kps.append((left_eye_kp, right_eye_kp))\n if idx == 1: # middle point fallback\n with_fallback_kps.append((left_shoulder_kp, right_shoulder_kp))\n if 
idx == 2: # bottom point fallback\n l_middle_point_pair, r_middle_point_pair = with_fallback_kps[-1]\n left_bottom_fallback = Keypoint(l_middle_point_pair.x, l_middle_point_pair.y + 40, 1)\n right_bottom_fallback = Keypoint(r_middle_point_pair.x, r_middle_point_pair.y + 40, 1)\n with_fallback_kps.append((left_bottom_fallback, right_bottom_fallback)) #actually they can be the same. Since we calculate the centroid anyways\n else:\n with_fallback_kps.append(t)\n if fb_used:\n fallback_body_direction_counter = fallback_body_direction_counter + 1\n else:\n normal_body_direction_counter = normal_body_direction_counter + 1\n\n bisection_keypoint_pairs: Sequence[Tuple[Keypoint,Keypoint]] = list(\n filter(lambda x: not (x[0].isNone or x[1].isNone), with_fallback_kps if fallback else zip(left_kp, right_kp,))\n )\n if len(bisection_keypoint_pairs) != 3:\n raise ValueError('some keypoints for bisection calculation are missing!')\n keypoint_pairs = [\n Keypoint(*p(cast(Point, LineString([k(a),k(b)]).centroid)), np.mean([a.score, b.score]))\n for a,b in bisection_keypoint_pairs\n ]\n top_kp, middle_kp, bottom_kp = keypoint_pairs\n # print(\"body_direction_counter, normal_body_direction_counter, fallback_body_direction_counter\", body_direction_counter, normal_body_direction_counter, fallback_body_direction_counter)\n return top_kp, middle_kp, bottom_kp", "def branch_precursor(state, time, d):\n assert d[\"alpha_IL2\"] < d[\"alpha1\"] and d[\"alpha_IL2\"] < d[\"alpha2\"]\n \n th0 = state[0]\n \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"]+1)]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]+1):]\n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n \n t_eff = th1_all+th2_all\n t_il2 = np.sum(th1[:d[\"alpha_IL2\"]]) + np.sum(th2[:d[\"alpha_IL2\"]])\n\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n \n conc_il2 = d[\"rate_il2\"]*t_il2/(d[\"K_il2\"]+t_eff)\n\n # compute feedbacks\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2) \n \n ### calculate probability, note that these are adjusted to beta1 beta2 so that\n # they are not necessarily \\in (0,1)\n p1, p2 = get_prob(d, beta1, beta2, cyto_1, cyto_2)\n \n #print(beta1*p1_adj/(beta1*p1_adj+beta2))\n beta1_p = d[\"beta1_p\"]\n beta2_p = d[\"beta2_p\"]\n rate_death = d[\"d_eff\"] \n \n # check for homeostasis regulation\n if d[\"crit\"] == False:\n update_t0(d, time, conc_il2, t_eff)\n elif d[\"death_mode\"] == False:\n assert d[\"crit\"] == True \n beta1_p = beta1_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n beta2_p = beta2_p*np.exp(-d[\"decay_p\"]*(time-d[\"t0\"]))\n\n else:\n rate_death = rate_death*np.exp(time-d[\"t0\"])\n\n # this is the actual differentiation where odes are computed \n dt_th1 = diff_precursor(th1, th0, d[\"alpha1\"], beta1, beta1_p, p1, rate_death, d)\n dt_th2 = diff_precursor(th2, th0, d[\"alpha2\"], beta2, beta2_p, p2, rate_death, d)\n dt_th0 = -(beta1*p1+beta2)*th0 \n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def cleaveSurfBond(entry,max_bonds=1,supercell=2,group_structs=True,prec=1E-4):\n \n \n struct = copy.deepcopy(entry[0])\n results = getStructureType(entry,supercell=supercell,returnSS=True)\n \n # Proceed only if the structure 
is classified as periodic\n # in all directions\n if results[0]=='conventional':\n struct.make_supercell(supercell)\n binary_matrix= getDistMat(struct,entry[1]-1)\n og_num_bonds = sum(sum(np.array(binary_matrix)))/2\n \n # Get dictionary of directional bonds in the system, \n # and the associated atomic species\n bond_dir = getBondVectors(struct,entry[1]-1,prec)\n\n \n # Create the list of bonds to be broken\n all_structs=[]\n combos=[]\n for s1 in bond_dir:\n for s2 in bond_dir[s1]:\n for cleave in bond_dir[s1][s2]: \n combos.append(cleave[1])\n \n # Create pairings of bonds to be broken, up to \n # max_bonds number of bonds\n \n final_combos=[]\n for i in range(1,max_bonds+1):\n for mix in list(itertools.combinations(combos,max_bonds)):\n final_combos.append(mix)\n seed_index=0\n old_cluster_size=len(buildNetwork(binary_matrix,seed_index))/supercell**3\n for combo in final_combos:\n modified_matrix = np.array(binary_matrix)\n for sett in combo:\n for pair in sett:\n i,j = pair\n modified_matrix[i][j]=0\n modified_matrix[j][i]=0\n new_num_bonds=sum(sum(modified_matrix))/2\n \n # Number of bonds broken in the search. Not necessarily\n # the number of bonds broken to cleave the surface\n \n broken=int(og_num_bonds-new_num_bonds)\n \n cluster = buildNetwork(modified_matrix,seed_index)\n hetero=False\n if cluster!=set():\n scale = len(cluster)/old_cluster_size\n compo = Composition.from_dict(Counter([struct[l].specie.name \n for l in list(cluster)]))\n if compo.reduced_formula != struct.composition.reduced_formula:\n # i.e. the cluster does not have the same composition\n # as the overall crystal; therefore there are other\n # clusters of varying composition.\n hetero = True\n motiif = getDim(scale,supercell)\n\n if not hetero:\n if motiif=='layered':\n cluster_sites = [struct.sites[n] for n in cluster]\n all_structs.append(struct.from_sites(cluster_sites))\n\n if group_structs:\n matched = [x[0] for x in \n StructureMatcher(stol=1E-6,primitive_cell=False,\n scale=False).group_structures(all_structs)]\n else:\n matched=all_structs\n return(matched) \n\n\n else:\n print('Material is does not have a 3D motiif')\n print('Try increasing radii tolerance if appropriate')\n return([])", "def g_solving_subproblem_of_ALR(self,vehicle_id):\r\n global_LB = -10000\r\n global_UB = 10000\r\n iteration_for_RSP = 20\r\n optimal_solution_for_RSP = None\r\n self.multiplier_v = 0.5\r\n\r\n # solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 3)\r\n\r\n # obtain the variance\r\n y_ =self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB = 0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 1)\r\n LB += self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_searching_mean\r\n UB = Label_cost_for_lagrangian_mean + self.reliability * (variance) ** 0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if 
LB > global_LB:\r\n global_LB = LB\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector[0]\r\n\r\n # step 3: update multipliers\r\n if variance- y != 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB - global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, global_LB", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def V_checkers_ablation(s_grid, s_n, g_n, s_others,\n f1=4, k1=[3,5], n_h1=128, n_h2=32):\n with tf.variable_scope(\"stage-2\"):\n conv = convnet_1(s_grid, f1=f1, k1=k1, s1=[1,1], scope='conv')\n concated = tf.concat( [conv, s_n, g_n, s_others], axis=1 )\n h1 = tf.layers.dense(inputs=concated, units=n_h1, activation=tf.nn.relu, use_bias=True, name='V_h1')\n h2 = tf.layers.dense(inputs=h1, units=n_h2, activation=tf.nn.relu, use_bias=True, name='V_h2')\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=False, name='V_out')\n return out", "def crank_nicolson_fd(main_args,\n boundary_left_args, boundary_right_args,\n initial_func,\n min_x, max_x,\n max_t,\n step_x, step_t,\n boundary_approximation_func='first_order_two_points',\n theta=0.5):\n\n d = {\n 'first_order_two_points': _o1p2, # o - order, p - points\n 'second_order_two_points': _o2p2,\n 'second_order_three_points': _o2p3\n }\n (complete_matrix,\n complete_vector) = d[boundary_approximation_func](main_args,\n boundary_left_args, boundary_right_args,\n step_x, step_t,\n min_x, max_x)\n\n m = int(max_t / step_t) + 1\n n = int((max_x - min_x) / step_x) + 1\n u = [None for _ in range(m)]\n u[0] = [initial_func(min_x + x * step_x) for x in range(n)]\n\n a, b, c, f = main_args\n\n A = a * (1 - theta) / step_x ** 2 - b * (1 - theta) / (2 * step_x)\n B = c * (1 - theta) - 2 * 
a * (1 - theta) / step_x ** 2 - 1 / step_t\n C = a * (1 - theta) / step_x ** 2 + b * (1 - theta) / (2 * step_x)\n\n X = b * theta / (2 * step_x) - a * theta / step_x ** 2\n Y = 2 * a * theta / step_x ** 2 - c * theta - 1 / step_t\n Z = - a * theta / step_x ** 2 - b * theta / (2 * step_x)\n\n matrix_u_t = Matrix(size=(n, 3))\n for i in range(1, n - 1):\n matrix_u_t[i] = [A, B, C]\n complete_matrix(matrix_u_t)\n\n for t in range(1, m):\n v = Vector(size=(n, 1))\n for x in range(1, n - 1):\n v[x] = (u[t - 1][x - 1] * X +\n u[t - 1][x] * Y +\n u[t - 1][x + 1] * Z +\n (theta - 1) * f(min_x + x * step_x, t * step_t) -\n theta * f(min_x + x * step_x, (t - 1) * step_t))\n complete_vector(v, t * step_t, matrix_u_t, u[t-1][0], u[t-1][-1])\n u[t] = list(TDMA(mtrx=matrix_u_t, vec=v).solve())\n\n return u", "def planeSliceGFig2(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = int(len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n if comp == True:\n diff = difference(nreal) # determine number of complex solutions\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 1, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n \n fogain = np.zeros([nzones, npoints])\n zogain = np.zeros([nzones, npoints])\n for i in range(nzones):\n nroots = nreal[i]\n if nroots == 1:\n fogain[i] = np.abs(allfields[i])**2\n zogain[i] = np.abs(allfields[i])**2\n else:\n fogain[i] = np.abs(np.sum(allfields[i], axis = 0))**2\n zog = 0\n for j in range(nroots):\n zog = zog + np.abs(allfields[i][j])**2\n zogain[i] = zog\n \n fogain = fogain.flatten()\n zogain = zogain.flatten()\n\n # Construct uniform asymptotics\n # asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n # interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n # finx = np.linspace(xmin, xmax, 4*npoints)\n # asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(2, 2)\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[:, 0]), plt.subplot(grid[0, 1])\n # ax0, ax2 = plt.subplot(grid[0]), plt.subplot(grid[1])\n ax2 = plt.subplot(grid[1, 1], sharex=ax1)\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n cbar.set_label('G', fontsize = 18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot 
caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\", linewidth = 1.)\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n xaxis = upxvecs.flatten()\n ax1.plot(xaxis, zogain, color = 'red', label = r'$0^{th}$ order GO gain')\n ax1.set_ylim(-cdist, np.max(G) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n # ax1.set_xlabel(r\"$u'_x$\")\n ax1.set_ylabel('G', fontsize = 18)\n ax1.legend(loc = 1, fontsize = 12)\n ax1.tick_params(labelsize = 14)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n \n # Plot gain along observer motion\n ax2.plot(rx2, G, color='blue', label=\"FFT gain\", linewidth=1.)\n for caus in upcross.T[0]:\n ax2.plot([caus, caus], [-10, 1000], ls='dashed', color='black')\n ax2.plot(xaxis, fogain, color='orange', label=r'$1^{st}$ order GO gain')\n ax2.set_ylim(-cdist, np.max(G) + 1.)\n ax2.set_xlim(np.min(rx2), np.max(rx2))\n ax2.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax2.set_ylabel('G', fontsize = 18)\n ax2.legend(loc = 1, fontsize = 12)\n # ax1.set_title(\"Slice Gain\")\n ax2.tick_params(labelsize = 14)\n ax2.grid()\n grid.tight_layout(fig)\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n plt.show()\n return", "def V_particle_global(v_state_n, v_goal, v_state_others, v_goal_others,\n n_h1_branch1=64, n_h1_branch2=64, n_h2=64, stage=1):\n concated = tf.concat( [v_state_n, v_goal], axis=1 )\n branch1 = tf.layers.dense(inputs=concated, units=n_h1_branch1, activation=tf.nn.relu, use_bias=True, name='V_particle_branch1')\n W_branch1_h2 = get_variable(\"W_branch1_h2\", [n_h1_branch1, n_h2])\n\n list_mult = []\n list_mult.append( tf.matmul(branch1, W_branch1_h2) )\n\n if stage > 1:\n with tf.variable_scope(\"stage-2\"):\n concated2 = tf.concat( [v_state_others, v_goal_others], axis=1 )\n others = tf.layers.dense(inputs=concated2, units=n_h1_branch2, activation=tf.nn.relu, use_bias=True, name='V_particle_branch2')\n W_branch2_h2 = get_variable('W_branch2_h2', [n_h1_branch2, n_h2])\n 
list_mult.append(tf.matmul(others, W_branch2_h2))\n\n h2 = tf.nn.relu(tf.add_n(list_mult, name='V_particle_h2'))\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=False, name='V_particle_out')\n \n return out", "def khorne_slide(obs, berzerker_x, berzerker_y):\n def environment_fits(obs, berzerker_x, berzerker_y):\n \"\"\" environment fits constraints \"\"\"\n # if prey has the ball\n if obs[\"ball_owned_team\"] == 1:\n prey_x = obs[\"right_team\"][obs[\"ball_owned_player\"]][0]\n prey_y = obs[\"right_team\"][obs[\"ball_owned_player\"]][1]\n # by x position, amount of berzerker's team players between prey and goal of berzerker's team\n players_amount = 0\n for i in range(1, len(obs[\"left_team\"])):\n if obs[\"left_team\"][i][0] < prey_x:\n players_amount += 1\n prey_x_direction = obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][0]\n future_prey_x = prey_x + obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][0]\n future_prey_y = prey_y + obs[\"right_team_direction\"][obs[\"ball_owned_player\"]][1]\n future_berzerker_x = berzerker_x + obs[\"left_team_direction\"][obs[\"active\"]][0]\n future_berzerker_y = berzerker_y + obs[\"left_team_direction\"][obs[\"active\"]][1]\n distance_to_prey = get_distance(berzerker_x, berzerker_y, prey_x, prey_y)\n future_distance_to_prey = get_distance(future_berzerker_x, future_berzerker_y, future_prey_x, future_prey_y)\n # if berzerker is not close to his own penalty zone\n # and prey is beyond x position of too many players of berzerker's team\n # and berzerker is close enough to prey\n # and berzerker is running in direction of prey\n if ((berzerker_x > -0.65 or abs(berzerker_y) > 0.3) and\n players_amount <= 7 and\n future_distance_to_prey < 0.015 and\n distance_to_prey > future_distance_to_prey):\n return True\n return False\n \n def get_action(obs, berzerker_x, berzerker_y):\n \"\"\" get action of this memory pattern \"\"\"\n return Action.Slide\n \n return {\"environment_fits\": environment_fits, \"get_action\": get_action}", "def _holt__(x, xi, p, y, l, b, s, m, n, max_seen):\n alpha, beta, phi, alphac, betac, y_alpha = _holt_init(x, xi, p, y, l, b)\n for i in range(1, n):\n l[i] = (y_alpha[i - 1]) + (alphac * (l[i - 1]))\n return sqeuclidean(l, y)", "def fvm(states: States, grid: Gridlines, topo: Topography, config: Config, runtime: DummyDict):\n # pylint: disable=invalid-name\n\n # calculate source term contributed from topography gradients\n states = topography_gradient(states, topo, config.params.gravity)\n\n # calculate slopes of piecewise linear approximation\n states = minmod_slope(states, grid, config.params.theta, runtime.tol)\n\n # interpolate to get discontinuous conservative quantities at cell faces\n states = get_discontinuous_cnsrv_q(states, grid)\n\n # fix non-physical negative depth\n states = correct_negative_depth(states, topo)\n\n # get non-conservative variables at cell faces\n states = decompose_variables(states, topo, runtime.epsilon)\n\n # get local speed at cell faces\n states = get_local_speed(states, config.params.gravity)\n\n # get discontinuous PDE flux at cell faces\n states = get_discontinuous_flux(states, topo, config.params.gravity)\n\n # get common/continuous numerical flux at cell faces\n states = central_scheme(states, runtime.tol)\n\n # get final right hand side\n states.rhs.w = \\\n (states.face.x.num_flux.w[:, :-1] - states.face.x.num_flux.w[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.w[:-1, :] - states.face.y.num_flux.w[1:, :]) / grid.y.delta + \\\n states.src.w\n\n 
states.rhs.hu = \\\n (states.face.x.num_flux.hu[:, :-1] - states.face.x.num_flux.hu[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hu[:-1, :] - states.face.y.num_flux.hu[1:, :]) / grid.y.delta + \\\n states.src.hu\n\n states.rhs.hv = \\\n (states.face.x.num_flux.hv[:, :-1] - states.face.x.num_flux.hv[:, 1:]) / grid.x.delta + \\\n (states.face.y.num_flux.hv[:-1, :] - states.face.y.num_flux.hv[1:, :]) / grid.y.delta + \\\n states.src.hv\n\n # remove rounding errors\n states.rhs = remove_rounding_errors(states.rhs, runtime.tol)\n\n # obtain the maximum safe dt\n amax = nplike.max(nplike.maximum(states.face.x.plus.a, -states.face.x.minus.a))\n bmax = nplike.max(nplike.maximum(states.face.y.plus.a, -states.face.y.minus.a))\n max_dt = min(0.25*grid.x.delta/amax, 0.25*grid.y.delta/bmax)\n\n return states, max_dt", "def minimum_spanning_arborescence(sol):", "def bs_chudnovsky(a, b, level, verbose):\n if b-a == 1:\n g = MPZ((6*b-5)*(2*b-1)*(6*b-1))\n p = b**3 * CHUD_C**3 // 24\n q = (-1)**b * g * (CHUD_A+CHUD_B*b)\n else:\n if verbose and level < 4:\n print(\" binary splitting\", a, b)\n mid = (a+b)//2\n g1, p1, q1 = bs_chudnovsky(a, mid, level+1, verbose)\n g2, p2, q2 = bs_chudnovsky(mid, b, level+1, verbose)\n p = p1*p2\n g = g1*g2\n q = q1*p2 + q2*g1\n return g, p, q", "def getBondVectors(struct,tol,prec): \n \n \n binary_matrix= getDistMat(struct,tol)\n bond_dir = {}\n distance_matrix = struct.distance_matrix\n lattice = np.array(struct.lattice.as_dict()['matrix'])\n iterations = list(itertools.product([1,0,-1],repeat=3))\n # Loop over list of atoms\n for i in range(len(binary_matrix)):\n for j in range(i+1,len(binary_matrix)):\n # Proceed if the entries are listed as \"bonded\" \n if binary_matrix[i][j]==1: \n s1 = struct.species[i]\n s2 = struct.species[j]\n # Organize dictionary so it is always in order of increasing\n # atomic number\n if s1.number>s2.number:\n s1 = struct.species[j]\n s2 = struct.species[i] \n if s1 not in bond_dir:\n bond_dir[s1]={}\n if s2 not in bond_dir[s1]:\n bond_dir[s1][s2]=[]\n valid_vs = []\n \n # Get the vector between atomic positions\n \n bond_vector = np.array(struct.sites[j].coords-\n struct.sites[i].coords) \n \n # The positions of the atoms may not be in the right locations\n # to be the minimum distance from each other. As a result,\n # a translation is applied to the resulting \"bond vector\" \n # (alternatively, one of the atoms is translated)\n for shift in iterations:\n bondShift = bond_vector + np.dot(lattice.T,shift)\n if abs(distance_matrix[i][j]-magni(bondShift))<=prec:\n valid_vs.append(bondShift)\n break\n # See if the vector is already present in the collection of \n # vectors. If so, add the coordinates to the entry. 
Else,\n # create a new entry for the direction of the bond.\n for v in valid_vs:\n if np.any([magni(v-x[0])<=prec for x in bond_dir[s1][s2]]):\n for k in range(len(bond_dir[s1][s2])):\n if magni(v-bond_dir[s1][s2][k][0])<=prec:\n bond_dir[s1][s2][k][1].append([i,j])\n break\n \n else:\n bond_dir[s1][s2].append([v,[[i,j]]])\n return(bond_dir)", "def BM2BSM(xy, NL, KL, BM0):\n # Check if 3D or 2D\n # np.sqrt( (xy[NL[i,0],0]-xy[BL[:,1],0])**2+(xy[BL[:,0],1]-xy[BL[:,1],1])**2) ]\n '''this isn't finished....'''", "def dual_problem(\n states: list[np.ndarray], probs: list[float] = None, dist_method=\"min-error\"\n) -> float:\n constraints = []\n meas = []\n\n dim_x, _ = states[0].shape\n\n y_var = cvxpy.Variable((dim_x, dim_x), hermitian=True)\n objective = cvxpy.Minimize(cvxpy.trace(cvxpy.real(y_var)))\n\n dim = int(np.log2(dim_x))\n dim_list = [2] * int(np.log2(dim_x))\n sys_list = list(range(1, dim, 2))\n # dim_list = [3, 3]\n\n if dist_method == \"min-error\":\n for i, _ in enumerate(states):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[i] * states[i])\n >> partial_transpose(meas[i], sys=sys_list, dim=dim_list)\n )\n\n if dist_method == \"unambiguous\":\n for j, _ in enumerate(states):\n sum_val = 0\n for i, _ in enumerate(states):\n if i != j:\n sum_val += cvxpy.real(cvxpy.Variable()) * probs[i] * states[i]\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var - probs[j] * states[j] + sum_val)\n >> partial_transpose(meas[j], sys=sys_list, dim=dim_list)\n )\n\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(\n cvxpy.real(y_var) >> partial_transpose(meas[-1], sys=sys_list, dim=dim_list)\n )\n\n problem = cvxpy.Problem(objective, constraints)\n sol_default = problem.solve()\n\n # print(np.around(y_var.value, decimals=3))\n\n return sol_default", "def update(self, paddle_1, paddle_2):\r\n done = False\r\n \r\n p1_reward = 0\r\n p2_reward = 0\r\n\r\n # Move ball and move to edges if past boundary\r\n x_ = self.x + self.vx\r\n y_ = self.y + self.vy\r\n\r\n if x_ < self.left_x:\r\n x_ = self.left_x\r\n elif x_ > self.right_x:\r\n x_ = self.right_x\r\n\r\n if y_ < self.top_y:\r\n y_ = self.top_y\r\n elif y_ > self.bot_y:\r\n y_ = self.bot_y\r\n\r\n\r\n # Contact with top or bottom\r\n if y_ == self.top_y or y_ == self.bot_y:\r\n self.vy *= -1\r\n\r\n\r\n # Left side\r\n if x_ == self.left_x:\r\n if paddle_1.y <= y_ <= (paddle_1.y + paddle_1.Height):\r\n x_ += self.Radius\r\n change = abs(paddle_1.vy//8)\r\n self.vx = -1*self.vx + change//2\r\n if self.vy < 0:\r\n self.vy -= change\r\n else:\r\n self.vy += change\r\n\r\n\r\n self.rallies += 1\r\n\r\n p1_reward += 100\r\n p2_reward -= 0\r\n else:\r\n p1_reward -= 100\r\n p2_reward += 0\r\n done = True\r\n\r\n\r\n # Right side\r\n elif x_ == self.right_x:\r\n if paddle_2.y <= y_ <= (paddle_2.y + paddle_2.Height):\r\n x_ -= self.Radius\r\n change = abs(paddle_2.vy//8)\r\n self.vx = -1*self.vx - change//2\r\n if self.vy < 0:\r\n self.vy -= change\r\n else:\r\n self.vy += change\r\n\r\n self.rallies += 1\r\n\r\n p1_reward -= 0\r\n p2_reward += 100\r\n else:\r\n p1_reward += 0\r\n p2_reward -= 100\r\n done = True\r\n\r\n\r\n\r\n # Update ball position and velocity if exceeded\r\n if not done:\r\n self.x = x_\r\n self.y = y_\r\n\r\n if self.vx > self.V_max:\r\n self.vx = self.V_max\r\n elif self.vx < -self.V_max:\r\n self.vx = -self.V_max\r\n \r\n if self.vy > self.V_max:\r\n self.vy = self.V_max\r\n elif self.vy < 
-self.V_max:\r\n self.vy = -self.V_max\r\n\r\n\r\n p1_state, p2_state = self.state_observation(paddle_1, paddle_2)\r\n\r\n return p1_state, p2_state, p1_reward, p2_reward, done" ]
[ "0.58253855", "0.56931674", "0.54437244", "0.5257577", "0.52070004", "0.51921296", "0.5183743", "0.5179858", "0.51775", "0.51654345", "0.5148147", "0.51285076", "0.51230913", "0.5090722", "0.5089123", "0.50647557", "0.5053416", "0.5052067", "0.50175494", "0.50117624", "0.4977015", "0.4969425", "0.49470815", "0.49426764", "0.49317843", "0.49298218", "0.4927969", "0.49272484", "0.49262154", "0.4905312", "0.49040797", "0.48890597", "0.4876174", "0.48667154", "0.4862967", "0.485568", "0.48536083", "0.4849768", "0.48308778", "0.48274255", "0.48048848", "0.48035473", "0.47929692", "0.4792691", "0.4788311", "0.47840834", "0.47765118", "0.4769112", "0.473552", "0.47210577", "0.4696963", "0.46882778", "0.46879342", "0.4684726", "0.46831405", "0.46793288", "0.46776378", "0.46763706", "0.4674901", "0.4672066", "0.46643984", "0.46557686", "0.46426353", "0.46400443", "0.46384546", "0.46309963", "0.46271527", "0.46187866", "0.46169698", "0.46157458", "0.4615633", "0.46155715", "0.4613577", "0.46043527", "0.46022683", "0.4601494", "0.46013495", "0.4601115", "0.45954737", "0.45919356", "0.45896363", "0.45883834", "0.45872557", "0.45820886", "0.45816544", "0.45789623", "0.45770603", "0.4576892", "0.4573583", "0.45633227", "0.45626962", "0.45569077", "0.455669", "0.4555635", "0.45540026", "0.45518622", "0.4550786", "0.4550261", "0.4548863", "0.4547991" ]
0.57671916
1
A more traditional view that also demonstrates an alternative way to use Haystack. Useful as an example for basing heavily custom views off of. Also has the benefit of threadsafety, which the ``SearchView`` class may not be.
def search(request): query = '' results = [] qs = create_queryset(request.user) form = DateAuthorSearchForm(request.GET, searchqueryset=qs, load_all=True) if form.is_valid(): results = form.search() context = { 'form': form, 'results': results, } return render_to_response("search/search.html", context, context_instance=RequestContext(request))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_view(self, h, *args):\n return self.view(h)", "def search(request):\n raise NotImplementedError", "def shared_view(request):\n return view(shared_view, template=\"a_shared_view\")", "def a_shared_view(request):\n return view(a_shared_view)", "def search(request):\n return render(request, \"search.html\")", "def as_view(cls):\n \n @csrf_exempt\n @slack_augment\n def view(request):\n return cls(request).dispatch()\n return view", "def search(request, query):\n try:\n if(query == ''):\n query = request.GET['query']\n posts = Post.search.query(query)\n context = { 'posts': list(posts),'query': query, 'search_meta':posts._sphinx }\n except:\n context = { 'posts': list() }\n\n return render_to_response('search/search_results.html', context, context_instance=RequestContext(request))", "def index(request):\r\n form = forms.SearchForm()\r\n \r\n return render_to_response('search/search.html', {'form': form})", "def viewHeadOn(*args, **kwargs)->None:\n pass", "def search( self, view_name, handler='_fti/_design', wrapper=None, schema=None, **params):\n return ViewResults(self.raw_view,\n \"/%s/%s\" % (handler, view_name),\n wrapper=wrapper, schema=schema, params=params)", "def searchResultsView(request):\n bloggerName = request.GET.get(\"search_blogger\")\n bloggers = Blogger.objects.all().filter(name__icontains=bloggerName)\n\n context = {\n \"bloggers\": bloggers,\n }\n return render(request, \"blog/search_results.html\", context)", "def search_api(request):\n data = ApiViewFilters(request.GET, queryset=ApiView.objects.all())\n return render(request, 'template.html', {'filter': data})", "def search(request):\n\n try:\n q = request.GET['q']\n result = Entry.objects.filter(title__contains=q)\n\n return render_to_response('search.html',\n {'result': result,\n 'username': request.user.username})\n except KeyError:\n return render_to_response('search.html',\n {'username': request.user.username})", "def add_view( *args, **kwargs ):", "def advanced_search():\n\n return render_template('Advanced_Search.html')", "def index(request):\n search_form = SearchForm()\n return render(request, 'store/index.html', {'search_form': search_form})", "def view(self):", "def search(request):\n title = \"Voices search\"\n search_term = request.params.get('search_term','')\n form = Form(request)\n searchstring = u'%%%s%%' % search_term\n\n # generic_filter can be applied to all Node (and subclassed) objects\n\n generic_filter = or_(\n Content.title.like(searchstring),\n Content.body.like(searchstring),\n )\n\n results = DBSession.query(Content).filter(Content.type !='listing').filter(generic_filter).\\\n order_by(Content.title.asc()).all()\n\n\n page_url = PageURL_WebOb(request)\n page = int(request.params.get(\"page\", 1))\n paginator = Page(results,\n page=page,\n items_per_page=10,\n url=page_url)\n\n return render_to_response(\"buddy:templates/home/searchresult.mako\",\n dict(paginator=paginator,title=title,\n form=FormRenderer(form)),request=request)", "def view(self):\n raise NotImplementedError", "def search():\r\n return render_template(\"/home/search.html\")", "def show(self, *args, **kwargs) -> None:\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "def as_view(cls, *class_args, **class_kwargs):\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = 
decorator(view)\n\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view", "def search(request):\n template = loader.get_template('searchresult.html')\n\n context = {\n 'asd': 'asd',\n }\n return HttpResponse(template.render(context, request))", "def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)", "def search(request, template_name=\"testimonials/search.html\"):\n has_index = get_setting('site', 'global', 'searchindex')\n query = request.GET.get('q', None)\n\n if has_index and query:\n testimonials = Testimonial.objects.search(query, user=request.user)\n else:\n filters = get_query_filters(request.user, 'testimonials.view_story')\n testimonials = Testimonial.objects.filter(filters).distinct()\n if request.user.is_authenticated():\n testimonials = testimonials.select_related()\n testimonials = testimonials.order_by('-create_dt')\n\n EventLog.objects.log()\n\n return render_to_response(template_name, {'testimonials': testimonials},\n context_instance=RequestContext(request))", "def index_view(request):\n return render(request, 'index.html')", "def search(request):\n\t\n\t# User's query\n\tquery = request.GET.get('query')\n\n\t# Search for 50 most popular tweets about user's query\n\ttweets = tweepy.Cursor(api.search, q=query, lang=\"en\", tweet_mode='extended', include_entities=True, result_type='popular').items(50)\n\n\t# Search for 20 most relevant news about user's query\n\tall_news = newsapi.get_everything(q=query, language='en', sort_by='relevancy')\n\n\t# Search for 25 hottest subreddits about user's query\n\tsubreddit = reddit.subreddit('all')\n\treddit_news = subreddit.search(query, limit=25, sort='hot')\n\n\tcontext = {\n\t\t\"tweets\": tweets, # most popular tweets\n\t\t\"all_news\": all_news, # most relevant google news\n\t\t\"reddit_news\": reddit_news # hottest subreddits\n\t}\n\n\treturn render(request, 'hashtrend/search.html', context)", "def my_view(cls):\n return cls.__my_view", "def search(request):\n\n query = request.GET.get('q', '')\n\n if query:\n q1 = Q(title__search=query)\n q2 = Q(body__search=query)\n\n results = BlogEntry.objects.filter(q1 | q2).order_by(\n '-created').distinct()\n else:\n results = []\n\n search_str = 'Search results for: %s' % (query)\n data = {'entries': paginate_objects(request, results),\n 'action_str': search_str, 'blog_info': get_blog_info()}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))", "def do_search(request):\n products = Product.objects.filter(name__icontains=request.GET['q'])\n return render(request, \"search_results.html\", {\"products\": products})", "def show(*args, **kwargs):\n from . 
import core\n\n return core.show(*args, **kwargs)", "def index(request):\n\n if request.method != \"POST\":\n #No data submitted: render home page\n form = SearchForm()\n\n #Display a blank form\n context = {\n \"form\": form,\n }\n\n return render(request, \"news_site/index.html\", context)", "def do_search(request):\n products = Product.objects.filter(title__icontains=request.GET['q'])\n return render(request, \"products.html\", {\"products\": products})", "def index(request):\n query = (request.POST.get('q') or request.GET.get('q', '')).strip()\n short = (request.POST.get('short') or request.GET.get('short', '')).strip()\n shortcut = None\n\n # Split query if no shortcut is to selected\n if query and not short:\n q = query.split(' ', 1)\n short = q[0]\n query = q[1] if len(q) >= 2 else ''\n\n # Find shortcut\n if short:\n try:\n shortcut = Shortcut.objects.get(short=short)\n url = shortcut.get_url(query)\n return redirect(url)\n except Shortcut.DoesNotExist:\n query = (short + ' ' + query) if query else short\n short = ''\n\n return render(request, 'search.html', {\n 'q': query,\n 'short': short,\n 'shortcuts': Shortcut.objects.order_by('short')})", "def feature_view_home(request):\n return render(request, 'SNP_Feature_View/feature_view_home.html')", "def home_view(request: HttpRequest) -> HttpResponse:\n return render(request=request, template_name='todo/home.html')", "def _wrapped_view(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)", "def setup_view(view, request=None, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def search(request):\n context = {}\n q = \"\"\n try:\n if request.POST:\n q = request.POST['q']\n else:\n q = request.GET['q']\n except MultiValueDictKeyError:\n pass\n context['query'] = q\n context['search_entry_list'] = watson.search(q)\n return render(request, 'search.html', context)", "def test_home_urls(\n return_views,\n reverse_url,\n kwargs,\n resolve_url,\n view_func,\n create_averages\n):\n\n create_averages()\n reverse_view, resolve_view = return_views(\n reverse_url, resolve_url, kwargs\n )\n\n try:\n assert reverse_view.func == view_func\n assert resolve_view.func == view_func\n except (AttributeError, AssertionError):\n assert reverse_view.func.view_class == view_func\n assert resolve_view.func.view_class == view_func", "def get(self, request, *args, **kwargs):\n provider_metadata = request \\\n .QUERY_PARAMS \\\n .get('provider_metadata') == 'true'\n result = super(StackHostsAPIView, self).get(request, *args, **kwargs)\n\n if not provider_metadata or not result.data['results']:\n return result\n\n stack = self.get_object()\n query_results = stack.query_hosts()\n\n # TODO: query_results are highly dependent on the underlying\n # salt-cloud driver and there's no guarantee that the result\n # format for AWS will be the same for Rackspace. 
In the future,\n # we should probably pass the results off to the cloud provider\n # implementation to format into a generic result for the user\n for host in result.data['results']:\n hostname = host['hostname']\n host['provider_metadata'] = query_results[hostname]\n\n return result", "def indexView(request):\n return render(request, 'auvsi_suas/index.html')", "def index(self):\n return self.load_view('index.html')", "def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })", "def index(request):\n return render_to_response('index.html', RequestContext(request))", "def mock_as_view(view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def search_page():\n return render_template('page_query.html', search_label=g_search_type)", "def hashtag_view(request, hashtag_slug=None):\r\n # get hashtag by its slug.\r\n hashtag = get_object_or_404(Hashtag, slug=hashtag_slug)\r\n # get all items that have this hashtag.\r\n items = Item.objects.filter(hashtags=hashtag)\r\n context = {'hashtag':hashtag, 'items':items}\r\n return render(request, 'explore/hashtag.html', context)", "def home_view(request):\n message = 'Hello World'\n return Response(body=message, status=200)", "def filter_search(self, request, search, view):\n raise NotImplementedError(\".filter_search() must be overridden.\")", "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def display(self, *args, **kwargs):\n return self.show(*args, **kwargs)", "def hello_world():\n\n keyword_query = 'Gamestop' # Change it to something you're interested in!\n article_data = get_article_data(keyword_query)\n\n return render_template(\n \"index.html\",\n 
topic=keyword_query,\n headlines=article_data['headlines'],\n snippets=article_data['snippets'],\n )", "def test_create_view(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n\n # Create Link - a view can hvae 0 to * links\n link = Link(name=\"TextNode\") # Name of a collection in the database\n linkAnalyzers = AnalyzerList([\"identity\"])\n link.analyzers = linkAnalyzers\n\n # A link can have 0..* fields\n field = Field(\n \"text\",\n AnalyzerList([\"text_en\", \"invalid_analyzer\", \"analyzer_sample\"]),\n ) # text_en is a predifined analyzer from arango\n field.analyzers.filter_invalid_analyzers(\n db, verbose=1\n ) # filters out the analyzer that are not defined in the database\n\n assert (\n str(field.analyzers)\n == \"AnalyzerList(analyzerList=['text_en', 'analyzer_sample'], database=None)\"\n )\n\n link.add_field(field)\n\n ## Show the dict format of all the fields in a link\n assert link.get_fields_dict() == {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n }\n\n # create view\n view = View(\"sample_view\", view_type=\"arangosearch\")\n ## add the link (can have 0 or 1 link)\n view.add_link(link)\n\n ## can have 0..* primary sort\n view.add_primary_sort(\"text\", asc=False)\n view.add_stored_value([\"text\", \"timestamp\"], compression=\"lz4\")\n\n assert view.summary() == {\n \"name\": \"sample_view\",\n \"viewType\": \"arangosearch\",\n \"properties\": {\n \"cleanupintervalstep\": 0,\n \"cleanupIntervalStep\": 0,\n \"commitIntervalMsec\": 1000,\n \"consolidationIntervalMsec\": 0,\n \"consolidationPolicy\": {\n \"type\": \"tier\",\n \"segmentsMin\": 1,\n \"segmentsMax\": 10,\n \"segmentsBytesMax\": 5368709120,\n \"segmentsBytesFloor\": 2097152,\n \"minScore\": 0,\n },\n \"primarySortCompression\": \"lz4\",\n \"writebufferIdle\": 64,\n \"writebufferActive\": 0,\n \"writebufferMaxSize\": 33554432,\n },\n \"links\": {\n \"TextNode\": {\n \"analyzers\": [\"identity\"],\n \"fields\": {\n \"text\": {\"analyzers\": [\"text_en\", \"analyzer_sample\"]}\n },\n \"includeAllFields\": False,\n \"trackListPositions\": False,\n \"inBackground\": False,\n }\n },\n \"primarySort\": [{\"field\": \"text\", \"asc\": False}],\n \"storedValues\": [\n {\"fields\": [\"text\"], \"compression\": \"lz4\"},\n {\"fields\": [\"timestamp\"], \"compression\": \"lz4\"},\n ],\n }\n\n ## creates the view in the database\n view.create(db)\n\n assert db.view(\"sample_view\")[\"name\"] == \"sample_view\"", "def view_blog(self):", "def index(request):\n products = Product.objects.all()\n highlights = Original.objects.filter(status='h')\n context = {\n \"index_page\": \"active\",\n \"products\": products,\n \"highlights\": highlights,\n \"title\": \"Home\"\n }\n return render(request, \"index.html\", context)", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def event_list(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n request_terms = request.GET.get('search')\n search_terms_array = request_terms.split()\n\n initial_term = search_terms_array[0]\n event_list = Event.objects.annotate(\n num_participants=Count('participants', distinct=True),\n num_collaborators=Count('collaborators', distinct=True)).filter(\n 
Q(title__icontains=initial_term) |\n Q(description__icontains=initial_term))\n if len(search_terms_array) > 1:\n for term in range(1, len(search_terms_array)):\n event_list = event_list.filter(Q(title__icontains=search_terms_array[term]) |\n Q(description__icontains=search_terms_array[term]))\n else:\n event_list = Event.objects.annotate(\n num_participants=Count('participants', distinct=True),\n num_collaborators=Count('collaborators', distinct=True)).all()\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(event_list, request)\n serializer = EventSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def home_demo(request):\n\n return render(request, \"home_demo.html\")", "def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })", "def home(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'research/index.html',\n {\n 'title':'Health Infinity',\n 'info' :'Medical BigData Platform',\n 'year' : datetime.now().year,\n 'temp': models.load_data(),\n 'test': models.hchart_str(),\n }\n )", "def index():\n\tresults = queries.index()\n\ttags = queries.tags()\n\treturn render_template('index.html', packages=results, tags=tags, currentFilter=None)", "def view_index(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n # Get the views\n views = workflow.views.values(\n 'id',\n 'name',\n 'description_text',\n 'modified')\n\n # Build the table only if there is anything to show (prevent empty table)\n return render(\n request,\n 'table/view_index.html',\n {\n 'query_builder_ops': workflow.get_query_builder_ops_as_str(),\n 'table': ViewTable(views, orderable=False),\n },\n )", "def class_based_view(class_obj):\n def _instantiate_view_class(request, *args, **kwargs):\n return class_obj()(request, *args, **kwargs)\n return _instantiate_view_class", "def render_html(self, request):\n views = __import__('%s.views' % self._get_module_path(), fromlist=[''])\n try:\n return views.index_view(request, self)\n except AttributeError, e:\n raise e\n raise PluginViewsNotProperlyConfiguredError(self)", "def view(self):\n return self._view_class(self)", "def baseView(*args, itemInfo: Union[AnyStr, bool]=\"\", itemList: bool=True, viewDescription:\n bool=True, viewLabel: bool=True, viewList: bool=True, viewName: Union[AnyStr,\n bool]=\"\", q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def search_tweets(request):\n return render(request, 'ede/search.html')", "async def the_store_view(request): # pylint: disable=W0613\n from componentstore.view.component.the_store import view\n requester = request.headers.get('X-FORWARDED-FOR', None)\n print(\"Serving 'The Store' to\", requester)\n html = await view()\n return web.Response(body=html, content_type=\"text/html\", charset=\"utf-8\")", "def home(request): \n return render_to_response('index.html', locals(), context_instance = RequestContext(request))", "def search(request):\n if 'q' in request.GET:\n term = request.GET['q'].lower()\n thispushqueryset = pushitem.objects.filter(Q(searchfield__contains= term) )\n message = _('Searching 
for %s')%str(term)\n else:\n thispushqueryset = pushitem.objects.none()\n message = _('No search query specified')\n r = makepage(request,thispushqueryset,{'search_query':request.GET['q'].lower(), 'showall': 1,'message':message,}, template='search.html')\n return r", "def index(request):\n if _use_new_ui(request):\n return _serve_new_ui(request)\n\n if request.user is None:\n return view_all(request, index_call=True)\n else:\n return mine(request)", "def index():\n pass", "def __init__( viewname, view ):", "def index(request):\n\n return render(request, 'index.html')", "def test_default_behavior_of_home_view(dummy_request):\n from ..views.default import get_home_view\n from pyramid.response import Response\n\n request = dummy_request\n response = get_home_view(request)\n # import pdb ; pdb.set_trace()\n assert isinstance(response, dict)", "def index(request):\n return render(\n request,\n 'core/index.html',\n {\n 'current_view': 'index',\n }\n )", "def as_view(cls, name=None, *class_args, **class_kwargs):\n\t\tif name is None:\n\t\t\tif class_args or class_kwargs:\n\t\t\t\traise TypeError(\n\t\t\t\t\t\t'View name is required when class_args or class_kwargs '\n\t\t\t\t\t\t'are provided was not set on view. %s.')\n\t\t\telse:\n\t\t\t\treturn cls._get_default_view()\n\n\t\tdef view(*args, **kwargs):\n\t\t\tself = view.view_class(*class_args, **class_kwargs)\n\t\t\tresponse = self(*args, **kwargs)\n\t\t\treturn response\n\n\t\tif cls.decorators:\n\t\t\tview.__name__ = name\n\t\t\tview.__module__ = cls.__module__\n\t\t\tfor decorator in cls.decorators:\n\t\t\t\tview = decorator(view)\n\n\t\t# We attach the view class to the view function for two reasons:\n\t\t# first of all it allows us to easily figure out what class-based\n\t\t# view this thing came from, secondly it's also used for instantiating\n\t\t# the view class so you can actually replace it with something else\n\t\t# for testing purposes and debugging.\n\t\tview.view_class = cls\n\t\tview.__name__ = name\n\t\tview.__doc__ = cls.__doc__\n\t\tview.__module__ = cls.__module__\n\t\tview.methods = cls.methods\n\t\treturn view", "def as_view(cls, **initkwargs):\n # sanitize keyword arguments\n for key in initkwargs:\n if key in cls.http_method_names:\n raise TypeError(\"You tried to pass in the %s method name as a \"\n \"keyword argument to %s(). 
Don't do that.\"\n % (key, cls.__name__))\n if not hasattr(cls, key):\n raise TypeError(\"%s() received an invalid keyword %r\" % (\n cls.__name__, key))\n\n def view(request, *args, **kwargs):\n self = cls(**initkwargs)\n return self.dispatch(request, *args, **kwargs)\n\n # take name and docstring from class\n update_wrapper(view, cls, updated=())\n\n # and possible attributes set by decorators\n # like csrf_exempt from dispatch\n update_wrapper(view, cls.dispatch, assigned=())\n return view", "def get(self, request, *args, **kwargs):\n search = request.GET.get('search')\n context = self.get_context_data(**kwargs)\n context['search'] = search\n context['results_count'] = 0\n\n # Don't search at all if search term is not at least 3 chars long\n if len(search) >= 3:\n datasets = []\n\n # Prepare filters for every searched class and execute the queries\n # by searching in all char/text fields\n for model in self.searched_classes:\n filters = [\n Q(**{field.name + '__icontains': search})\n for field in model._meta.fields if isinstance(field, (CharField, TextField))\n ]\n query = filters.pop()\n for item in filters:\n query |= item\n\n # Execute SQL query to get the results for this model\n objects = model.objects.select_related().filter(query)\n\n # Add info for this class in the data returned\n datasets.append({\n 'model': model,\n 'model_meta': model._meta,\n 'model_name': model.__name__,\n 'objects': objects\n })\n context['results_count'] += len(objects)\n\n context['datasets'] = datasets\n\n return self.render_to_response(context)", "def static_view_finder(viewname, **other):\n return viewname", "def view_deployment(request, deployment, **_kwargs):\n pass", "def index(request):\n \n return render(request, 'home/index.html')", "def _view(self, request, **kwargs):\n return self._dispatch(request, **kwargs)", "async def stack_overflow(self, ctx: Context, *, query: str) -> None:\n async with ctx.typing():\n site = stackexchange.Site(stackexchange.StackOverflow, StackExchangeToken)\n site.impose_throttling = True\n site.throttle_stop = False\n\n results = site.search(intitle=query)[:5]\n embed = Embed(title=\"StackOverflow search\")\n embed.set_thumbnail(url=f\"http://s2.googleusercontent.com/s2/favicons?domain_url={site.domain}\")\n\n description = f\"**Query:** {query}\\n\"\n\n if results:\n embed.color = Color.blue()\n else:\n embed.color = Color.red()\n description += \"\\nSorry, No results found for given query.\"\n\n for result in results:\n # Fetch question's data, include vote_counts and answers\n result = site.question(result.id, filter=\"!b1MME4lS1P-8fK\")\n description += f\"\\n**[{result.title}](https://{site.domain}/q/{result.id})**\"\n description += f\"\\n**Score:** {result.score}, **Answers:** {len(result.answers)}\\n\"\n\n embed.description = description\n await ctx.send(embed=embed)", "def search(request):\n if 'q' in request.GET:\n term = request.GET['q']\n story_list = Story.objects.filter(Q(title__contains=term)|Q(markdown_content__contains=term))\n heading = \"Search results\"\n return render_to_response(\"cms/story_list.html\",locals())", "def viewfactory(self):\n raise NotImplementedError()", "def _search(self, query):\n return self._request(query)", "def search(self, *args, **kwargs):", "def from_python_function(instance):\n # set the pagination class\n pagination_class = FromPythonPagination\n # filter queryset based on the search terms\n instance.queryset = instance.filter_queryset(instance.queryset)\n # get the serializer for this view\n current_serializer = 
general_serializer(instance)\n # serialize the data (many flag for serializing a queryset)\n serialized_queryset = current_serializer(instance.queryset, many=True, context={'request': instance.request})\n # get the data to serialize\n data = serialized_queryset.data\n # generate the response\n return HttpResponse(JSONRenderer().render({'count': len(instance.queryset), 'next': None, 'previous': None,\n 'results': data}))", "def index(request):\n return render(request, 'index.html')", "def index(request):\n return render(request, 'index.html')", "def index(request):\n return render(request, 'index.html')", "def search_responsify(serializer, mimetype):\n def view(pid_fetcher, search_result, code=200, headers=None, links=None,\n item_links_factory=None):\n response = current_app.response_class(\n serializer.serialize_search(pid_fetcher, search_result,\n links=links,\n item_links_factory=item_links_factory),\n mimetype=mimetype)\n response.status_code = code\n if headers is not None:\n response.headers.extend(headers)\n\n if links is not None:\n add_link_header(response, links)\n\n return response\n\n return view", "def index() -> Any:\n return render_template(\"index.html\")", "def index(request):\n return render(request, \"index.html\")", "def test_view():\n\treturn \"this is a response\"", "def home(request):\n\n context = {\n\n }\n\n return render(request, 'hydraviewer/home.html', context)", "def __repr__(self):\n return self.buildView(0)" ]
[ "0.6344785", "0.57771486", "0.56842697", "0.5668535", "0.56558794", "0.56173635", "0.5589077", "0.5574361", "0.5421331", "0.540091", "0.5397322", "0.5366973", "0.5350704", "0.5332693", "0.533061", "0.53137857", "0.53135633", "0.5257664", "0.52448064", "0.5209895", "0.5179522", "0.5179522", "0.5179522", "0.51763237", "0.5163771", "0.515676", "0.51488066", "0.51378536", "0.51248115", "0.5093009", "0.5092561", "0.5090198", "0.5090015", "0.5048412", "0.5044454", "0.50380886", "0.5032139", "0.5023335", "0.50229204", "0.50117695", "0.4987287", "0.49688402", "0.4965216", "0.4960797", "0.49503043", "0.49490434", "0.49299532", "0.492584", "0.49201795", "0.49190724", "0.49155003", "0.49056247", "0.48939797", "0.48924425", "0.48885992", "0.48851183", "0.48797965", "0.48793468", "0.48789272", "0.48778892", "0.48770562", "0.48734418", "0.4870817", "0.4864825", "0.4859026", "0.4857221", "0.48553857", "0.48528674", "0.4846883", "0.4844758", "0.4842959", "0.48401788", "0.48368183", "0.483134", "0.48244417", "0.48238078", "0.48201445", "0.48191586", "0.48145956", "0.48121643", "0.48085347", "0.48062345", "0.48037335", "0.48009008", "0.480048", "0.47997484", "0.47991422", "0.47952437", "0.47865754", "0.47864592", "0.47857463", "0.4780289", "0.4779753", "0.4779753", "0.4779753", "0.47778323", "0.4777179", "0.477702", "0.47725105", "0.47707593", "0.47692907" ]
0.0
-1
Returned data frame should have trading_pair as index and include usd volume, baseAsset and quoteAsset
async def get_active_exchange_markets(cls) -> pd.DataFrame: async with aiohttp.ClientSession() as client: trading_pairs_response = await client.get(ASSET_PAIRS_URL) trading_pairs_response: aiohttp.ClientResponse = trading_pairs_response if trading_pairs_response.status != 200: raise IOError(f"Error fetching Kraken trading pairs. " f"HTTP status is {trading_pairs_response.status}.") trading_pairs_data: Dict[str, Any] = await trading_pairs_response.json() trading_pairs_data["result"] = { pair: details for pair, details in trading_pairs_data["result"].items() if "." not in pair} wsname_dict: Dict[str, str] = {pair: details["wsname"] for pair, details in trading_pairs_data["result"].items()} trading_pairs: Dict[str, Any] = {pair: {"baseAsset": wsname_dict[pair].split("/")[0], "quoteAsset": wsname_dict[pair].split("/")[1], "wsname": wsname_dict[pair]} for pair in trading_pairs_data["result"]} trading_pairs_str: str = ','.join(trading_pairs.keys()) market_response = await client.get(f"{TICKER_URL}?pair={trading_pairs_str}") market_response: aiohttp.ClientResponse = market_response if market_response.status != 200: raise IOError(f"Error fetching Kraken markets information. " f"HTTP status is {market_response.status}.") market_data = await market_response.json() market_data: List[Dict[str, Any]] = [{"pair": pair, **market_data["result"][pair], **trading_pairs[pair]} for pair in market_data["result"] if pair in trading_pairs] # Build the data frame. all_markets: pd.DataFrame = pd.DataFrame.from_records(data=market_data, index="pair") all_markets["lastPrice"] = all_markets.c.map(lambda x: x[0]).astype("float") all_markets.loc[:, "volume"] = all_markets.v.map(lambda x: x[1]).astype("float") price_dict: Dict[str, float] = await cls.get_prices_from_df(all_markets) usd_volume: List[float] = [ ( baseVolume * price_dict[baseAsset] if baseAsset in price_dict else -1 ) for baseAsset, baseVolume in zip(all_markets.baseAsset, all_markets.volume)] all_markets.loc[:, "USDVolume"] = usd_volume return all_markets.sort_values("USDVolume", ascending=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account_df(self, typ='trades', improve=False):\n cols = ['date_open', 'date_close', 'symbol', 'style', 'volume', 'price_open', 'price_stop', 'price_limit', 'price_close', 'comment', 'magic', 'order_id_master', 'order_id_stop', 'order_id_limit', 'direction', 'price_diff', 'price_diff', 'price_diff_d', 'price_diff_rel', 'price_diff_rel_d', 'MAE', 'MFE', 'MAE_rel', 'MFE_rel', 'price_trailing_diff', 'profit']\n d = self._d_orders[typ]\n if len(d)>0:\n df = pd.DataFrame(d.values(), index=d.keys())\n df = df.rename(columns={0: 'bo'})\n df['date_created'] = df['bo'].map(lambda o: o.date_created)\n df['date_open'] = df['bo'].map(lambda o: o.date_open)\n df['date_close'] = df['bo'].map(lambda o: o.date_close)\n df['date_closed'] = df['bo'].map(lambda o: o.date_closed)\n df['symbol'] = df['bo'].map(lambda o: o.symbol)\n #df['style'] = df['bo'].map(lambda o: o.style)\n df['volume'] = df['bo'].map(lambda o: o.volume)\n df['price_open'] = df['bo'].map(lambda o: o.price_open)\n df['price_stop'] = df['bo'].map(lambda o: o.price_stop)\n df['price_limit'] = df['bo'].map(lambda o: o.price_limit)\n df['price_close'] = df['bo'].map(lambda o: o.price_close)\n df['comment'] = df['bo'].map(lambda o: o.comment)\n df['magic'] = df['bo'].map(lambda o: o.magic)\n #df['order_id_master'] = df['bo'].map(lambda o: o.order_id_master)\n #df['order_id_stop'] = df['bo'].map(lambda o: o.order_id_stop)\n #df['order_id_limit'] = df['bo'].map(lambda o: o.order_id_limit)\n\n df['direction'] = df['bo'].map(lambda o: o.direction)\n\n df['price_diff'] = df['bo'].map(lambda o: o.price_diff)\n df['price_diff_d'] = df['bo'].map(lambda o: o.price_diff_d)\n df['price_diff_rel'] = df['bo'].map(lambda o: o.price_diff_rel)\n df['price_diff_rel_d'] = df['bo'].map(lambda o: o.price_diff_rel_d)\n \n df['MAE'] = df['bo'].map(lambda o: o.MAE)\n df['MFE'] = df['bo'].map(lambda o: o.MFE)\n \n #df['MAE_rel'] = df['MAE'] / df['price_open']\n #df['MFE_rel'] = df['MFE'] / df['price_open']\n df['MAE_rel'] = df['bo'].map(lambda o: o.MAE_rel)\n df['MFE_rel'] = df['bo'].map(lambda o: o.MFE_rel)\n \n\n #df['profit'] = df['volume'] * df['price_diff'].fillna(0)\n df['profit'] = df['bo'].map(lambda o: o.profit)\n #df['profit_rel'] = df['bo'].map(lambda o: o.profit_rel)\n \n if improve:\n try:\n df = improve_account_df_with_additional_data(df)\n except Exception as e:\n log.error(\"Can't improve account df with additional data\")\n log.error(\"Reason: %s\" % str(e))\n \n #del df['bo'] \n \n return(df)\n else:\n return(pd.DataFrame(columns=cols))", "def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. 
This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data", "def trade_vol_VS_tx_vol(df):\n\n volume_cryptocompare = df['Volume (BTC)']\n volume_tx = df['Tx Volume (BTC)']\n result = volume_cryptocompare.div(volume_tx).fillna(0)\n result.name = 'Trade Vol / Tx Vol'\n return out(SETTINGS, df, result)", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def exchanges_df(self) -> pd.DataFrame:\n mid_price = self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)\n maker_buy_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n maker_sell_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n taker_buy_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n taker_sell_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n maker_buy_spread_bps = (maker_buy_result.result_price - taker_buy_result.result_price) / mid_price * 10000\n maker_sell_spread_bps = (taker_sell_result.result_price - maker_sell_result.result_price) / mid_price * 10000\n columns = [\"Exchange\", \"Market\", \"Mid Price\", \"Buy Price\", \"Sell Price\", \"Buy Spread\", \"Sell Spread\"]\n data = []\n data.append([\n self.maker_exchange,\n self.maker_pair,\n float(self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)),\n float(maker_buy_result.result_price),\n float(maker_sell_result.result_price),\n int(maker_buy_spread_bps),\n int(maker_sell_spread_bps)\n ])\n data.append([\n self.taker_exchange,\n self.taker_pair,\n float(self.connectors[self.taker_exchange].get_mid_price(self.maker_pair)),\n float(taker_buy_result.result_price),\n float(taker_sell_result.result_price),\n int(-maker_buy_spread_bps),\n int(-maker_sell_spread_bps)\n ])\n df = pd.DataFrame(data=data, columns=columns)\n return df", "def map_to_trade(self, raw_trade: HitbtcRawTradeModel) -> HitbtcTradeModel:\n\n id_ = int(raw_trade[\"id\"])\n price = Decimal(raw_trade[\"price\"])\n quantity = Decimal(raw_trade[\"quantity\"])\n side = raw_trade[\"side\"]\n timestamp = raw_trade[\"timestamp\"]\n\n trade = HitbtcTradeModel(\n id=id_,\n price=price,\n quantity=quantity,\n side=side,\n timestamp=timestamp)\n\n return trade", "def _deserialize_trade(self, raw_result: list[Any]) -> Trade:\n amount = deserialize_asset_amount(raw_result[4])\n trade_type = TradeType.BUY if amount >= ZERO else TradeType.SELL\n bfx_pair = self._process_bfx_pair(raw_result[1])\n if bfx_pair in self.pair_bfx_symbols_map:\n bfx_base_asset_symbol, bfx_quote_asset_symbol = self.pair_bfx_symbols_map[bfx_pair]\n elif len(bfx_pair) == 6:\n # Could not see it in the listed pairs. Probably delisted. Gotta try and figure it out\n # TODO: The whole pair logic in bitfinex seems complicated. Simplify!\n bfx_base_asset_symbol = bfx_pair[:3]\n bfx_quote_asset_symbol = bfx_pair[3:]\n else:\n raise DeserializationError(\n f'Could not deserialize bitfinex trade pair {raw_result[1]}. 
'\n f'Raw trade: {raw_result}',\n )\n\n base_asset = asset_from_bitfinex(\n bitfinex_name=bfx_base_asset_symbol,\n currency_map=self.currency_map,\n )\n quote_asset = asset_from_bitfinex(\n bitfinex_name=bfx_quote_asset_symbol,\n currency_map=self.currency_map,\n )\n fee_asset = asset_from_bitfinex(\n bitfinex_name=raw_result[10],\n currency_map=self.currency_map,\n )\n\n trade = Trade(\n timestamp=Timestamp(int(raw_result[2] / 1000)),\n location=Location.BITFINEX,\n base_asset=base_asset,\n quote_asset=quote_asset,\n trade_type=trade_type,\n amount=AssetAmount(abs(amount)),\n rate=deserialize_price(raw_result[5]),\n fee=Fee(abs(deserialize_fee(raw_result[9]))),\n fee_currency=fee_asset,\n link=str(raw_result[0]),\n notes='',\n )\n return trade", "def calculate_vol_adjusted_index_from_prices(self, prices_df, br):\n\n tsc = TimeSeriesCalcs()\n\n returns_df, leverage_df = self.calculate_vol_adjusted_returns(prices_df, br, returns = False)\n\n return tsc.create_mult_index(returns_df)", "def create_pair_differences(self):\n\n # Create an empty dataframe of pair differences, we will append this later.\n pair_string_names = []\n pair_price_diff = []\n\n for pair in self.__pairs_data:\n # Choose both stocks from each pair\n stock_symbol_1 = pair[0]\n stock_symbol_2 = pair[1]\n\n # Create a string that symbolizes the pair and add it to a list of strings\n pair_string = str(stock_symbol_1) + '-' + str(stock_symbol_2)\n pair_string_names.append(pair_string)\n\n # Get both stock prices from the price dataset\n stock_price1 = self.__price_data[stock_symbol_1]\n stock_price2 = self.__price_data[stock_symbol_2]\n pair_diff = stock_price2 - stock_price1\n pair_price_diff.append(pair_diff)\n\n # Concat all the pairs into the pair differences attribute in class and set column names\n self.__pair_diff = pd.concat([pd.Series(pair_prices) for pair_prices in pair_price_diff], axis=1)\n self.__pair_diff.columns = pair_string_names\n\n return self.__pair_diff", "async def get_trading_table(self):\n if self.trading_table is None:\n self.trading_table = {}\n wikitext = await Controller.get_wikitext('Trading')\n for match in re.finditer(r\"===='''([^']+)'''====\\n({\\|[^\\n]*\\n(?:[^\\n]*\\n)+?\\|})\", wikitext):\n place = match.group(1)\n trade_list = {'into':{}, 'from':{}}\n for row in match.group(2).strip().split('|-'):\n if len(row) < 5:\n continue\n trade = re.search(r'\\|([0-9,.]+)\\|\\| \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]\\|\\|→\\n\\|align\\=right\\|([0-9,.]+)\\|\\| \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]', row)\n if not trade:\n trade = re.search(r'\\| ?([0-9,.]+) \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]\\|\\| ?([0-9,.]+) \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]', row)\n if not trade:\n logging.warn(f'No trade row in `{row}`')\n continue\n from_amt = int(trade.group(1).replace(',', ''))\n from_itm = trade.group(2).lower()\n to_amt = int(trade.group(3).replace(',', ''))\n to_itm = trade.group(4).lower()\n if from_itm not in trade_list['from']:\n trade_list['from'][from_itm] = []\n if to_itm not in trade_list['into']:\n trade_list['into'][to_itm] = []\n trade_list['from'][from_itm].append((to_itm, from_amt, to_amt))\n trade_list['into'][to_itm].append((from_itm, to_amt, from_amt))\n if '(' in place:\n # Gorenichi (Kiev), Magnitogorsk (trader), Magnitogorsk (fitter)\n if place[0] == 'G':\n place = 'Kiev'\n self.trading_table[place.lower()] = trade_list\n return self.trading_table", "def aggregate_historical_trades(self, pair: list):\n raise NotImplementedError", "def returnTradeHistory(self,\n 
currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def get_data(pair, other):\n days_ago = 7\n endtime = int(time())\n starttime = endtime - 60 * 60 * 24 * days_ago\n\n geckourl = '%s/markets?vs_currency=%s&ids=%s' % (API, pair[\"currency\"],\n pair[\"coin\"])\n liveprice = requests.get(geckourl).json()[0]\n pricenow = float(liveprice['current_price'])\n alltimehigh = float(liveprice['ath'])\n other['volume'] = float(liveprice['total_volume'])\n\n url_hist = '%s/%s/market_chart/range?vs_currency=%s&from=%s&to=%s' % (\n API, pair[\"coin\"], pair[\"currency\"], str(starttime), str(endtime))\n\n try:\n timeseriesarray = requests.get(url_hist).json()['prices']\n except JSONDecodeError as err:\n print(f'Caught JSONDecodeError: {repr(err)}')\n return None\n timeseriesstack = []\n length = len(timeseriesarray)\n i = 0\n while i < length:\n timeseriesstack.append(float(timeseriesarray[i][1]))\n i += 1\n\n timeseriesstack.append(pricenow)\n if pricenow > alltimehigh:\n other['ATH'] = True\n else:\n other['ATH'] = False\n\n other[\"image\"] = pair[\"image\"]\n other[\"coin\"] = pair[\"coin\"]\n\n return timeseriesstack", "def format_pair_result(pair_name, pair_tuple, price):\n return {\n \"name\": pair_name,\n \"base_volume\": \"%.7f\" % pair_tuple[0],\n \"counter_volume\": \"%.7f\" % pair_tuple[1],\n \"trade_count\": pair_tuple[2],\n \"price\": \"%.7f\" % price\n }", "def get_usdt_pairs(self):\r\n usdt_pairs = {}\r\n raw_symbols = self.kc.get_symbols()\r\n '''\r\n {'symbol': 'GRIN-USDT', 'quoteMaxSize': '99999999', 'enableTrading': True, 'priceIncrement': '0.000001',\r\n 'feeCurrency': 'USDT', 'baseMaxSize': '10000000000', 'baseCurrency': 'GRIN', 'quoteCurrency': 'USDT', 'market': 'USDS', 'quoteIncrement': '0.000001',\r\n 'baseMinSize': '0.01', 'quoteMinSize': '0.01', 'name': 'GRIN-USDT', 'baseIncrement': '0.00000001', 'isMarginEnabled': False}\r\n '''\r\n\r\n for data in raw_symbols:\r\n if self.base_currency in data[\"symbol\"]:\r\n pair = data[\"symbol\"]\r\n quote, base = pair.split('-')\r\n if base == self.base_currency:\r\n self.log(pair, quote)\r\n # add/modify data here\r\n usdt_pairs[quote] = data\r\n\r\n return usdt_pairs", "def return_trade_history(self, currency_pair):\n return self.api_query('returnTradeHistory', {\"currencyPair\": currency_pair})", "def make_trade_params(pair):\n params = {\n \"order\": \"desc\",\n \"limit\": 1,\n }\n params.update(make_asset_param_from_pair(pair, \"base\"))\n params.update(make_asset_param_from_pair(pair, \"counter\"))\n return params", "async def update_adjusted_tick_data(self, pair: str):\n\n base = config['trade_base']\n pair_base = pair.split('-')[0]\n\n try:\n last_time = self.last_adjusted_close_times[pair]\n start_index = self.close_times[pair].index(last_time) + 1\n\n except ValueError:\n self.log.error(\"{} has no adjusted close times.\", pair)\n last_time = 0\n start_index = 0\n\n diff = len(self.close_times[pair]) - start_index\n if diff != 1:\n self.log.debug(\"{} got diff {}, source length {}, last time {}.\",\n pair, diff, len(self.close_times[pair]), last_time)\n\n if base == pair_base:\n self.adjusted_close_values[pair] = self.close_values[pair]\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n await self._update_volume_derivatives(pair, diff, start_index)\n await self._truncate_adjusted_tick_data(pair)\n return\n\n convert_pair = 
'{}-{}'.format(base, pair_base)\n missing = 0\n\n for index in range(diff):\n try:\n convert_value = self.close_values[convert_pair][start_index + index]\n except IndexError:\n convert_value = self.close_values[convert_pair][-1]\n missing += 1\n\n close_value = self.close_values[pair][start_index + index]\n self.adjusted_close_values[pair].append(close_value * convert_value)\n\n if missing > 0:\n self.log.debug(\"{} padded {} values at end.\", pair, missing)\n\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n await self._update_volume_derivatives(pair, diff, start_index)\n await self._truncate_adjusted_tick_data(pair)", "def typical_prices_from_trades_history(trade_history, read=False):\r\n if not read: # in that case you get data directly from the market\r\n start_date = _get_biggest_anterior_date(trade_history.index[-1]) + datetime.timedelta(minutes=5)\r\n trade_history = _cut_smallest_dates(trade_history, start_date)\r\n current_date = start_date + datetime.timedelta(hours=2)\r\n dates = [start_date + datetime.timedelta(hours=2)]\r\n # dates = []\r\n volumes = []\r\n while current_date < trade_history.index[0] + datetime.timedelta(hours=2):\r\n current_date += datetime.timedelta(minutes=5)\r\n dates.append(current_date)\r\n typical_prices = []\r\n k = 1\r\n date_index = 1\r\n while k < trade_history.shape[0]+1: # you have to go through all the rows of the dataframe\r\n price = 0\r\n normalizing_factor = 0\r\n volume = 0\r\n j = 0\r\n try:\r\n # you have one price per date. Each price is computed from the traded prices between a date and the following one\r\n while trade_history.index[-(k+j)] + datetime.timedelta(hours=2) <= dates[date_index]:\r\n if isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], pd.Series): # there can be several trades at the same time. In that case you get a series rather than a number\r\n for l in range(len(trade_history.loc[trade_history.index[-(k+j)], 'total'].values.tolist())):\r\n price += float(trade_history.loc[trade_history.index[-(k+j)], 'total'].values[l]) # several rows could have the same date. So take the mean. 
Maybe weight the prices with the volume ?\r\n normalizing_factor += float(trade_history.loc[trade_history.index[-(k+j)], 'amount'].values[l])\r\n volume += float(trade_history.loc[trade_history.index[-(k+j)], 'total'].values[l])\r\n j += trade_history.loc[trade_history.index[-(k+j)], 'rate'].shape[0]\r\n else:\r\n price += float(trade_history.loc[trade_history.index[-(k+j)], 'total'])\r\n normalizing_factor += float(trade_history.loc[trade_history.index[-(k+j)], 'amount'])\r\n volume += float(trade_history.loc[trade_history.index[-(k+j)], 'total'])\r\n j += 1\r\n if j != 0:\r\n price /= normalizing_factor\r\n typical_prices.append(price)\r\n k += j\r\n else:\r\n typical_prices.append(np.nan)\r\n k += 1\r\n volumes.append(volume)\r\n date_index += 1\r\n except IndexError:\r\n # print \"k+j: %s, \\n trade_history.shape: %s, \\n date_index: %s, \\n len(dates): %s\" % (k+j, trade_history.shape, date_index, len(dates))\r\n break\r\n return dates, typical_prices, volumes\r\n else: # otherwise you get a file where every data is unicode\r\n start_date = _get_biggest_anterior_date(parser.parse(trade_history.index[-1])) + datetime.timedelta(minutes=5)\r\n current_date = start_date + datetime.timedelta(hours=2)\r\n dates = []\r\n volumes = []\r\n while current_date < parser.parse(trade_history.index[0]) + datetime.timedelta(hours=2):\r\n current_date += datetime.timedelta(minutes=5)\r\n dates.append(current_date)\r\n typical_prices = []\r\n k = 1\r\n date_index = 1\r\n while k < trade_history.shape[0]+1:\r\n price = 0\r\n volume = 0\r\n normalizing_factor = 0\r\n j = 0\r\n try:\r\n while parser.parse(trade_history.index[-(k+j)]) + datetime.timedelta(hours=2) <= dates[date_index]:\r\n if not(isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], np.float64) or isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], np.float32) or isinstance(trade_history.loc[trade_history.index[-(k+j)], 'rate'], float)):\r\n price += np.sum(trade_history.loc[trade_history.index[-(k+j)], 'total'].values) # several rows could have the same date. So take the mean. 
Maybe weight the prices with the volume ?\r\n normalizing_factor += np.sum(trade_history.loc[trade_history.index[-(k+j)], 'amount'].values)\r\n volume += np.sum(trade_history.loc[trade_history.index[-(k+j)], 'total'].values)\r\n j += trade_history.loc[trade_history.index[-(k+j)], 'rate'].shape[0]\r\n else:\r\n price += trade_history.loc[trade_history.index[-(k+j)], 'total']\r\n normalizing_factor += trade_history.loc[trade_history.index[-(k+j)], 'amount']\r\n volume += trade_history.loc[trade_history.index[-(k+j)], 'total']\r\n j += 1\r\n if j != 0:\r\n price /= normalizing_factor\r\n typical_prices.append(price)\r\n k += j\r\n else:\r\n typical_prices.append(np.nan)\r\n k += 1\r\n volumes.append(volume)\r\n date_index += 1\r\n except IndexError:\r\n # print \"k+j: %s, \\n trade_history.shape: %s, \\n date_index: %s, \\n len(dates): %s\" % (k+j, trade_history.shape, date_index, len(dates))\r\n break\r\n return dates, typical_prices, volumes", "def active_orders_df(self) -> pd.DataFrame:\n columns = [\"Exchange\", \"Market\", \"Side\", \"Price\", \"Amount\", \"Spread Mid\", \"Spread Cancel\", \"Age\"]\n data = []\n mid_price = self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)\n taker_buy_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n taker_sell_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n buy_cancel_threshold = taker_sell_result.result_price * Decimal(1 - self.min_spread_bps / 10000)\n sell_cancel_threshold = taker_buy_result.result_price * Decimal(1 + self.min_spread_bps / 10000)\n for connector_name, connector in self.connectors.items():\n for order in self.get_active_orders(connector_name):\n age_txt = \"n/a\" if order.age() <= 0. 
else pd.Timestamp(order.age(), unit='s').strftime('%H:%M:%S')\n spread_mid_bps = (mid_price - order.price) / mid_price * 10000 if order.is_buy else (order.price - mid_price) / mid_price * 10000\n spread_cancel_bps = (buy_cancel_threshold - order.price) / buy_cancel_threshold * 10000 if order.is_buy else (order.price - sell_cancel_threshold) / sell_cancel_threshold * 10000\n data.append([\n self.maker_exchange,\n order.trading_pair,\n \"buy\" if order.is_buy else \"sell\",\n float(order.price),\n float(order.quantity),\n int(spread_mid_bps),\n int(spread_cancel_bps),\n age_txt\n ])\n if not data:\n raise ValueError\n df = pd.DataFrame(data=data, columns=columns)\n df.sort_values(by=[\"Market\", \"Side\"], inplace=True)\n return df", "async def refresh_adjusted_tick_data(self, pair: str):\n\n self.base_24hr_volumes[pair][1] = array('d')\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n\n trade_base = config['trade_base']\n pair_base = pair.split('-')[0]\n\n if trade_base == pair_base:\n self.adjusted_close_values[pair] = self.close_values[pair]\n await self._refresh_volume_derivatives(pair)\n return\n\n else:\n self.adjusted_close_values[pair] = array('d')\n\n convert_pair = '{}-{}'.format(trade_base, pair_base)\n\n try:\n source_index = len(self.close_times[pair]) - 1\n convert_index = self.close_times[convert_pair].index(self.close_times[pair][-1])\n\n except ValueError:\n try:\n convert_index = len(self.close_times[convert_pair]) - 1\n source_index = self.close_times[pair].index(self.close_times[convert_pair][-1])\n convert_value = self.close_values[convert_pair][-1]\n\n for index in range(len(self.close_times[pair]) - 1, source_index, -1):\n adjusted_value = self.close_values[pair][index] * convert_value\n self.adjusted_close_values[pair].insert(0, adjusted_value)\n\n self.log.debug(\"{} last {} adjusted values are approximate.\", pair,\n len(self.close_times[pair]) - source_index)\n\n except ValueError:\n self.adjusted_close_values[pair] = array('d')\n self.log.error(\"{} ends at {} before start of convert pair {} data at {}.\",\n pair, self.close_times[pair][-1], convert_pair, self.close_times[convert_pair][0])\n return\n\n for index in range(source_index, -1, -1):\n if convert_index > -1:\n convert_value = self.close_values[convert_pair][convert_index]\n else:\n convert_value = self.close_values[convert_pair][0]\n\n adjusted_value = self.close_values[pair][index] * convert_value\n self.adjusted_close_values[pair].insert(0, adjusted_value)\n convert_index -= 1\n\n if convert_index < 0:\n self.log.debug(\"{} first {} adjusted values are approximate.\", pair, convert_index * -1)\n\n await self._refresh_volume_derivatives(pair)", "def _normalize_trade_type(t):\n t = copy(t)\n assert isclose(t[\"vol\"], t[\"cost\"] / t[\"price\"], rel_tol=1e-4)\n if t[\"type\"] == \"sell\":\n t = _flip_pair(t)\n assert isclose(t[\"vol\"], t[\"cost\"] / t[\"price\"], rel_tol=1e-4)\n return t", "def get_matching_ticker(row: QFSeries) -> Ticker:\n ticker_str = row.loc[\"Contract symbol\"]\n name = row.loc[\"Asset Name\"]\n sec_type = SecurityType(row.loc[\"Security type\"])\n point_value = row.loc[\"Contract size\"]\n ticker = ticker_params_to_ticker.get((name, sec_type, point_value), None)\n if isinstance(ticker, FutureTicker):\n ticker_type = ticker.supported_ticker_type()\n ticker = ticker_type(ticker_str, sec_type, point_value)\n return ticker", "async def prepare_trades(self, pair: str):\n\n if pair not in self.trades:\n self.trades[pair] = {\n 'last_open_time': 0.0,\n 'rebuy_count': 
0,\n 'open': [],\n 'closed': []\n }", "def _get_trades(self):\n\n trade_url = self.trade_url % (self.date, self.instrument, self.exchange)\n self.trades = pd.read_csv(trade_url, parse_dates=[0],\n date_parser=lambda t: pd.to_datetime(str(t), format='%Y%m%dT%H%M%S'))\n\n self.trades.fillna(np.nan)\n self.trades.index = pd.to_datetime(self.trades.time, unit='s')\n self.trades.time = pd.to_datetime(self.trades.time, unit='s')\n self.trades.columns = ['time', 'price', 'volume', 'source', 'buyer', 'seller', 'initiator']\n # del self.trades['time']\n\n if self.exclude_derivative:\n self.trades = self.trades[(self.trades.source != 'Derivatives trade') & (self.trades.source != 'Official')]", "def get_adjusted_data(stockSymbol, df):\n\n events = ['SPLIT', 'BONUS']\n arr = ['Open Price', 'High Price', 'Low Price',\n 'Last Price', 'Close Price', 'Average Price']\n\n stockSymbol = stockSymbol.replace('&', '%26')\n\n if(df.empty):\n print(\"Please check data. Dataframe is empty\")\n return df\n\n df.index = pd.to_datetime(df.index)\n df.sort_index(inplace=True)\n\n try:\n df = df.drop(['Prev Close'], axis=1)\n except KeyError:\n pass\n\n for event in events:\n\n ratio, dates = scrape_bonus_splits(stockSymbol, event)\n for i in range(len(dates)):\n\n date = datetime.datetime.strptime(dates[i], '%d-%b-%Y')\n print(event, \" on : \", dates[i], \" and ratio is : \", ratio[i])\n\n changed_data = df.loc[df.index < date]\n same_data = df.loc[df.index >= date]\n\n for j in arr:\n\n try:\n changed_data.loc[:, j] = changed_data.loc[:, j]/ratio[i]\n except TypeError:\n pass\n\n df = pd.concat([changed_data, same_data])\n\n return df", "def merge_new(dfc, pairs, span=None):\n global last_update\n t1 = Timer()\n columns = ['open', 'close', 'trades', 'volume', 'buy_ratio']\n exclude = ['_id','high','low','quote_vol','sell_vol', 'close_time']\n projection = dict(zip(exclude, [False]*len(exclude)))\n idx, data = [], []\n db = app.get_db()\n\n if span is None and last_update:\n # If no span, query/merge db records inserted since last update.\n oid = ObjectId.from_datetime(last_update)\n last_update = now()\n _filter = {'_id':{'$gte':oid}}\n else:\n # Else query/merge all since timespan.\n span = span if span else timedelta(days=7)\n last_update = now()\n _filter = {'pair':{'$in':pairs}, 'close_time':{'$gte':now()-span}}\n\n batches = db.candles.find_raw_batches(_filter, projection)\n\n if batches.count() < 1:\n return dfc\n\n try:\n ndarray = bsonnumpy.sequence_to_ndarray(\n batches,\n dtype,\n db.candles.count()\n )\n except Exception as e:\n log.error(str(e))\n return dfc\n #raise\n\n df = pd.DataFrame(ndarray)\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')\n df['freq'] = df['freq'].str.decode('utf-8')\n df['pair'] = df['pair'].str.decode('utf-8')\n\n df['freq'] = df['freq'].replace('1m',60)\n df['freq'] = df['freq'].replace('5m',300)\n df['freq'] = df['freq'].replace('1h',3600)\n df['freq'] = df['freq'].replace('1d',86400)\n df = df.sort_values(by=['pair','freq','open_time'])\n\n df2 = pd.DataFrame(df[columns].values,\n index = pd.MultiIndex.from_arrays(\n [df['pair'], df['freq'], df['open_time']],\n names = ['pair','freq','open_time']),\n columns = columns\n ).sort_index()\n\n df3 = pd.concat([dfc, df2]).drop_duplicates().sort_index()\n\n log.debug(\"{:,} records loaded into numpy. 
[{:,.1f} ms]\".format(\n len(df3), t1))\n #print(\"Done in %s ms\" % t1)\n return df3", "def compute_derived_blockchain_data(df):\n\n original_join_state = SETTINGS.join\n\n SETTINGS.join = False\n result = pd.concat([trade_vol_VS_tx_vol(df),\n miners_revenue_VS_tx_volume(df),\n block_reward_USD(df),\n tx_fees_VS_miners_revenue(df),\n avg_tx_fees_USD(df),\n avg_tx_fees_BTC(df),\n avg_tx_value_USD(df),\n avg_tx_value_BTC(df),\n fee_VS_tx_value(df)], axis=1)\n\n velocity_df = compute_function_different_periods(df,\n periods=ta_periods,\n function=velocity)\n NVT_df = compute_function_different_periods(df,\n periods=ta_periods,\n function=NVT_ratio)\n\n result = pd.concat([result, velocity_df, NVT_df], axis=1)\n\n SETTINGS.join = original_join_state\n return out(SETTINGS, df, result)", "def prices_pivot(self, prices_df, if_exists='append'):\n\n prices_df['date_hour'] = prices_df.index\n prices_df['date_hour'] = prices_df['date_hour'].dt.strftime(\"%Y-%m-%d %H:00:00\")\n prices_df['eurbidprice'] = prices_df['EURbidPrice']\n prices_df.drop(['EURbidPrice'], axis=1, inplace = True)\n \n with open('../creds/pg_creds.json') as json_data:\n d = json.load(json_data)\n json_data.close()\n\n user = d[0]['user']\n password = d[0]['password']\n \n engine = create_engine('postgresql://' + user + ':' + password + '@localhost:5432/cryptotracker')\n prices_df.to_sql('eur_prices', engine, schema=\"public\", if_exists=if_exists, index=False)\n\n return", "def DataTrimmer(datatuple):\r\n TickerName = datatuple[1]\r\n\r\n df = pd.DataFrame(datatuple[0]).transpose()\r\n df.index = pd.DatetimeIndex(df.index)\r\n df = df[df.index >= datetime(2020,1,1)] # used 1/1/2020 because first Covid19 case in the US happened on 1/20/2020\r\n df = df.sort_index()\r\n df = df.rename(columns={\"4. 
close\":TickerName})\r\n\r\n outputseries = df[TickerName].astype(float)\r\n\r\n return outputseries", "def compute_market_prices(prices):\n denom = prices.bid_volume + prices.ask_volume\n numer = (prices.bid_price * prices.ask_volume +\n prices.ask_price * prices.bid_volume)\n mask = denom == 0\n denom[mask] = 2\n numer[mask] = prices.bid_price[mask] + prices.ask_price[mask]\n prices = prices.copy()\n prices['market_price'] = numer / denom\n return prices", "def map_to_ticker(self, raw_ticker: HitbtcRawTickerModel) -> HitbtcTickerModel:\n\n symbol = raw_ticker[\"symbol\"]\n low = Decimal(raw_ticker[\"low\"])\n high = Decimal(raw_ticker[\"high\"])\n volume = Decimal(raw_ticker[\"volume\"])\n volume_quote = Decimal(raw_ticker[\"volumeQuote\"])\n timestamp = raw_ticker[\"timestamp\"]\n raw_ask = raw_ticker[\"ask\"]\n ask = Decimal(raw_ask) if raw_ask is not None else raw_ask\n raw_bid = raw_ticker[\"bid\"]\n bid = Decimal(raw_bid) if raw_bid is not None else raw_bid\n raw_last = raw_ticker[\"last\"]\n last = Decimal(raw_last) if raw_last is not None else raw_last\n raw_open = raw_ticker[\"open\"]\n open_ = Decimal(raw_open) if raw_open is not None else raw_open\n\n ticker = HitbtcTickerModel(\n symbol=symbol,\n low=low,\n high=high,\n volume=volume,\n volume_quote=volume_quote,\n timestamp=timestamp,\n ask=ask,\n bid=bid,\n last=last,\n open=open_)\n\n return ticker", "def con_trade(self):\r\n if self.trade_direction == 'BUY':\r\n character = 'B'\r\n else:\r\n character = 'S'\r\n\r\n\r\n #'sB', 'sS', 'Close', 'short_mavg', 'positionsB', 'positionsS', 'TradeID'\r\n if self.trade_direction == 'BUY':\r\n self.signals.reset_index(inplace=True)\r\n self.signals['TradeID2'] = abs(self.signals['positionsB']).cumsum()\r\n #z = (f'positions{character}')\r\n #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\r\n\r\n #dftemp['ST'] = np.where((dftemp['Close']==1401.25) | (dftemp['Close']==1412.50), -1.0, 0.0)\r\n self.signals['ST'] = np.where((self.signals['signalscumUncrystallized']>=self.profit_tradelimit) | (self.signals['signalscumUncrystallized']<=self.loss_tradelimit), -1.0, 0.0)\r\n\r\n self.signals['modSignals'] = self.signals['sB']\r\n groupobj = self.signals.groupby('TradeID2')\r\n grouplist = list(groupobj)\r\n\r\n listsize = len(grouplist)\r\n\r\n '''function to keep only one -1 entry in ST column for each group '''\r\n for i in range(0,listsize):\r\n #loc\r\n #dftemp['ST'] = np.where((dftemp['cumUncrystallized']<tradelimits[i][0]*product_data.multiplier) | (dftemp['cumUncrystallized']>tradelimits[i][1]*product_data.multiplier), -1.0, 0.0)\r\n loc_result = grouplist[i][1].loc[grouplist[i][1].ST == -1]\r\n countSTs = loc_result.sB.count()\r\n\r\n if countSTs == 0:\r\n #print('pass')\r\n first_loc_result = grouplist[i][1].index[0]\r\n\r\n location_last = grouplist[i][1][-1:]\r\n\r\n location_last_list = list(location_last.index)\r\n\r\n location_result = location_last_list[0]+1\r\n location_result\r\n\r\n #pass\r\n else:\r\n #print('else')\r\n #print(f'loc result: {countSTs}')\r\n loc_result_list = list(loc_result.index)\r\n first_loc_result = loc_result_list[0]\r\n first_loc_result\r\n location_last = grouplist[i][1][-1:]\r\n location_last_list = list(location_last.index)\r\n location_result = location_last_list[0]+1\r\n location_result\r\n self.signals['modSignals'][first_loc_result:location_result]=0\r\n #dftemp.iloc[dftemp.modSignals][first_loc_result:location_result]=0\r\n #if I want I can create new column for each time a ST -1 is found\r\n #dftemp[f'signalsB'{i}]\r\n 
self.signals.set_index('Date',inplace=True)\r\n self.signals['positionsB'] = self.signals['modSignals'].diff()\r\n #dftemp['modTradeID'] = abs(dftemp['modSignals'].diff()).cumsum()\r\n self.signals['modTradeID'] = abs(self.signals['positionsB']).cumsum()\r\n self.signals['modTradeID'] = self.signals['modTradeID']*abs(self.signals['positionsB'])\r\n self.signals['sB'] = self.signals['modSignals']\r\n\r\n self.signals['TradeID'] = self.signals['modTradeID']\r\n self.signals.drop(['TradeID2', 'ST','modSignals','modTradeID'], axis=1,inplace=True)\r\n\r\n '''Following code enables us to see points gained/lossed on each data point (row/date) compared to prior data point'''\r\n #inTrade (first bracket in following code) is True when enter a position and stays true until and including exit of trade is trigerred\r\n self.signals['signalsinTradePrice'] = ((self.signals['sB'].abs() + self.signals['positionsB'].abs())!=0)*self.signals['Close']\r\n self.signals['signalsperCloseP/L'] = (np.subtract(self.signals['signalsinTradePrice'],self.signals['signalsinTradePrice'].shift(1)).where((self.signals['signalsinTradePrice']!=0) & (self.signals['signalsinTradePrice'].shift(1)!=0)).fillna(0))\r\n\r\n '''following code gives us the cumsum of data points while in the trade '''\r\n '''Also good example of resetting cumsum to zero when zero encountered..'''\r\n self.signals['signalscumUncrystallized'] = self.signals['signalsperCloseP/L'].cumsum() - self.signals['signalsperCloseP/L'].cumsum().where(self.signals['signalsperCloseP/L'] == 0).ffill().fillna(0)\r\n\r\n '''following code identifies when the strategy is closed and shows crystallizes points gained/lossed'''\r\n self.signals['signalsboolCrystallization'] = self.signals['positionsB'] < 0\r\n self.signals['signalscumCrystallized'] = self.signals['signalsboolCrystallization']*self.signals['signalscumUncrystallized']\r\n else:\r\n self.signals.reset_index(inplace=True)\r\n self.signals['TradeID2'] = abs(self.signals['positionsS']).cumsum()\r\n #z = (f'positions{character}')\r\n #print('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\r\n\r\n #dftemp['ST'] = np.where((dftemp['Close']==1401.25) | (dftemp['Close']==1412.50), -1.0, 0.0)\r\n self.signals['ST'] = np.where((self.signals['signalscumUncrystallized']>=self.profit_tradelimit) | (self.signals['signalscumUncrystallized']<=self.loss_tradelimit), 1.0, 0.0)\r\n\r\n self.signals['modSignals'] = self.signals['sS']\r\n groupobj = self.signals.groupby('TradeID2')\r\n grouplist = list(groupobj)\r\n\r\n listsize = len(grouplist)\r\n\r\n '''function to keep only one -1 entry in ST column for each group '''\r\n for i in range(0,listsize):\r\n #loc\r\n #dftemp['ST'] = np.where((dftemp['cumUncrystallized']<tradelimits[i][0]*product_data.multiplier) | (dftemp['cumUncrystallized']>tradelimits[i][1]*product_data.multiplier), -1.0, 0.0)\r\n loc_result = grouplist[i][1].loc[grouplist[i][1].ST == 1]\r\n countSTs = loc_result.sS.count()\r\n\r\n if countSTs == 0:\r\n #print('pass')\r\n first_loc_result = grouplist[i][1].index[0]\r\n\r\n location_last = grouplist[i][1][-1:]\r\n\r\n location_last_list = list(location_last.index)\r\n\r\n location_result = location_last_list[0]+1\r\n location_result\r\n\r\n #pass\r\n else:\r\n #print('else')\r\n #print(f'loc result: {countSTs}')\r\n loc_result_list = list(loc_result.index)\r\n first_loc_result = loc_result_list[0]\r\n first_loc_result\r\n location_last = grouplist[i][1][-1:]\r\n location_last_list = list(location_last.index)\r\n location_result = location_last_list[0]+1\r\n 
location_result\r\n self.signals['modSignals'][first_loc_result:location_result]=0\r\n #dftemp.iloc[dftemp.modSignals][first_loc_result:location_result]=0\r\n #if I want I can create new column for each time a ST -1 is found\r\n #dftemp[f'signalsB'{i}]\r\n self.signals.set_index('Date',inplace=True)\r\n self.signals['positionsS'] = self.signals['modSignals'].diff()\r\n #dftemp['modTradeID'] = abs(dftemp['modSignals'].diff()).cumsum()\r\n self.signals['modTradeID'] = abs(self.signals['positionsS']).cumsum()\r\n self.signals['modTradeID'] = self.signals['modTradeID']*abs(self.signals['positionsS'])\r\n self.signals['sS'] = self.signals['modSignals']\r\n\r\n self.signals['TradeID'] = self.signals['modTradeID']\r\n self.signals.drop(['TradeID2', 'ST','modSignals','modTradeID'], axis=1,inplace=True)\r\n\r\n '''Following code enables us to see points gained/lossed on each data point (row/date) compared to prior data point'''\r\n #inTrade (first bracket in following code) is True when enter a position and stays true until and including exit of trade is trigerred\r\n self.signals['signalsinTradePrice'] = ((self.signals['sS'].abs() + self.signals['positionsS'].abs())!=0)*self.signals['Close']\r\n\r\n ########################################edo\r\n self.signals['signalsperCloseP/L'] = (np.subtract(self.signals['signalsinTradePrice'],self.signals['signalsinTradePrice'].shift(1)).where((self.signals['signalsinTradePrice']!=0) & (self.signals['signalsinTradePrice'].shift(1)!=0)).fillna(0))\r\n\r\n '''following code gives us the cumsum of data points while in the trade '''\r\n '''Also good example of resetting cumsum to zero when zero encountered..'''\r\n self.signals['signalscumUncrystallized'] = self.signals['signalsperCloseP/L'].cumsum() - self.signals['signalsperCloseP/L'].cumsum().where(self.signals['signalsperCloseP/L'] == 0).ffill().fillna(0)\r\n\r\n '''following code identifies when the strategy is closed and shows crystallizes points gained/lossed'''\r\n self.signals['signalsboolCrystallization'] = self.signals['positionsS'] > 0\r\n self.signals['signalscumCrystallized'] = self.signals['signalsboolCrystallization']*self.signals['signalscumUncrystallized']\r\n\r\n\r\n\r\n return self.signals", "def calculate_VWAP(self, trade_df):\n trade_df['total_value'] = trade_df['price'] * trade_df['shares'] # Calculate dollar value for each trade\n # Calculate hourly vwap at the end of each trading hour for each stock\n trade_df = trade_df.groupby([trade_df['end_time_hour'], trade_df['stock']])['total_value', 'shares'].sum()\n trade_df['vwap'] = round(trade_df['total_value'] / trade_df['shares'], 3)\n trade_df = trade_df.reset_index()\n trade_df = trade_df.sort_values(['end_time_hour', 'stock'], ascending=[True, True])\n # Calculate total dollar value traded in that trading day at the end of each trading hour for each stock\n trade_df['cum_total_value'] = trade_df.groupby(trade_df['stock'])['total_value'].cumsum()\n # Calculate total shares traded in that trading day at the end of each trading hour for each stock\n trade_df['cum_shares'] = trade_df.groupby(trade_df['stock'])['shares'].cumsum()\n # Calculate vwap in that trading day at the end of each trading hour for each stock\n trade_df['cum_vwap'] = round(trade_df['cum_total_value'] / trade_df['cum_shares'], 3)\n return trade_df", "def getStock(symbol, start, end):\n df = data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % 
symbol].pct_change()\n\n return df", "def generate_trade_summary(self, df):\n\n print(\"[{}] [INFO] Generating trade summary...\".format(datetime.now().isoformat()))\n\n # Dictionary of list. Key: symbol. Value: [ entry date, last open trade ]\n trade_info = {}\n df_trade_summary_values = []\n\n #--------------------------------------------------------------------------\n # Create named tuple trade.\n #--------------------------------------------------------------------------\n TradeSummaryTuple = namedtuple(\"TradeSummaryTuple\", \"symbol date_entry date_exit avg_price exit_price book_value trade_pnl\")\n\n #--------------------------------------------------------------------------\n # Process entry and exit only.\n #--------------------------------------------------------------------------\n for row in df.itertuples(index=False):\n\n # Skip rows without cashflow.\n if pd.isnull(row.cashflow) or row.cashflow == 0:\n continue\n\n # Store trade info into dictionary.\n if row.symbol not in trade_info and row.cashflow < 0:\n\n trade_info[row.symbol] = [row.date, row]\n\n # Update last open trade.\n elif row.symbol in trade_info and row.cnt_long == 1:\n\n trade_info[row.symbol][1] = row\n\n # Pop entry and store both entry and exit to list.\n elif row.symbol in trade_info and row.cnt_long == 0:\n\n # Unpack list.\n symbol_trade_info = trade_info.pop(row.symbol)\n entry_date, last_open_trade = symbol_trade_info\n exit_trade = row\n\n df_trade_summary_values.append(TradeSummaryTuple(\n last_open_trade.symbol,\n entry_date,\n last_open_trade.date,\n last_open_trade.avg_price,\n exit_trade.split_adjusted_open,\n last_open_trade.book_value,\n exit_trade.trade_pnl\n ))\n\n #--------------------------------------------------------------------------\n # Add outstanding trades.\n #--------------------------------------------------------------------------\n for symbol_trade_info in trade_info.values():\n\n # Unpack list.\n entry_date, last_open_trade = symbol_trade_info\n\n df_trade_summary_values.append(TradeSummaryTuple(\n last_open_trade.symbol,\n entry_date,\n np.nan,\n last_open_trade.avg_price,\n np.nan,\n last_open_trade.book_value,\n np.nan\n ))\n\n #--------------------------------------------------------------------------\n # Convert to dataframe.\n #--------------------------------------------------------------------------\n df_trade_summary = pd.DataFrame(df_trade_summary_values)\n\n #--------------------------------------------------------------------------\n # Add trade profit and loss percentage.\n #--------------------------------------------------------------------------\n df_trade_summary[\"trade_pnl_pct\"] = df_trade_summary.trade_pnl / df_trade_summary.book_value\n\n #--------------------------------------------------------------------------\n # Write to csv.\n #--------------------------------------------------------------------------\n df_trade_summary.to_csv(\"{}/algo_turtle_trade_summary.csv\".format(CSV_ROOT_PATH), index=False)\n\n #--------------------------------------------------------------------------\n # Trade statistics.\n #--------------------------------------------------------------------------\n trades = df_trade_summary\n winning_trades = trades.loc[ trades.trade_pnl > 0 ]\n losing_trades = trades.loc[ trades.trade_pnl < 0 ]\n\n print(\"----------------------------------------------------------------------\")\n print(\" All trades:\")\n print(\"----------------------------------------------------------------------\")\n print(trades[[\"trade_pnl\", 
\"trade_pnl_pct\"]].describe().to_string())\n\n print(\"----------------------------------------------------------------------\")\n print(\" Winning trades:\")\n print(\"----------------------------------------------------------------------\")\n print(winning_trades[[\"trade_pnl\", \"trade_pnl_pct\"]].describe().to_string())\n\n print(\"----------------------------------------------------------------------\")\n print(\" Losing trades:\")\n print(\"----------------------------------------------------------------------\")\n print(losing_trades[[\"trade_pnl\", \"trade_pnl_pct\"]].describe().to_string())", "def test_query_asset_pairs(\n test_client, pydex_client, asset_infos\n):\n expected_res = {\n 'total': 1,\n 'page': 1,\n 'perPage': 20,\n 'records': [\n {\n 'assetDataA': {\n 'minAmount': ZERO_STR,\n 'maxAmount': MAX_INT_STR,\n 'precision': DEFAULT_ERC20_DECIMALS,\n 'assetData': asset_infos.VETH_ASSET_DATA\n },\n 'assetDataB': {\n 'minAmount': ZERO_STR,\n 'maxAmount': MAX_INT_STR,\n 'precision': DEFAULT_ERC20_DECIMALS,\n 'assetData': asset_infos.LONG_ASSET_DATA\n }\n }\n ]\n }\n asset_pairs_params = pydex_client.make_asset_pairs_query(\n asset_data_a=asset_infos.VETH_ASSET_DATA,\n asset_data_b=asset_infos.LONG_ASSET_DATA,\n include_maybe_fillables=True\n )\n res = test_client.get(\n pydex_client.asset_pairs_url,\n query_string=asset_pairs_params\n )\n assert res.status_code == 200\n assert res.get_json() == expected_res\n asset_pairs_params = pydex_client.make_asset_pairs_query(\n include_maybe_fillables=True\n )\n res = test_client.get(\n pydex_client.asset_pairs_url,\n query_string=asset_pairs_params\n )\n assert res.status_code == 200\n assert res.get_json() == expected_res", "def get_trades(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'trades', query_string))\n if r.status_code == 200:\n return r.json()", "def get_quote(pair: CurrencyPair, amount: int) -> QuoteData:\n storage_quote = create_quote(\n currency_pair=CurrencyPairs.from_pair(pair),\n rate=get_rate(currency_pair=pair).rate,\n amount=amount,\n expires_at=datetime.now() + timedelta(minutes=10),\n )\n return QuoteData(\n quote_id=QuoteId(uuid.UUID(storage_quote.id)),\n rate=Rate(storage_quote.currency_pair.value, storage_quote.rate),\n expires_at=storage_quote.expires_at,\n amount=storage_quote.amount,\n )", "def get_stock_data_frame(time, stock):\n\n print(\"Getting\", time, \"stock data for\", stock)\n url = 'https://api.iextrading.com/1.0/stock/'+stock+'/chart/'+time\n req = requests.get(url)\n print(url)\n\n print(\"Parsing data.\")\n rjson = req.text\n\n rdata = json.loads(rjson)\n\n dates = []\n openprices = []\n highprices = []\n lowprices = []\n closeprices = []\n volumes = []\n\n for i in rdata:\n date = i['date']\n dates.append(date)\n openprices.append(float(i['open']))\n highprices.append(float(i['high']))\n lowprices.append(float(i['low']))\n closeprices.append(float(i['close']))\n volumes.append(float(i['volume']))\n\n index = pd.DatetimeIndex(dates, dtype='datetime64[ns]')\n _open = pd.Series(openprices, index=index)\n high = pd.Series(highprices, index=index)\n low = pd.Series(lowprices, index=index)\n close = pd.Series(closeprices, index=index)\n data_frame_data = {'Open' : _open, 'High' : high, 'Low' : low, 'Close' : close}\n\n return pd.DataFrame(data_frame_data)", "def map_to_symbol(self, raw_symbol: HitbtcRawSymbolModel) -> HitbtcSymbolModel:\n\n id_ = raw_symbol[\"id\"]\n base_currency = 
raw_symbol[\"baseCurrency\"]\n quote_currency = raw_symbol[\"quoteCurrency\"]\n quantity_increment = Decimal(raw_symbol[\"quantityIncrement\"])\n tick_size = Decimal(raw_symbol[\"tickSize\"])\n take_liquidity_rate = Decimal(raw_symbol[\"takeLiquidityRate\"])\n provide_liquidity_rate = Decimal(raw_symbol[\"provideLiquidityRate\"])\n fee_currency = raw_symbol[\"feeCurrency\"]\n\n symbol = HitbtcSymbolModel(\n id=id_,\n base_currency=base_currency,\n quote_currency=quote_currency,\n quantity_increment=quantity_increment,\n tick_size=tick_size,\n take_liquidity_rate=take_liquidity_rate,\n provide_liquidity_rate=provide_liquidity_rate,\n fee_currency=fee_currency)\n\n return symbol", "def load_pair_file(pair: str, filename: str):\n\n with open(filename) as file:\n tick_data = json.load(file)\n\n if tick_data is None:\n return(pair, [], [], [], [])\n\n source_values, source_times, source_volumes = Market._load_source_tick_data(tick_data)\n return (pair,) + Market._parse_source_tick_data(source_values, source_times, source_volumes)", "def get_pairs(quote='ETH'):\n k_quote_sym = translate_to_kraken(quote)\n\n j = requests.get(PAIRS_ENDPOINT).json()\n\n # {\"error\":[],\"result\":{\"BATETH\":{\"altname\":\"BATETH\",\"wsname\":\"BAT\\/ETH\",\"aclass_base\":\"currency\",\"base\":\"BAT\",\"aclass_quote\":\"currency\",\"quote\":\"XETH\",...\n if j.get('error'):\n raise KrakenAPIException(j['error'])\n else:\n r = j['result']\n return [ (translate_from_kraken(s['base']), translate_from_kraken(s['quote'])) for _, s in r.items() if s['quote'] == k_quote_sym ]", "def hourly_prices(self, eth_binance_symbols):\n \n date_hour = []\n symbol = []\n eurbidprice = []\n dict_for_df = {\n 'date_hour': date_hour,\n 'symbol': symbol,\n 'eurbidprice': eurbidprice\n }\n for item in eth_binance_symbols:\n dict_for_df['date_hour'].append(\n datetime.datetime.fromtimestamp(item[\"EthEurTime\"]/1000000).strftime(\"%Y-%m-%d %H:00:00\")\n )\n dict_for_df['symbol'].append(item[\"symbol\"][:-3])\n dict_for_df['eurbidprice'].append(item[\"EURbidPrice\"])\n input_df = pd.DataFrame(dict_for_df)\n \n with open('../creds/pg_creds.json') as json_data:\n d = json.load(json_data)\n json_data.close()\n\n user = d[0]['user']\n password = d[0]['password']\n \n engine = create_engine('postgresql://' + user + ':' + password + '@localhost:5432/cryptotracker')\n input_df.to_sql('eur_prices', engine, schema=\"public\", if_exists='append', index=False)\n\n return", "def _update_trade(self):\n # Populate price ladder dataframe. Assign trade to a side assuming there\n # isn't both a bid and ask at the same price. 
Aggregate consecutive\n # trades at the same price and populate cumulative volume.\n if self._quotes_row > 0:\n for i in range(self._config['row_count']):\n if math.isclose(self._price_ladder[i],\n self._trades_df.loc[self._trades_row, 'price']):\n volume = self._trades_df.loc[self._trades_row, 'volume']\n if self._price_ladder_df.iloc[i, 1]:\n if self._price_ladder_df.iloc[i, 0]:\n volume += int(self._price_ladder_df.iloc[i, 0])\n\n self._price_ladder_df.iloc[i, 0] = str(volume)\n self._price_ladder_df.iloc[i, 4] = ''\n elif self._price_ladder_df.iloc[i, 3]:\n if self._price_ladder_df.iloc[i, 4]:\n volume += int(self._price_ladder_df.iloc[i, 4])\n\n self._price_ladder_df.iloc[i, 0] = ''\n self._price_ladder_df.iloc[i, 4] = str(volume)\n else:\n self._price_ladder_df.iloc[i, [0, 4]] = ''\n\n # Print this trade row and update counter.\n print(self._trades_df.iloc[self._trades_row, ].values)\n self._trades_row += 1", "def getStock(symbol, start, end):\n df = pd.io.data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def prepare_cumulative_pairsets_for_metadata_export(pairs, name):\n\ttn_pairsets = pairs.loc[pairs['match_type'] == \"tumor_normal\", [\"entity:pair_id\"]].rename(columns={\"entity:pair_id\":\"pair\"})\n\ttp_pairsets = pairs.loc[pairs['match_type'] == \"tumor_primary\", [\"entity:pair_id\"]].rename(columns={\"entity:pair_id\":\"pair\"})\n\ttn_pairsets['membership:pair_set_id'] = \"Cum_TN_%s_all\"%name\n\ttp_pairsets['membership:pair_set_id'] = \"Cum_TP_%s_all\"%name\n\t\t\n\treturn tn_pairsets[['membership:pair_set_id', 'pair']], tp_pairsets[['membership:pair_set_id', 'pair']]", "def get_index_portfolio_value_data(game_id: int, symbol: str, start_time: float = None,\n end_time: float = None) -> pd.DataFrame:\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n base_value = get_index_reference(game_id, symbol)\n\n with engine.connect() as conn:\n df = pd.read_sql(\"\"\"\n SELECT timestamp, `value` FROM indexes\n WHERE symbol = %s AND timestamp >= %s AND timestamp <= %s;\"\"\", conn, params=[symbol, start_time, end_time])\n index_info = query_to_dict(\"SELECT * FROM index_metadata WHERE symbol = %s\", symbol)[0]\n\n # normalizes index to the same starting scale as the user\n df[\"value\"] = STARTING_VIRTUAL_CASH * df[\"value\"] / base_value\n df[\"username\"] = index_info[\"name\"]\n\n # When a game kicks off, it will generally be that case that there won't be an index data point at exactly that\n # time. 
We solve this here, create a synthetic \"anchor\" data point that starts at the same time at the game\n trade_start = make_index_start_time(start_time)\n return pd.concat([pd.DataFrame(dict(username=index_info[\"name\"], timestamp=[trade_start],\n value=[STARTING_VIRTUAL_CASH])), df])", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def get_stock_data(ticker_name, s3_resource, s3_bucket, prefix='') -> dict:\n if ticker_name.upper() == \"IBM\":\n return get_ibm_adj_cls_from_s3(s3_resource, s3_bucket, prefix)\n elif ticker_name.upper() == \"AMZN\":\n return get_amazon_adj_cls_from_s3(s3_resource, s3_bucket, prefix)\n elif ticker_name.upper() == \"AAPL\":\n return get_apple_adj_cls_from_s3(s3_resource, s3_bucket, prefix)\n elif ticker_name.upper() == \"GOOGL\":\n return get_google_adj_cls_from_s3(s3_resource, s3_bucket, prefix)\n else:\n # TODO: add input error handling\n return None", "def get_data(list_data_tuples):\n \n \n benchmark_symbol=list_data_tuples[0][0]; # First element is the benchmark symbol\n \n #print benchmark_symbol\n \n df=pd.DataFrame(index=list_data_tuples[0][1]['data'].index) # First dataframe index is nothing but date\n \n for tpl in list_data_tuples:\n #print tpl[0]\n df_temp = pd.DataFrame(tpl[1]['data']['Adj. 
Close'],index=tpl[1]['data'].index)\n df_temp = df_temp.rename(columns={'Adj. Close': tpl[0]}) # tpl[0] is the symbol\n #print df_temp,tpl[0]\n df = df.join(df_temp)\n if tpl[0] == benchmark_symbol: # drop dates SPY did not trade\n df = df.dropna(subset=[benchmark_symbol])\n\n df=df.dropna(axis=0) # This drops any NaN values especially if the stock price has no information\n \n return df", "def trade_info(trade_id: TradeId) -> TradeData:\n trade = find_trade(trade_id)\n\n return TradeData(\n trade_id=TradeId(uuid.UUID(trade.id)),\n direction=trade.direction,\n pair=trade.quote.currency_pair.value,\n amount=trade.quote.amount,\n quote=QuoteData(\n quote_id=QuoteId(uuid.UUID(trade.quote.id)),\n rate=Rate(trade.quote.currency_pair.value, trade.quote.rate),\n expires_at=trade.quote.expires_at,\n amount=trade.quote.amount,\n ),\n status=trade.status,\n tx_version=trade.tx_version,\n )", "def internetData(tickerSymbol):\n #this qeureys the SQL database for all stock data\n temp = Stock.query.filter_by(ticker=tickerSymbol).all()\n #init the lists to store data for output\n prices = []#closing price\n volumes = []\n \n for i in temp:\n prices.append(i.close)\n volumes.append(i.volume)\n print tickerSymbol\n print \"prices length is \" + str(len(prices))\n print \"volumes length is \" + str(len(volumes))\n outputDict = {'Prices':prices,'Volumes':volumes}\n return outputDict", "async def _refresh_volume_derivatives(self, pair: str):\n\n if not self.base_24hr_volumes[pair][0]:\n return\n\n self.base_24hr_volumes[pair][1].append(0)\n for index in range(1, len(self.base_24hr_volumes[pair][0])):\n volume = self.base_24hr_volumes[pair][0][index]\n prev_volume = self.base_24hr_volumes[pair][0][index - 1]\n norm_derivative = (volume - prev_volume) / volume * 100.0\n self.base_24hr_volumes[pair][1].append(norm_derivative)\n\n convert_pair = common.get_pair_trade_base(pair)\n if not convert_pair:\n return\n\n try:\n source_index = len(self.close_times[pair]) - 1\n convert_index = self.close_times[convert_pair].index(self.close_times[pair][-1])\n\n except ValueError:\n try:\n convert_index = len(self.close_times[convert_pair]) - 1\n source_index = self.close_times[pair].index(self.close_times[convert_pair][-1])\n convert_volume = self.base_24hr_volumes[convert_pair][1][-1]\n\n for index in range(len(self.close_times[pair]) - 1, source_index, -1):\n adjusted_volume = (self.base_24hr_volumes[pair][1][index] + convert_volume) / 2\n self.base_24hr_volumes[pair][1][index] = adjusted_volume\n\n self.log.debug(\"{} last {} averaged volume derivates are approximate.\", pair,\n len(self.close_times[pair]) - source_index)\n\n except ValueError:\n self.log.error(\"{} ends at {} before start of convert pair {} data at {}.\",\n pair, self.close_times[pair][-1], convert_pair, self.close_times[convert_pair][0])\n return\n\n for index in range(source_index, -1, -1):\n if convert_index > -1:\n convert_volume = self.base_24hr_volumes[convert_pair][1][convert_index]\n else:\n convert_volume = self.base_24hr_volumes[convert_pair][1][0]\n\n adjusted_volume = (self.base_24hr_volumes[pair][1][index] + convert_volume) / 2\n self.base_24hr_volumes[pair][1][index] = adjusted_volume\n convert_index -= 1\n\n if convert_index < 0:\n self.log.debug(\"{} first {} average volume derivatives are approximate.\", pair, convert_index * -1)", "def test_09_transactions_by_asset(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n buy_stock_ibm = Transaction.buy_stock(\n portfolio=p,\n asset=\"IBM\",\n 
t_currency=TRANSACTION_CURRENCY_USD,\n amount=32,\n unit_price=144.96,\n user=user\n )\n\n t = Transaction.get_transactions_by_asset(p, \"IBM\")\n self.assertEqual(len(t), 2,\n msg=\"Transaction is NOT returning a valid list of transaction by asset\")\n print(\"Transaction get transactions by asset is returning the following list: {}\".format(\n t,\n ))", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += 
self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def query_stock_prices(db_path, ticker, price, filter_zero_volume):\n\n try:\n db_connection = sqlite3.connect(db_path)\n except sqlite3.Error as e:\n print(e)\n return False\n\n if type(ticker) == str:\n if ticker.lower() == 'all_available': # if is all_available, transform ticker into a list using the function\n ticker = get_all_available_stock_table(db_path)\n else: # if not, will consider the string is a stock ticker\n if filter_zero_volume == True:\n main_df = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s WHERE volume>0\" % (price,ticker), db_connection)\n else:\n main_df = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s\" % (price, ticker), db_connection)\n main_df['formatted_date'] = pd.to_datetime(main_df['formatted_date'])\n main_df.set_index('formatted_date',drop=True,inplace=True)\n main_df.columns = [ticker]\n\n else:\n lst_ = []\n for t_ in ticker:\n if filter_zero_volume == True:\n df_ = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s WHERE volume>0\" % (price, t_), db_connection)\n else:\n df_ = pd.read_sql_query(\"SELECT formatted_date,%s FROM %s\" % (price, t_), db_connection)\n\n df_['formatted_date'] = pd.to_datetime(df_['formatted_date'])\n df_.drop_duplicates(subset='formatted_date', inplace=True)\n df_.set_index('formatted_date', drop=True, inplace=True)\n df_.columns = [t_]\n lst_.append(df_) # storing the DataFrame into a list\n\n main_df = pd.concat(lst_, axis=1, sort=False) # concatenating the list into one main DataFrame\n\n main_df.sort_index(inplace=True)\n main_df = main_df[(main_df.index.year >= 2000)] # filtra ano < 2000\n\n db_connection.close()\n\n return main_df", "def make_asset_param_from_pair(pair, prefix):\n if pair[prefix + \"_asset_issuer\"] == \"native\":\n return make_asset_params(prefix, \"native\", \"\", \"\")\n else:\n asset_code = pair[prefix + \"_asset_code\"]\n if len(asset_code) > 12:\n raise ValueError(\"asset 
code longer than 12 characters\")\n asset_type = \"credit_alphanum4\" if len(asset_code) <= 4 else \"credit_alphanum12\"\n asset_issuer = pair[prefix + \"_asset_issuer\"]\n return make_asset_params(prefix, asset_type, asset_code, asset_issuer)", "def dataframe_raw_trades():\n data = [\n ['701', '1', '1000', '700', 'A1'],\n ['002', '1', '1000', '1170', 'B2'],\n ['103', '2', '500', '200', 'C3']\n ]\n df_cols = ['CorrelationID', 'NumberOfTrades', 'Limit', 'Value', 'TradeID']\n df = pd.DataFrame(data, columns=df_cols)\n return df", "def get_adjusted_stock(stockSymbol, full_data=False, start_date=None, end_date=None, check_stockSymbol=True):\n\n df = get_data(stockSymbol, full_data, start_date,\n end_date, check_stockSymbol)\n df = get_adjusted_data(stockSymbol, df)\n\n return df", "def __mimic_trades_for_benchmark(self, full_shares: bool = False):\n\n # Create dataframe to store benchmark trades\n self.benchmark_trades = self.__transactions[\n [\"Date\", \"Type\", \"Investment\"]\n ].copy()\n\n # Set current price of benchmark\n self.benchmark_trades[\"Last price\"] = self.benchmark_historical_prices[-1]\n\n # Map historical prices into trades\n self.benchmark_trades[[\"Benchmark Quantity\"]] = float(0)\n benchmark_historical_prices = pd.DataFrame(self.benchmark_historical_prices)\n benchmark_historical_prices.columns.values[0] = \"Trade price\"\n self.benchmark_trades = self.benchmark_trades.set_index(\"Date\")\n self.benchmark_trades.index = pd.to_datetime(self.benchmark_trades.index)\n self.benchmark_trades = self.benchmark_trades.merge(\n benchmark_historical_prices, how=\"left\", left_index=True, right_index=True\n )\n self.benchmark_trades = self.benchmark_trades.reset_index()\n self.benchmark_trades[\"Trade price\"] = self.benchmark_trades[\n \"Trade price\"\n ].fillna(method=\"ffill\")\n\n # Calculate benchmark investment quantity\n if full_shares:\n self.benchmark_trades[\"Benchmark Quantity\"] = np.floor(\n self.benchmark_trades[\"Investment\"]\n / self.benchmark_trades[\"Trade price\"]\n )\n else:\n self.benchmark_trades[\"Benchmark Quantity\"] = (\n self.benchmark_trades[\"Investment\"]\n / self.benchmark_trades[\"Trade price\"]\n )\n\n self.benchmark_trades[\"Benchmark Investment\"] = (\n self.benchmark_trades[\"Trade price\"]\n * self.benchmark_trades[\"Benchmark Quantity\"]\n )\n self.benchmark_trades[\"Benchmark Value\"] = (\n self.benchmark_trades[\"Last price\"]\n * self.benchmark_trades[\"Benchmark Quantity\"]\n )\n self.benchmark_trades[\"Benchmark % Return\"] = (\n self.benchmark_trades[\"Benchmark Value\"]\n / self.benchmark_trades[\"Benchmark Investment\"]\n - 1\n )\n self.benchmark_trades[\"Benchmark Abs Return\"] = (\n self.benchmark_trades[\"Benchmark Value\"]\n - self.benchmark_trades[\"Benchmark Investment\"]\n )\n # TODO: To add alpha here, we must pull prices from original trades and get last price\n # for each of those trades. 
Then just calculate returns and compare to benchmark\n self.benchmark_trades.fillna(0, inplace=True)\n\n if full_shares:\n console.print(\n \"Note that with full shares (-s) enabled, there will be a mismatch between how much you invested in the\"\n f\" portfolio ({round(sum(self.portfolio_trades['Portfolio Investment']), 2)}) and how much you invested\"\n f\" in the benchmark ({round(sum(self.benchmark_trades['Benchmark Investment']), 2)}).\"\n )", "def map_to_trades(self, raw_trades: HitbtcRawTrades) -> HitbtcTrades:\n\n trades: HitbtcTrades = {}\n for symbol, raw_symbol_trades in raw_trades.items():\n trades[symbol] = self.map_to_symbol_trades(raw_symbol_trades)\n return trades", "def get_stock(symbol, interval):\n \n try:\n \n time_interval = TIME_INTERVALS[interval]\n \n if(time_interval == TIME_INTERVALS['Intraday']):\n json_data = requests.request('GET', 'https://www.alphavantage.co'+\n '/query?function=TIME_SERIES_INTRADAY&symbol='+symbol+\n '&interval=1min&apikey='+API_KEY).json()\n data_frame = pd.DataFrame.from_records(json_data['Time Series (1min)'])\n \n else:\n json_data = requests.request('GET', 'https://www.alphavantage.co'+\n '/query?function='+time_interval+'&symbol='+symbol+\n '&apikey='+API_KEY).json()\n \n data_key = ''\n \n if(time_interval == TIME_INTERVALS['Daily']):\n data_key = 'Time Series (Daily)'\n elif(time_interval == TIME_INTERVALS['Weekly']):\n data_key = 'Weekly Time Series'\n else:\n data_key = 'Monthly Time Series'\n \n data_frame = pd.DataFrame.from_records(json_data[data_key])\n \n data_frame = data_frame.transpose()\n data_frame.columns = ['Open', 'High', 'Low', 'Close', 'Volume']\n return data_frame\n \n except:\n print(\"Error while loading data\")\n return None", "def populate_buy_trend(dataframe: DataFrame, metadata: dict) -> DataFrame:\n conditions = []\n\n conditions.append(\n ((dataframe['bull'] > 0) & qtpylib.crossed_below(dataframe['rsi'], params['bull-buy-rsi-value'])) |\n (~(dataframe['bull'] > 0) & qtpylib.crossed_below(dataframe['rsi'], params['bear-buy-rsi-value']))\n )\n\n conditions.append(dataframe['volume'] > 0)\n\n dataframe.loc[\n reduce(lambda x, y: x & y, conditions),\n 'buy'] = 1\n\n return dataframe", "def _import_cryptocom_double_entries(self, data: Any, double_type: str) -> None:\n double_rows: Dict[Any, Dict[str, Any]] = {}\n debited_row = None\n credited_row = None\n for row in data:\n if row['Transaction Kind'] == f'{double_type}_debited':\n timestamp = deserialize_timestamp_from_date(\n date=row['Timestamp (UTC)'],\n formatstr='%Y-%m-%d %H:%M:%S',\n location='crypto.com',\n )\n if timestamp not in double_rows:\n double_rows[timestamp] = {}\n double_rows[timestamp]['debited'] = row\n elif row['Transaction Kind'] == f'{double_type}_credited':\n timestamp = deserialize_timestamp_from_date(\n date=row['Timestamp (UTC)'],\n formatstr='%Y-%m-%d %H:%M:%S',\n location='crypto.com',\n )\n if timestamp not in double_rows:\n double_rows[timestamp] = {}\n double_rows[timestamp]['credited'] = row\n\n for timestamp in double_rows:\n credited_row = double_rows[timestamp]['credited']\n debited_row = double_rows[timestamp]['debited']\n if credited_row is not None and debited_row is not None:\n description = credited_row['Transaction Description']\n notes = f'{description}\\nSource: crypto.com (CSV import)'\n # No fees here\n fee = Fee(ZERO)\n fee_currency = A_USD\n\n base_asset = Asset(credited_row['Currency'])\n quote_asset = Asset(debited_row['Currency'])\n pair = TradePair(f'{base_asset.identifier}_{quote_asset.identifier}')\n 
base_amount_bought = deserialize_asset_amount(credited_row['Amount'])\n quote_amount_sold = deserialize_asset_amount(debited_row['Amount'])\n rate = Price(abs(base_amount_bought / quote_amount_sold))\n\n trade = Trade(\n timestamp=timestamp,\n location=Location.CRYPTOCOM,\n pair=pair,\n trade_type=TradeType.BUY,\n amount=base_amount_bought,\n rate=rate,\n fee=fee,\n fee_currency=fee_currency,\n link='',\n notes=notes,\n )\n self.db.add_trades([trade])", "def volatility_indicators(df):\n p = \"volatility_\"\n high, low, close = convert_df_to_features(df, False)\n\n # ATR\n atr = AverageTrueRange(high, low, close, 14)\n df[p + \"atr_14\"] = atr.average_true_range()\n df[p + \"atr_o_close\"] = list(map(lambda a,\n c: a / c, df[p + \"atr_14\"], close))\n # BB\n bb = BollingerBands(close, window=10, window_dev=2)\n df[p + \"bb_wband_10\"] = bb.bollinger_wband()\n\n bb = BollingerBands(close, window=100, window_dev=2)\n df[p + \"bb_pband_100\"] = bb.bollinger_pband()\n\n bb = BollingerBands(close, window=200, window_dev=2)\n df[p + \"bb_wband_200\"] = bb.bollinger_wband()\n\n bb = BollingerBands(close, window=20, window_dev=2)\n df[p + \"bb_hband_o_close\"] = list(map(lambda l,\n c: (l - c) / c, bb.bollinger_hband(), close))\n\n # DC\n dc = DonchianChannel(high, low, close, window=50)\n df[p + \"dc_pband_50\"] = dc.donchian_channel_pband()\n dc = DonchianChannel(high, low, close, window=10)\n df[p + \"dc_wband_10\"] = dc.donchian_channel_wband()\n # KC\n kc = KeltnerChannel(high, low, close, window=50)\n df[p + \"pband_50\"] = kc.keltner_channel_pband()\n kc = KeltnerChannel(high, low, close, window=20)\n df[p + \"wband_20\"] = kc.keltner_channel_wband()\n # UI\n ui = UlcerIndex(close, window=30)\n df[p + \"ui_30\"] = ui.ulcer_index()\n return df", "def add_trades(trdf):\n trade_list = trdf.Snapshot()\n if len(trade_list) > 0:\n main_dictStruct[str(trdf.Name())] = dict(\n (trade.Oid(), [trade.Oid(), trade.Quantity(), trade.Price() / 100, trade.TradeTime()])\n for trade in trade_list)", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n 
available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def read_trade() -> dict:\n try:\n ddb_table.get_item(Key = data_to_get)\n except Exception as e:\n _LOGGER.error('Problem updating dynamodb trade data. {0}'.format(e))\n raise Exception('Problem updating dynamodb trade data. 
{0}'.format(e))", "def trading_alg(self,table_name = None, buy_now = False, strategy_name = \"sma9\"):\n \n self.bs.buyed_stocks = 0\n self.bs.money = self.bs.startCredit\n spy_stocks = self.load_data(table_name = table_name, symbols = [\"SPY\"])\n spy_stocks = FinI.add_indicators(spy_stocks)\n \n if self.symbols:\n symbols = self.symbols\n else:\n symbols = self.db.get_symbols()\n\n # symbols = [\"INTC\",\"BYND\",\"ZM\",\"NKE\",\"HIMX\",\"JKS\",\"ENPH\",\"DUK\",\"GE\",\"DIS\",\"LEVI\",\"NVAX\",\"SLCA\",\"GPS\"]\n \n for symbol in symbols:\n print(\"symbol: \" + str(symbol))\n \n sub_data = self.load_data(table_name = table_name, symbols = symbol)\n if len(sub_data) < 1:\n break\n\n self.bt_stocks = FinI.add_indicators(sub_data)\n self.bt_stocks = FinI.add_fib(self.bt_stocks)\n # print(self.bt_stocks)\n print(self.bt_stocks[\"sma30\"])\n print(\"calculating percent change:\" + str(symbol))\n # sub_data = self.stocks.loc[self.stocks.sym ==symbol[0]].sort_values(by='index')\n \n self.symbols = symbol[0]\n \n # self.prev_stock = sub_data.iloc[0]\n # self.bt_stocks.iloc[0] = sub_data.iloc[0]\n\n # self.sell_marks = self.sell_marks.iloc[0:0]\n # self.buy_marks = self.buy_marks.iloc[0:0]\n self.bs.transactions = 0\n self.bs.profit_perc = 0\n \n # trend_indicator = \n # TODO mechanism for select strategies\n # self.ts_boll(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks)\n self.ts_eval(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_logic = strategy_name)\n\n # call the method with passed and assembled name\n # method = getattr(self, 'ts_' + strategy_name)\n # method(buy_now = buy_now, at_settings = None, symbol = symbol, spy_stocks = spy_stocks, strategy_name = strategy_name)", "def process_stocks(s3_working_bucket: str, date: tuple) -> DataFrame:\n\n logging.debug(\"Start reading stocks csv.\")\n df_stocks = stdm.read_csv(spark, s3_working_bucket, date, \"stocks\")\n\n logging.debug(\"Calling gmt_unix_to_datetime function.\")\n df_stocks = stp.gmt_unix_to_datetime(df_stocks, \"timestamp\")\n\n logging.debug(\"Calling order_by_col function.\")\n df_stocks = stp.order_by_col(df_stocks, \"datetime\")\n\n return df_stocks", "def get_validated_pairs_data(self):\n validated_pairs_prices = []\n for pair in self.__validated_pairs:\n stock1 = pair[0]\n stock2 = pair[1]\n validated_pairs_prices.append(self.__price_data[stock1])\n validated_pairs_prices.append(self.__price_data[stock2])\n\n validated_pairs_prices = pd.DataFrame(validated_pairs_prices).T\n\n return validated_pairs_prices", "async def _update_volume_derivatives(self, pair: str, diff: int, start_index: int):\n\n if not self.base_24hr_volumes[pair][0] or not self.base_24hr_volumes[pair][1]:\n return\n\n source_length = len(self.base_24hr_volumes[pair][0])\n for index in range(source_length - diff, source_length):\n volume = self.base_24hr_volumes[pair][0][index]\n prev_volume = self.base_24hr_volumes[pair][0][index - 1]\n norm_derivative = (volume - prev_volume) / volume * 100.0\n self.base_24hr_volumes[pair][1].append(norm_derivative)\n\n convert_pair = common.get_pair_trade_base(pair)\n if not convert_pair:\n return\n\n missing = 0\n\n for index in range(diff):\n try:\n convert_volume = self.base_24hr_volumes[convert_pair][1][start_index + index]\n except IndexError:\n convert_volume = self.base_24hr_volumes[convert_pair][1][-1]\n missing += 1\n\n adjusted_volume = (self.base_24hr_volumes[pair][1][start_index + index] + convert_volume) / 2\n 
self.base_24hr_volumes[pair][1][start_index + index] = adjusted_volume\n\n if missing > 0:\n self.log.debug(\"{} last {} averaged volume derivates are approximate.\", pair, missing)", "def get_stock_prices(ticker, start_date, end_date=None):\n if end_date is None:\n end_date = dt.date.today()\n\n shares = Share(ticker)\n df = pd.DataFrame(shares.get_historical(start_date.isoformat(),\n end_date.isoformat()))\n return df.set_index(\"Date\", drop=True) \\\n .drop(\"Symbol\", axis=1) \\\n .astype(float) \\\n .sort_index()", "async def get_active_exchange_markets(cls) -> pd.DataFrame:\n async with aiohttp.ClientSession() as client:\n async with client.get(\"https://api.radarrelay.com/v2/markets?include=ticker,stats\") as response:\n response: aiohttp.ClientResponse = response\n if response.status != 200:\n raise IOError(f\"Error fetching active Radar Relay markets. HTTP status is {response.status}.\")\n data = await response.json()\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=data, index=\"id\")\n fetch_markets: pd.DataFrame = all_markets[\n lambda df: [FETCH_MARKET_SYMBOL_PATTERN.search(i) is not None for i in df.index]\n ]\n\n weth_dai_price: float = float(fetch_markets.loc[\"WETH-DAI\"][\"ticker\"][\"price\"])\n dai_volume: List[float] = []\n for row in fetch_markets.itertuples():\n product_name: str = row.Index\n base_volume: float = float(row.stats[\"volume24Hour\"])\n if product_name.endswith(\"WETH\"):\n dai_volume.append(weth_dai_price * base_volume)\n else:\n dai_volume.append(base_volume)\n fetch_markets.loc[:, \"DAIVolume\"] = dai_volume\n\n return fetch_markets.sort_values(\"DAIVolume\", ascending=False)", "def buy(self, trading_pair: str, amount: Decimal, order_type: OrderType, price: Decimal) -> str:\n return self.place_order(True, trading_pair, amount, price)", "def _hand_data_dumb(context, data):\n price = data.current(context.asset, 'price')\n cash = context.portfolio.cash\n try_buy(\n context, data, context.MIN_TRADE,\n )\n record(\n price=price,\n cash=cash,\n volume=data.current(context.asset, 'volume'),\n starting_cash=context.portfolio.starting_cash,\n leverage=context.account.leverage,\n positions_asset=context.portfolio.positions[context.asset].amount,\n )", "def record_to_tuple(record):\n return float(record[\"base_volume\"]), float(record[\"counter_volume\"]), int(record[\"trade_count\"])", "def assets(self, short_name=False, quantity=False):\n table = Table(\n 3 + short_name + quantity,\n headers=(['Account']\n + (['Name'] if short_name else [])\n + (['Quantity'] if quantity else [])\n + ['Asset', 'Value']),\n coltypes=(['str']\n + (['str'] if short_name else [])\n + (['float'] if quantity else [])\n + ['str', 'dollars']))\n for account in self.accounts():\n for asset in account.assets():\n row = ([account.name()]\n + ([f'{asset.short_name()}'] if short_name else [])\n + [asset.name(), asset.adjusted_value()])\n if quantity:\n row.insert(1 + short_name, asset.shares()\n if hasattr(asset, 'shares') else None)\n table.add_row(row)\n return table", "def get_product_vs_offer_and_volume(self, df):\n\n product_vs_min_volume = {}\n product_offer = {}\n base_product_vs_offer_product = {}\n\n for i in df.itertuples():\n product_vs_min_volume[i[4].lower()] = i[5]\n product_offer[i[4].lower()] = i[1]\n base_product_vs_offer_product[i[4].lower()] = i[7].lower()\n\n return product_vs_min_volume, product_offer, base_product_vs_offer_product", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not 
etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def merge(df):\n return (df['utterance_t-3'] + df['utterance_t-2'] + df['utterance_t-1'] \\\n + df['utterance_t'])", "def get_stock_data(symbol):\n # Set current dates\n start = date(date.today().year, 1, 1) # first of current year\n end = date.today() # today\n\n # Get yahoo Yahoo data\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n\n # Rename columns\n data.columns = [\"Highest price (USD)\",\n \"Lowest price (USD)\",\n \"Opening price (USD)\",\n \"Closing price (USD)\",\n \"Volume\",\n \"Adjusted closing price (USD)\"]\n\n return data", "def calculate_real_vol(df: pd.DataFrame)->pd.DataFrame:\n # Real FX Vol\n # Creating a DataFrame with real exchange rate for each country\n real_fx = df.loc[:, (slice(None), 'r')]\n \n # Getting the returns of real exchange rate\n real_rets = real_fx.pct_change()*100\n real_rets.columns = real_rets.columns.droplevel(-1)\n # Standarizing 'real_rets'\n norm_real_rets = (real_rets)/real_rets.std()\n # Studying volatility\n real_vol = {}\n model = {}\n for currcy in real_rets:\n real_vol[currcy], model[currcy] = garch_volatility(norm_real_rets[currcy].dropna(), out=5)\n # Rescalling and annualizing volatility?\n real_vol = pd.concat(real_vol, axis=1)\n #r_vol = real_vol*real_rets.std()*np.sqrt(12)\n \n return real_vol", "def transaction(self,input):\r\n output = {}\r\n output['load_real'] = input['voltage_real']**2/2800.\r\n output['load_imag'] = input['voltage_imag']**2/2800.\r\n output['bid'] = input['price']*0.75\r\n return output", "def convert_transaction_history(conn, df, broker, currency):\n # clean data according to brokerage firm\n if broker == \"firstrade\":\n df = clean_firstrade(df)\n\n # get exchange rates of the selected currency\n df['Rate Acquired'] = df.apply(lambda x: get_rates_with_cache(\n conn=conn, currency=currency, date=x['Date Acquired']),\n axis=1)\n df['Rate Sold'] = df.apply(lambda x: get_rates_with_cache(\n conn=conn, currency=currency, date=x['Date Sold']),\n axis=1)\n df = df.round({'Rate Acquired': 2, 'Rate Sold': 2})\n\n # calculate gain/loss in the selected currency\n df['Converted Cost'] = df['Cost'] * df['Rate Acquired']\n df['Converted Sales'] = df['Sales'] * df['Rate Sold']\n df = df.round({'Converted Cost': 2, 'Converted Sales': 2})\n\n # arrange columns\n df = df[[\n 'Symbol', 'Quantity', 'Date Acquired', 'Cost', 'Rate Acquired',\n 'Converted Cost', 'Date Sold', 'Sales', 'Rate Sold', 'Converted Sales'\n ]]\n\n # calculate gain/loss\n 
df['Gain&Loss'] = df['Converted Sales'] - df['Converted Cost']\n df = df.round({'Gain&Loss': 2})\n\n return df.sort_values([\"Symbol\", \"Date Sold\"])", "def create_order(df_stock, df_signal, moneyness=('OTM', 'ITM'),\n cycle=0, strike=0, expire=(False, True)):\n symbol = df_stock.ix[df_stock.index.values[0]]['symbol']\n\n tb_closes = {\n stock.date.strftime('%Y-%m-%d'): np.float(stock.close) for stock in\n Stock.objects.filter(Q(symbol=symbol) & Q(source='thinkback'))\n }\n\n holding = df_signal['holding'].apply(\n lambda x: int(x / np.timedelta64(1, 'D'))\n ).astype(np.int).min()\n\n data = list()\n dates0, options0 = get_options_by_cycle_strike(\n symbol=symbol,\n name='CALL',\n dates0=df_signal['date0'],\n dte=holding,\n moneyness=moneyness,\n cycle=cycle,\n strike=strike\n )\n\n for date0, (index, signal) in zip(dates0, df_signal.iterrows()):\n date1 = signal['date1']\n\n if date0:\n option0 = options0.get(date=date0)\n\n option1 = None\n if option0 and option0.bid > 0:\n date1, option1 = get_option_by_contract_date(option0.contract, date1)\n\n if option0 and option1:\n stock0 = tb_closes[option0.date.strftime('%Y-%m-%d')]\n close0 = stock0 - np.float(option0.bid)\n\n ask1 = 0\n if int(expire):\n ask1 = np.float(\n tb_closes[option1.date.strftime('%Y-%m-%d')]\n - np.float(option0.contract.strike)\n )\n ask1 = ask1 if ask1 > 0 else 0.0\n\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(ask1)\n else:\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(option1.ask)\n\n data.append({\n 'date0': option0.date,\n 'date1': date1,\n 'signal0': 'BUY',\n 'signal1': 'SELL',\n 'stock0': stock0,\n 'stock1': stock1,\n 'option0': option0.bid,\n 'option1': ask1 if expire else option1.ask,\n 'close0': np.round(close0, 2), # buy using ask\n 'close1': np.round(close1, 2), # sell using bid\n 'option_code': option0.contract.option_code,\n 'strike': np.float(option0.contract.strike),\n 'dte0': np.int(option0.dte),\n 'dte1': np.int(option1.dte),\n 'intrinsic0': np.float(option0.intrinsic),\n 'intrinsic1': np.float(option1.intrinsic)\n })\n\n df = DataFrame()\n if len(data):\n df = DataFrame(data, columns=[\n 'date0', 'date1', 'signal0', 'signal1',\n 'stock0', 'stock1', 'option0', 'option1', 'close0', 'close1',\n 'option_code', 'strike', 'dte0', 'dte1',\n 'intrinsic0', 'intrinsic1'\n ])\n\n df['holding'] = df['date1'] - df['date0']\n df['pct_chg'] = np.round((df['close1'] - df['close0']) / df['close0'], 2)\n\n f = lambda x: np.round(x['pct_chg'] * -1 if x['signal0'] == 'SELL' else x['pct_chg'], 2)\n df['pct_chg'] = df.apply(f, axis=1)\n\n df['sqm0'] = 100\n df['sqm1'] = -100\n df['oqm0'] = -1\n df['oqm1'] = 1\n\n return df", "def market_value(dh: DataHandler):\n scenario_order = [\n str(dh.scenarios.active_scenarios[keys][\"name\"])\n for keys in dh.scenarios.active_scenarios\n ]\n\n # adding all vintage classes together\n df_supply = dh.get(\"o_supply\").groupby([\"r\", \"tec_supply\", \"t\"]).sum()\n df_supply = df_supply.stack().unstack(\"t\").T\n df_price = dh.get(\"o_prices\").stack().unstack(\"t\").T\n #print(df_supply.mul(df_price, fill_value = 0))\n\n # calculate market value\n df_mv = (\n df_supply.mul(df_price)\n .sum()\n .div(df_supply.sum())\n .unstack(\"scenario\")\n )\n df_mv = df_mv[scenario_order]\n\n df_PtHydrogen = pd.concat(\n [dh.get(\"o_h2price_sell\")], keys=[\"PtHydrogen\"], names=[\"tec_supply\"]\n )\n df_PtHydrogen = df_PtHydrogen.reorder_levels([\"r\", 
\"tec_supply\"])\n df_mv = pd.concat([df_mv, df_PtHydrogen])\n df_mv.fillna(0,inplace=True)\n\n return df_mv", "def append_price_data_to_balance_histories(df: pd.DataFrame) -> pd.DataFrame:\n # Resample balances over the desired time interval within each symbol\n min_time = datetime_to_posix(df[\"timestamp\"].min())\n max_time = datetime_to_posix(df[\"timestamp\"].max())\n # Now add price data\n symbols = df[\"symbol\"].unique()\n price_df = get_price_histories(symbols, min_time, max_time)\n price_df[\"timestamp\"] = price_df[\"timestamp\"].apply(lambda x: posix_to_datetime(x))\n price_subsets = []\n for symbol in symbols:\n balance_subset = df[df[\"symbol\"] == symbol]\n balance_subset.sort_values(\"timestamp\", inplace=True)\n prices_subset = price_df[price_df[\"symbol\"] == symbol]\n if prices_subset.empty and symbol == \"Cash\":\n # Special handling for cash\n balance_subset.loc[:, \"price\"] = 1\n price_subsets.append(balance_subset)\n continue\n del prices_subset[\"symbol\"]\n price_subsets.append(pd.merge_asof(balance_subset, prices_subset, on=\"timestamp\", direction=\"nearest\"))\n return pd.concat(price_subsets, axis=0)", "async def update_open_trades(self, pair: str):\n\n remove_indexes = []\n\n for index, trade in enumerate(self.trades[pair]['open']):\n if await self._handle_deferred_push(trade):\n remove_indexes.append(index)\n elif await self._handle_deferred_sell(trade):\n remove_indexes.append(index)\n elif await self._handle_stop_loss(trade):\n remove_indexes.append(index)\n\n if not trade['filled']:\n await self._trade_methods['update'](trade)\n\n for index in reversed(remove_indexes):\n del self.trades[pair]['open'][index]\n\n self.save_attr('trade_stats', max_depth=2, filter_items=[pair], filter_keys=[self.time_prefix])\n self.save_attr('last_trades', max_depth=1, filter_items=[pair])\n self.save_attr('trades', max_depth=1, filter_items=[pair])", "def map_to_trading_currency_balance(self, raw_balance: HitbtcRawTradingCurrencyBalanceModel\n ) -> HitbtcTradingCurrencyBalanceModel:\n\n currency = raw_balance[\"currency\"]\n available = Decimal(raw_balance[\"available\"])\n reserved = Decimal(raw_balance[\"reserved\"])\n res = HitbtcTradingCurrencyBalanceModel(\n currency=currency,\n available=available,\n reserved=reserved)\n return res", "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # success: '1',\n # data: {\n # pairs: [\n # {\n # name: 'btc_jpy',\n # base_asset: 'btc',\n # quote_asset: 'jpy',\n # maker_fee_rate_base: '0',\n # taker_fee_rate_base: '0',\n # maker_fee_rate_quote: '-0.0002',\n # taker_fee_rate_quote: '0.0012',\n # unit_amount: '0.0001',\n # limit_max_amount: '1000',\n # market_max_amount: '10',\n # market_allowance_rate: '0.2',\n # price_digits: '0',\n # amount_digits: '4',\n # is_enabled: True,\n # stop_order: False,\n # stop_order_and_cancel: False\n # },\n # ...\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n pairs = self.safe_value(data, 'pairs', [])\n result = {}\n for i in range(0, len(pairs)):\n pair = pairs[i]\n marketId = self.safe_string(pair, 'name')\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = {\n 'info': pair,\n 'symbol': symbol,\n 'maker': self.safe_number(pair, 'maker_fee_rate_quote'),\n 'taker': self.safe_number(pair, 'taker_fee_rate_quote'),\n 'percentage': True,\n 'tierBased': False,\n }\n return result", "def read_portfolio_history(self, price_provider: Callable[[Security, 
dt.datetime], float]) -> Dict[dt.datetime, float]:\n if self._transactions_dataframe.empty:\n return dict()\n\n dated_transactions = self._transactions_dataframe.set_index(TRANSACTION_DATE).sort_index()\n all_share_ids = dated_transactions[SECURITY_ID].unique()\n\n date_index = dated_transactions.index.unique()\n share_history = pd.DataFrame(0, index=date_index, columns=all_share_ids)\n for share_id in all_share_ids:\n relevant_rows = dated_transactions[SECURITY_ID] == share_id\n transactions = dated_transactions.loc[relevant_rows, TRANSACTION_SHARE_AMOUNT]\n share_history.loc[dated_transactions.index[relevant_rows], share_id] = transactions\n share_history[share_id] = share_history[share_id].cumsum()\n\n share_prices = pd.DataFrame(0, index=date_index, columns=all_share_ids)\n for share_id in all_share_ids:\n date_to_price = lambda date: price_provider(Security(share_id), date)\n share_prices[share_id] = np.vectorize(date_to_price)(date_index.to_pydatetime())\n\n portfolio = (share_prices * share_history).apply(np.sum, axis=1)\n\n return {date.to_pydatetime(): price for date, price in portfolio.to_dict().items()}", "def parse_fields(self, cxn, data):\n trade_flag = False\n quote_flag = False\n pid = 'API_Symbol(%s)' % self.symbol\n\n if data == None:\n self.api.force_disconnect('LIVEQUOTE Advise has been terminated by API for %s' % pid)\n return\n\n self.update_rawdata(data)\n\n if 'TRDPRC_1' in data:\n self.last = self.api.parse_tql_float(data['TRDPRC_1'], pid, 'TRDPRC_1')\n trade_flag = True\n if 'TRDTIM_1' in data and 'TRD_DATE' in data:\n self.last_trade_time = ' '.join(self.api.format_barchart_date(data['TRD_DATE'], data['TRDTIM_1'], pid))\n else:\n self.api.error_handler(f\"{self}\", 'TRDPRC_1 without TRD_DATE, TRDTIM_1')\n\n # don't request a barchart update during the symbol init processing\n if self.api.enable_symbol_barchart and (not self.cxn_init):\n # query a barchart update after each trade\n # TODO: revisit this: can a barchart use ADVISE instead?\n self.barchart_query('-5', self.barchart_update, self.barchart_query_failed)\n\n if 'HIGH_1' in data:\n self.high = self.api.parse_tql_float(data['HIGH_1'], pid, 'HIGH_1')\n trade_flag = True\n if 'LOW_1' in data:\n self.low = self.api.parse_tql_float(data['LOW_1'], pid, 'LOW_1')\n trade_flag = True\n if 'TRDVOL_1' in data:\n self.size = self.api.parse_tql_int(data['TRDVOL_1'], pid, 'TRDVOL_1')\n trade_flag = True\n if 'ACVOL_1' in data:\n self.volume = self.api.parse_tql_int(data['ACVOL_1'], pid, 'ACVOL_1')\n trade_flag = True\n if 'BID' in data:\n self.bid = self.api.parse_tql_float(data['BID'], pid, 'BID')\n if self.bid and 'BIDSIZE' in data:\n self.bidsize = self.api.parse_tql_int(data['BIDSIZE'], pid, 'BIDSIZE')\n else:\n self.bidsize = 0\n quote_flag = True\n if 'ASK' in data:\n self.ask = self.api.parse_tql_float(data['ASK'], pid, 'ASK')\n if self.ask and 'ASKSIZE' in data:\n self.asksize = self.api.parse_tql_int(data['ASKSIZE'], pid, 'ASKSIZE')\n else:\n self.asksize = 0\n quote_flag = True\n if 'COMPANY_NAME' in data:\n self.fullname = self.api.parse_tql_str(data['COMPANY_NAME'], pid, 'COMPANY_NAME')\n if 'CUSIP' in data:\n self.cusip = self.api.parse_tql_str(data['CUSIP'], pid, 'CUSIP')\n if 'OPEN_PRC' in data:\n self.open = self.api.parse_tql_float(data['OPEN_PRC'], pid, 'OPEN_PRC')\n if 'HST_CLOSE' in data:\n self.close = self.api.parse_tql_float(data['HST_CLOSE'], pid, 'HST_CLOSE')\n if 'VWAP' in data:\n self.vwap = self.api.parse_tql_float(data['VWAP'], pid, 'VWAP')\n\n if self.api.enable_ticker:\n if quote_flag:\n 
self.update_quote()\n if trade_flag:\n self.update_trade()", "async def refresh_pairs(self):\n\n summaries = await self.api.get_market_summaries()\n if summaries is None:\n self.log.error('Could not get market summaries data.')\n return None\n\n pairs = []\n pair_count = 0\n changes, volumes, min_trade_qtys, min_trade_sizes = await self._extract_filtered_summaries(summaries)\n bases = list(config['min_base_volumes'].keys())\n\n for pair in sorted(volumes, key=volumes.get, reverse=True):\n if await Market.apply_pair_prefer_filter(pair, bases, volumes.keys()):\n continue\n if await self._handle_greylisted(pair):\n continue\n\n pairs.append(pair)\n self.log.debug('Added pair {}: volume {}, change {}.', pair, volumes[pair], changes[pair], verbosity=1)\n\n pair_count += 1\n if config['max_pairs'] and pair_count >= config['max_pairs']:\n break\n\n if config['app_node_index'] is not None:\n pair_splits = list(utils.split(pairs, config['app_node_max']))\n self.pairs = pair_splits[config['app_node_index']] # pylint: disable=E1126\n else:\n self.pairs = pairs\n\n self.extra_base_pairs = [pair for pair in config['base_pairs'] if pair not in pairs]\n self.min_trade_qtys = min_trade_qtys\n self.min_trade_sizes = min_trade_sizes", "def get_swap_assets_transactions(\n contract: bytes, asset_amount: int, microalgo_amount: int,\n private_key: str, first_valid, last_valid, gh, fee):\n address = logic.address(contract)\n _, ints, bytearrays = logic.read_program(contract)\n if not (len(ints) == 10 and len(bytearrays) == 1):\n raise error.WrongContractError(\n \"Wrong contract provided; a limit order contract\" +\n \" is needed\")\n min_trade = ints[4]\n asset_id = ints[6]\n ratn = ints[8]\n ratd = ints[7]\n max_fee = ints[2]\n owner = encoding.encode_address(bytearrays[0])\n\n if microalgo_amount < min_trade:\n raise error.TemplateInputError(\n \"At least \" + str(min_trade) +\n \" microalgos must be requested\")\n\n if asset_amount*ratd < microalgo_amount*ratn:\n raise error.TemplateInputError(\n \"The exchange ratio of assets to microalgos must be at least \"\n + str(ratn) + \" / \" + str(ratd))\n\n txn_1 = transaction.PaymentTxn(\n address, fee, first_valid, last_valid, gh,\n account.address_from_private_key(private_key),\n int(microalgo_amount))\n\n txn_2 = transaction.AssetTransferTxn(\n account.address_from_private_key(private_key), fee,\n first_valid, last_valid, gh, owner, asset_amount, asset_id)\n\n if txn_1.fee > max_fee or txn_2.fee > max_fee:\n raise error.TemplateInputError(\n \"the transaction fee should not be greater than \"\n + str(max_fee))\n\n transaction.assign_group_id([txn_1, txn_2])\n\n lsig = transaction.LogicSig(contract)\n stx_1 = transaction.LogicSigTransaction(txn_1, lsig)\n stx_2 = txn_2.sign(private_key)\n\n return [stx_1, stx_2]", "def returnChartData(self,\n currency_pair,\n start=datetime.now() - timedelta(days=1),\n end=datetime.now(),\n period=300):\n pass" ]
[ "0.5413317", "0.5374446", "0.5327119", "0.5326608", "0.52844787", "0.5255842", "0.52381635", "0.52032465", "0.51819", "0.5160164", "0.5140268", "0.5139721", "0.5139721", "0.51213753", "0.50929576", "0.50690424", "0.50472456", "0.50343525", "0.501496", "0.49923664", "0.49835643", "0.49736023", "0.49674344", "0.49642214", "0.49518022", "0.49460435", "0.49222705", "0.49073943", "0.48923177", "0.48846138", "0.4845038", "0.48363167", "0.4827511", "0.48270097", "0.4824295", "0.48205903", "0.48204604", "0.48175362", "0.48047537", "0.480433", "0.47997805", "0.47952953", "0.4781102", "0.47760254", "0.47662744", "0.4761224", "0.4759876", "0.4753646", "0.47504508", "0.47487122", "0.47330225", "0.47283682", "0.4723942", "0.47225556", "0.47191423", "0.4713734", "0.47093388", "0.47006384", "0.4699177", "0.46934876", "0.46887323", "0.46745744", "0.4672587", "0.46695793", "0.46584454", "0.46535096", "0.46534202", "0.46531412", "0.46455345", "0.46421957", "0.4640067", "0.46305102", "0.462163", "0.46208602", "0.4605522", "0.4604666", "0.46043548", "0.45990223", "0.45940232", "0.45857984", "0.4582032", "0.4581577", "0.45806953", "0.45802924", "0.45763645", "0.45749536", "0.45706692", "0.45688936", "0.4565375", "0.45601478", "0.45580944", "0.45553", "0.45547417", "0.45516732", "0.4550503", "0.45502502", "0.45420864", "0.45417398", "0.45238364", "0.4521433" ]
0.56753194
0
Prompts a user for input. If the user aborts the input by sending an interrupt signal,
def prompt( text: str, default: Optional[str] = None, hide_input: bool = False, confirmation_prompt: bool = False, type: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin value_proc: Optional[Callable[[Optional[str]], Any]] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, show_choices: bool = True, ): result = None # noqa def prompt_func(text): try: return _prompt(text, err=err, hide_input=hide_input) except (KeyboardInterrupt, EOFError): if hide_input: click.echo(None, err=err) raise click.Abort() if value_proc is None: value_proc = convert_type(type, default) prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore while True: while True: value = prompt_func(prompt) if value: break elif default is not None: if isinstance(value_proc, Path): # validate Path default value (exists, dir_okay etc.) value = default break return default try: result = value_proc(value) except click.UsageError as e: click.echo(f"Error: {e.message}", err=err) # noqa: B306 continue if not confirmation_prompt: return result while True: value2 = prompt_func("Repeat for confirmation: ") if value2: break if value == value2: return result click.echo("Error: the two entered values do not match", err=err)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def cont():\n\n try:\n input = raw_input()\n except Exception:\n pass", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def clean_input(prompt):\n try:\n return input(prompt)\n # There is a general handling of KeyboardInterrupt in main() but\n # here it leads to a cleaner exit as the option to quit is returned.\n except KeyboardInterrupt:\n return 'Quit'", "def safe_input():\n try:\n input(\"Please enter something: \")\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def _WaitForAnyUserInput() -> None:\n _get_input('Press any key to continue')", "def prompt_for_exit():\n g.message = c.r + \"Press ctrl-c again to exit\" + c.w\n g.content = generate_songlist_display()\n screen_update()\n\n try:\n userinput = input(c.r + \" > \" + c.w)\n\n except (KeyboardInterrupt, EOFError):\n quits(showlogo=False)\n\n return userinput", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def input_wrapper(msg):\n userinput = input(msg)\n if userinput != 'q':\n return userinput\n else:\n sys.exit()", "def checkInterrupt():\n if wasInterrupted():\n raise KeyboardInterrupt()", "def ask_input(self, prompt):\n self._vim.command('call inputsave()')\n self._vim.command('let user_input = input(\"{} \")'.format(prompt))\n self._vim.command('call inputrestore()')\n response = self._vim.eval('user_input')\n self._vim.command('unlet user_input')\n return response", "def input_for_user_selection(self):\n user_input = \"\"\n while user_input not in range(0, len(self.users)):\n print(\"Pick user from above, or type 'cancel'\")\n user_input = input()\n if user_input == \"cancel\":\n raise ValueError\n user_input = int(user_input)\n return user_input", "def wait() -> None:\n\n process_input(input())", "def wait_for_input(self):\n if self._dont_enter_interactive_mode:\n return\n stop = False\n while True:\n print(\">>> \", end='')\n try:\n command_str = input()\n except EOFError:\n print(\"Exiting interactive mode\")\n break\n stop = self.interpret_command(command_str)\n if stop:\n print(\"Exiting interactive mode\")\n break", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. 
Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)", "def noinput():\n env.prompt = False", "def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)", "def safe_input(prompt=\"\"):\n\n\ttry:\n\t\tresult = input(prompt)\n\t\treturn result\n\texcept KeyboardInterrupt:\n\t\tsys.exit()\n\texcept:\n\t\treturn \"\"", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()", "def handle_input():\n\n # wait for user input and get timeout or character to process\n char = read_input()\n\n # handle user input\n if not is_input_valid(char):\n # No valid input, keep waiting for input\n return True\n\n # if terminal size is not valid, stop here\n if not nuqql.config.WinConfig.is_terminal_valid():\n show_terminal_warning()\n return True\n\n # if terminal resized, resize and redraw active windows\n if char == curses.KEY_RESIZE:\n nuqql.conversation.resize_main_window()\n return True\n\n # pass user input to active conversation\n for conv in nuqql.conversation.CONVERSATIONS:\n if conv.is_active():\n conv.process_input(char)\n return True\n\n # if no conversation is active pass input to active list window\n if nuqql.win.MAIN_WINS[\"list\"].state.active:\n # list window navigation\n nuqql.win.MAIN_WINS[\"input\"].redraw()\n nuqql.win.MAIN_WINS[\"log\"].redraw()\n nuqql.win.MAIN_WINS[\"list\"].process_input(char)\n return True\n\n # list window is also inactive -> user quit\n return False", "def handle_input(sock):\n\tprint(\"Type message, enter to send. 
'q' to quit\")\n\twhile True:\n\t\tmsg = input() #Blocks\n\t\tif msg == 'q':\n\t\t\tprint('Shut Down Client')\n\t\t\tsock.shutdown(socket.SHUT_RDWR)\n\t\t\tsock.close()\n\t\t\tbreak\n\t\ttry:\n\t\t\ttincanchat.send_msg(sock,msg) #Blocks until sent\n\t\texcept(BrokenPipeError,ConnectionError):\n\t\t\tbreak", "def wait_for_enter(field_data=\"\"):\n try:\n input(f\"{field_data}\\n\" f\"Press the 'ENTER' key to continue\")\n except KeyboardInterrupt:\n pass", "def s_input(prompt : str = \">\", accepted_inputs : list = [\"break\"], case_sensitive : bool = False, fail_message : str = \"\") -> str:\r\n\r\n user_input = \"\"\r\n first = True #For checking if the fail message should print or not\r\n while user_input not in accepted_inputs:\r\n if fail_message != \"\" and not first:\r\n print(fail_message) #Prints the assigned fail message if it isn't the first iteration\r\n user_input = input(prompt) #Gets user input\r\n if not case_sensitive:\r\n user_input = user_input.lower() #Sets the input to lower if needed\r\n first = False #Ensures that it is not the first iteration anymore\r\n return user_input", "def waitenterpressed(message = \"Press ENTER to continue...\"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\traw_input(message)\n\treturn 0", "def keep_going(text=\"Do you wish to continue? Answer Y or N.\"):\n answer = input(text)\n\n if answer == 'Y':\n print(\"The script is now running....\")\n else:\n print(\"You have chosen to quit this program\")\n raise SystemExit", "def __exit_condition(data_logger):\n try:\n while True:\n raw_input(\"\") # FIXME: is raw_input the right approach\n if CLOSE:\n raise KeyboardInterrupt()\n\n except (KeyboardInterrupt, EOFError):\n sys.stdin.close()\n data_logger.stop()", "def wait_for_input(self):\n pass", "def Wait(p_question: str):\n input(p_question)\n return", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? ')\n except EOFError:\n # User has cancelled\n return False\n\n return query", "def input(prompt=\"Input\"):\n \n __PyInputHelper__.userInput = None\n \n __PyInputHelper__.showAlertWithPrompt(prompt)\n \n while (__PyInputHelper__.userInput == None):\n \n if (threading.currentThread() in ignoredThreads):\n return \"\"\n \n continue\n \n userInput = __PyInputHelper__.userInput\n __PyInputHelper__.userInput = None\n return str(userInput)", "def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running = False\n return answer", "def stop(self):\n command = input(\"Enter anything to finish (or 'exit' to cancel)>>>\")\n return command != 'exit'", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. 
Try again.\")\n continue\n else:\n break\n\n return value", "def quit():\n while True:\n try:\n choice = input('press q to quit \\n r to restart')\n choice = choice.lower() # sanitize inputs before comparision\n\n except TypeError:\n print('Please enter q to quit or r to restart')\n if choice not in ('q', 'r'):\n continue\n else:\n break\n if choice == 'q':\n return True\n elif choice == 'r':\n return False", "def inask(question: str) -> str:\n answer = input(question)\n return answer", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def handle_input(self, key):\n if key == 'Q' or key == 'q':\n if(self.proc is not None):\n self.proc.send_signal(signal.SIGINT)\n\n raise urwid.ExitMainLoop()\n if key == 'R' or key == 'r':\n self.model.running = True\n self.run()\n if key == 'P' or key == 'p':\n self.togglePause()", "def get_input():\n return getch()", "def haltExec(self, input_pin=0):\n if not 0 <= input_pin < 2:\n raise(ValueError('`input_pin` [{0}] must be between 0 and 2'\n ''.format(input_sig)))\n cmd_string = 'H{0}'.format(input_sig)\n return self.sendRcv(cmd_string)", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def input(self):\n try:\n temp = ord(raw_input())\n self.tape.replace(temp)\n except:\n print \"Error -002\"\n raise", "def check_or_exit(msg):\n while True:\n user_input = raw_input(\"%s (y/n): \" % msg).lower()\n if user_input in ['y', 'yes']:\n print\n return\n if user_input in ['n', 'no']:\n print\n print_warning(\"Please complete the required steps and then \"\n \"re-run the script.\")\n sys.exit(1)", "def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()", "def ask_for_interface():\n return input(\"Interface name : \")", "def prompt_user():\n print()\n while True:\n print('Please choose one of the following options:')\n print(\"1: Send a Thank You\")\n print(\"2: Create a report\")\n print(\"3: Send letters to everyone\")\n print(\"4: Match donations\")\n print(\"5: Quit\")\n try:\n return int(input(\"Option: \"))\n except ValueError as e:\n print(\"***INVALID Option Selected***\")", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target 
position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def confirm_prompt(prompt):\n while True:\n print(prompt, end=' [Y/n]: ')\n\n if not os.isatty(sys.stdout.fileno()):\n print(\"Not running interactively. Assuming 'N'.\")\n return False\n pass\n\n r = input().strip().lower()\n if r in ['y', 'yes', '']:\n return True\n elif r in ['n', 'no']:\n return False", "def on_KeyboardInterrupt(player):\n print(\"paused by KeyboardInterrupt\")\n player.edit()", "def prompt_user(prompt: str) -> bool:\n positive_response = {'yes', 'y', 'ye', '', 'ok'}\n negative_response = {'no', 'n'}\n\n while True:\n answer = input(prompt).lower()\n if answer in positive_response:\n return True\n elif answer in negative_response:\n return False\n else:\n print(\"Please respond with 'yes' or 'no'\\n\", file=sys.stderr)", "def signal_handler(signal, frame): \n import signal\n import sys\n from time import localtime, strftime\n time = strftime(\"%H:%M:%S\", localtime())\n sel = raw_input('\\n\\n%s: Paused. Press return to resume, or type exit to quit: \\n' % time)\n if sel.startswith('e') or sel.startswith('E'):\n sys.exit(0)\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation resumed.\\n' % time", "def HandleInput(self, input):\r\n if input.IsKeyDown(K_UP, once=True):\r\n self.selected_option -= 1\r\n if self.selected_option < 0:\r\n self.selected_option = len(self.options) - 1\r\n if input.IsKeyDown(K_DOWN, once=True):\r\n self.selected_option += 1\r\n if self.selected_option >= len(self.options):\r\n self.selected_option = 0\r\n \r\n # Selection\r\n if input.IsKeyDown(K_RETURN, once=True):\r\n self.SelectOption()\r\n \r\n # If they hit ESC, leave bartering\r\n if input.IsKeyDown(K_ESCAPE, once=True):\r\n Log('Quick removed barter from dialogue.')\r\n self.dialogue.SelectLastConversation()", "def enter():\n input(\"\\nClick Enter to continue \")", "def rawInputWithCheck(prompt):\n proceed = False\n i = None\n while not(proceed):\n i = raw_input(prompt)\n print \"Is this correct?\"\n print ' '*3, repr(i)\n proceed = YNInput(' '*2)\n return i", "def exit_prompt(message=''):\r\n if message != '': print(str(message))\r\n input('\\nPress [Enter] to exit...')\r\n sys.exit()", "def waitenter(times=1):\n\n # For each time\n for _ in range(times):\n # Ask for user input\n input(\"\")", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n self.again = False", "def user_input():\n ans = input('Continue? 
: y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def input(self, prompt):\r\n return console_input(prompt)", "def safe_input(response):\n try:\n return input(response)\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def prompt(prompt, validator=(lambda x: True), hint=None):\n user_input = input(prompt)\n while not validator(user_input):\n user_input = input(prompt)\n return user_input", "def control(message, the_function):\n while True:\n user_choice = input(message)\n if user_choice in ('Y', 'y'):\n the_function()\n elif user_choice in ('N', 'n'):\n print(\"exiting program.....\")\n print(\"Have a nice day!\")\n break\n else:\n print(\"Not a valid option, try again\")", "def stdin_thread(self):\n while True:\n if not self.is_running():\n time.sleep(0.1)\n continue\n msg = self._stdin_queue.get()\n if msg is None:\n break # Ask to stop\n self._say(msg)", "def receive_interrupt_request(self, _: EmptyMsg):\n self.renderer.interrupt()", "def takeInAndConfirmUserInput():\n validInput = False\n userInput = \"\"\n while validInput != True:\n userInput = input(\"~ \")\n\n print(f\"you have written {userInput}, is this correct? y/[n]\")\n\n confirmation = input(\"~ \")\n\n if confirmation.lower() == \"y\":\n validInput = True\n\n return userInput", "def ask(question=WARNING_DIFF):\n\t\t\tfd = sys.stdin.fileno()\n\n\t\t\toldterm = termios.tcgetattr(fd)\n\t\t\tnewattr = termios.tcgetattr(fd)\n\t\t\tnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n\t\t\ttermios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n\t\t\toldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\t\t\tself.stdout.write(question)\n\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfirstCharacter = sys.stdin.read(1)\n\t\t\t\t\t\treturn forceUnicode(firstCharacter) in (u\"y\", u\"Y\")\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\ttermios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\t\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(1) # no match", "def alert_on_error(error_msg: str, prompt_user: bool):\n print(error_msg)\n g.alerts.send(error_msg)\n if prompt_user:\n while True:\n user_response = input(\"Continue execution? [Y/N]\\n\").upper()\n if user_response == \"Y\":\n break\n elif user_response == \"N\":\n raise KeyboardInterrupt\n else:\n print(\"Please type in 'Y' or 'N' as a response.\")", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)", "def pause():\n global pause_continue\n if pause_continue:\n return\n line = input(\n \"Paused. 
'q' to quit, 'c' to continue without pausing, anything else to step.\"\n )\n if line:\n if line[0] == \"q\":\n exit(0)\n if line[0] == \"c\":\n pause_continue = True", "def confirm(message: str, answer: str | None = None) -> bool:\n given_answer = answer.lower() if answer is not None else \"\"\n while given_answer not in [\"y\", \"n\", \"q\", \"yes\", \"no\", \"quit\"]:\n console.print(f\"[yellow]{message}[y/n/q]?[/] \", end=\"\")\n try:\n given_answer = input(\"\").lower()\n except KeyboardInterrupt:\n given_answer = \"q\"\n if given_answer.lower() in [\"q\", \"quit\"]:\n # Returns 65 in case user decided to quit\n sys.exit(65)\n return given_answer in [\"y\", \"yes\"]", "def instruction_in(self, register):\n if len(self.input_buffer) == 0:\n user_input = raw_input() + '\\n'\n self.input_buffer = deque(user_input)\n\n char = self.input_buffer.popleft()\n value = ord(char)\n\n self.set_register(register, value)", "def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'", "def inp(text):\r\n input(text)", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def get_input(label):\n result = input(label)\n sounds.play_input_beep()\n return result", "def switch_input(cls):\n try:\n assert globals()[\"input\"]\n cls.restore_input()\n except KeyError:\n cls.override_input()", "def get_input():\n letters = input('Enter letters, Enter to quit:\\n')\n return letters", "def signal_handler(signal, _): \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)", "def prompt():\r\n inpt = -1\r\n valid_choices = ['1','2','3','4','5']\r\n while inpt not in valid_choices:\r\n inpt = input(\"\\nPlease select the number of the operation you wish \"\r\n \"to complete:\\n\" +\r\n \"1. Run file mover\\n2. Add directories\"\r\n \"\\n3. Remove directory\\n4. View saved directories\\n5. Quit\\n\").strip()\r\n if inpt not in valid_choices:\r\n print(\"\\n*** Invalid choice ***\")\r\n return inpt", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def loop(self):\n\t\twhile (self.quit == 0):\n\t\t\ttry:\n\t\t\t\tuserInput = raw_input(\"> \")\n\t\t\t\tself.processCommand(userInput)\n\t\t\texcept EOFError:\n\t\t\t\tsys.exit()\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.exit()", "def get_restart_prompt():\n while True:\n try:\n restart = input('\\nWould you like to restart? Enter yes or no.\\n').lower()\n if restart in ['yes' , 'no']:\n return restart\n else:\n raise ValueError\n except ValueError:\n print('Sorry. 
I cannot understand what you mean.')", "def input(self, message=''):\r\n from javax.swing import JOptionPane\r\n return JOptionPane.showInputDialog(frame, message)", "def get_user_input(self):\n while not self.suspended:\n input = raw_input()\n input = input.split('|')\n if input[0] in ['exit', 'quit', 'kill']:\n self.broadcast('kill')\n self.suspended = True\n for client in self.clients.values():\n client.socket.close()\n self.s.close() # Have to connect to socket to exit server.\n sock = socket(AF_INET, SOCK_STREAM)\n port = bind_to_random(sock)\n sock.connect((str(self.ip), self.port))\n elif len(input) > 1:\n msg = '|'.join(['#server']+input[1:])\n if input[0][:1] == '@':\n destination = input[0][1:].lower()\n if destination == 'server':\n print msg\n elif destination == 'all':\n self.broadcast(msg)\n else:\n client = self.clients.get(destination, None)\n if client:\n client_send(client.socket, msg)\n else:\n print 'Destination not active'\n else:\n print msg", "def manual_input():\n label, data = [], []\n\n complete_input = False\n print('Enter data label and data. For example, 1f 56, is a record of 56 labeled 1f.')\n print('To erase last input type \"cancel\", to finish type \"done\"')\n\n while not complete_input:\n x = input('Enter label and data:')\n if str.lower(x) == 'done':\n complete_input = True\n print('Okay thank you.')\n print('Processing...')\n elif str.lower(x) == 'cancel':\n label.pop()\n data.pop()\n else:\n name, number = x.split(' ')\n label.append(name)\n data.append(int(number))\n return label, data", "def deny():\n raise InterruptEvent", "def interactive_shell():\n print(\"\"\"\nTo exit, enter 'EXIT'.\nEnter a sentence like \ninput> wth is it????\"\"\")\n\n while True:\n try:\n # for python 2\n sentence = raw_input(\"input> \")\n except NameError:\n # for python 3\n sentence = input(\"input> \")\n\n words_raw = sentence.strip()\n\n if words_raw == \"EXIT\":\n break\n \n words_raw = Encoder.str2uni(words_raw)\n label_prob, label_pred = predict(words_raw)\n if label_pred[0] == 0:\n print(\"OUTPUT> Subversive \\t\\t PROB> %.2f\"%(100*(1-label_prob.data[0])))\n else:\n print(\"OUTPUT> None \\t\\t PROB> %.2f\"%(100*label_prob.data[0]))", "def getInput(self):\n\t\tkeyPressed = self.screen.getch()\n\t\tif keyPressed == 113:\t\t# <escape>\n\t\t\tself.terminate()\n\t\t\tself.running = False\n\t\treturn keyPressed \t\t# return key for (possible) further action in calling program", "def getInput(functionToAsk):\n while True:\n userInput = input(\"Would you like to {}? (Yy/Nn)\\n> \".format(functionToAsk))\n\n if userInput.lower() == \"y\":\n return True\n if userInput.lower() == \"n\":\n return False", "def _interrupt(self, signum: int, frame: Optional[Any]) -> None:\n if self._in_task(frame):\n raise KeyboardInterrupt\n else:\n self._interrupted = True\n self._ready_tasks.interrupt()", "def prompt(self):\n # TODO: fix null input\n print('Enter user input: ')\n userinput = input()\n print(f'User chose: {userinput}')\n return userinput" ]
[ "0.7238744", "0.68656516", "0.6854231", "0.67521363", "0.67260265", "0.6714292", "0.6679884", "0.6577005", "0.65114206", "0.64484817", "0.6370928", "0.6356066", "0.63458174", "0.62438965", "0.62413186", "0.62225163", "0.61820346", "0.6136225", "0.6091857", "0.6065281", "0.60440826", "0.60329354", "0.6029072", "0.6018131", "0.6004149", "0.5989279", "0.59841466", "0.5978481", "0.5968148", "0.593496", "0.5918289", "0.5907097", "0.5893147", "0.58353335", "0.58338547", "0.5793338", "0.5756519", "0.57431686", "0.5720074", "0.5715298", "0.5713701", "0.5688975", "0.5680948", "0.56802547", "0.56561095", "0.56561095", "0.56369287", "0.5632217", "0.5630873", "0.56093854", "0.5606449", "0.5603417", "0.5597384", "0.5587753", "0.5561772", "0.5538774", "0.55295825", "0.55100566", "0.55049706", "0.55000436", "0.5498337", "0.5498003", "0.5495285", "0.54945505", "0.54938596", "0.5491273", "0.5483445", "0.54819375", "0.5480947", "0.54795176", "0.54769033", "0.547639", "0.54762286", "0.5462063", "0.5458844", "0.5458492", "0.5451169", "0.5442685", "0.5441248", "0.5439851", "0.5437128", "0.5430766", "0.54273146", "0.54236054", "0.5411957", "0.5392503", "0.5389612", "0.53892386", "0.5387499", "0.53846884", "0.53752667", "0.5370687", "0.53668785", "0.5352738", "0.53449273", "0.534222", "0.5341423", "0.53389055", "0.5338427", "0.5330845", "0.53274214" ]
0.0
-1
Prompts for confirmation (yes/no question). If the user aborts the input by sending an interrupt signal this
def confirm( text: str, default: bool = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ): prompt = _build_prompt(text, prompt_suffix, show_default, "Y/n" if default else "y/N") while True: try: value = _prompt(prompt, err=err, hide_input=False).lower().strip() except (KeyboardInterrupt, EOFError): raise click.Abort() if value in ('y', "yes"): rv = True elif value in ('n', "no"): rv = False elif value == '': rv = default else: click.echo("Error: invalid input", err=err) continue break if abort and not rv: raise click.Abort() return rv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirmation(self, question, answer):\n confirm_flag = False\n while confirm_flag not in ['y', 'n']:\n confirm_flag = raw_input(question + ' [y/n]: ')\n if confirm_flag == 'y':\n print answer\n elif confirm_flag == 'n':\n print 'The user cancel the operation'\n exit()\n else:\n print 'The entry is not valid, please enter y or n.'\n return True", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def confirm(message: str, answer: str | None = None) -> bool:\n given_answer = answer.lower() if answer is not None else \"\"\n while given_answer not in [\"y\", \"n\", \"q\", \"yes\", \"no\", \"quit\"]:\n console.print(f\"[yellow]{message}[y/n/q]?[/] \", end=\"\")\n try:\n given_answer = input(\"\").lower()\n except KeyboardInterrupt:\n given_answer = \"q\"\n if given_answer.lower() in [\"q\", \"quit\"]:\n # Returns 65 in case user decided to quit\n sys.exit(65)\n return given_answer in [\"y\", \"yes\"]", "def confirm(message):\n if not sys.stdout.isatty():\n return False\n reply = BaseCommand.input(\"\\n{message} [Y/N]:\".format(message=message))\n return reply and reply[0].lower() == 'y'", "def get_confirmation():\n inp = PInput(\"#> \")\n\n inp.add_keyword(\"yes\")\n inp.add_keyword(\"no\")\n\n inp.ask()\n ans = inp.get_input()\n\n if ans == \"yes\":\n return True\n else:\n return False", "def confirm(msg=\"\"):\n answer = \"\"\n if not msg: msg = \"OK to continue\"\n while answer not in [\"y\", \"n\"]:\n answer = input(msg+\" [Y/N]? \").lower()\n return answer == \"y\"", "def confirm_yes():\r\n confirm = raw_input(\"Enter 'yes' to confirm: \")\r\n if confirm == 'yes':\r\n return True\r\n return False", "def confirm_prompt(prompt):\n while True:\n print(prompt, end=' [Y/n]: ')\n\n if not os.isatty(sys.stdout.fileno()):\n print(\"Not running interactively. Assuming 'N'.\")\n return False\n pass\n\n r = input().strip().lower()\n if r in ['y', 'yes', '']:\n return True\n elif r in ['n', 'no']:\n return False", "def confirm():\n answer = \"\"\n while answer not in [\"y\", \"n\"]:\n answer = input(\"OK to push to continue [Y/N]? \").lower()\n return answer == \"y\"", "def confirm(force):\n if not force:\n ans = input(que(bold(\"Are you sure? [y/N]: \")))\n else:\n ans = 'y'\n\n return ans.lower()", "def ask_for_confirmation(prompt=\"Are you sure? \", default=True):\n yes, no = (\"Y\", \"n\") if default else (\"y\", \"N\")\n prompt += f\"[{yes}/{no}] \"\n\n while True:\n ans = input(prompt).lower().strip()\n if not ans:\n return default\n elif not (\"yes\".startswith(ans) or \"no\".startswith(ans)):\n print(\"Please enter yes or no.\")\n continue\n else:\n return \"yes\".startswith(ans)", "def confirm(msg: str) -> bool:\n res = input(msg + \" (Y/n) > \")\n if res == 'Y' or res == 'y' or res == 'yes' or res == 'Yes' or res == \"\":\n return True\n return False", "def confirm():\n answer = \"\"\n while answer not in [\"y\", \"n\"]:\n answer = input(\"OK with that [Y/N]? 
\").lower()\n return answer == \"y\"", "def confirm(msg: str = \"Do you want it:\", default: bool = True) -> bool:\n\n question = [\n {\n 'type': 'confirm',\n 'name': 'confirm',\n 'message': msg,\n 'default': default\n }\n ]\n try:\n answer = prompt(question)\n return answer['confirm']\n except KeyError:\n exit = confirm(msg=\"Do you want cancel script\")\n if exit:\n raise SystemExit\n else:\n return confirm(msg, default)", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? \").lower()\n return not answer.startswith(\"n\")", "def proceed():\n c_print(\"********** PROCEED? **********\")\n # capture user input\n confirm = input(\" \" * 36 + \"(y/n) \")\n # quit script if not confirmed\n if confirm.lower() != \"y\":\n c_print(\"******* EXITING SCRIPT *******\")\n print(\"~\" * 80)\n exit()\n else:\n c_print(\"********* PROCEEDING *********\")", "def confirm(self, prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def get_confirm(self):\n self.warning('Would you like to execute[y/N]: ')\n _choice = input()\n choice = _choice.lower() if _choice else 'n'\n err_msg = \"must input yes(y)/no(n), not \" + _choice\n if not choice.startswith(('y', 'n')):\n self.error(err_msg)\n return\n if choice == 'y' or choice == 'yes':\n confirm = True\n elif choice == 'n' or choice == 'no':\n self.info(\"Nothing to do.\")\n confirm = False\n else:\n self.error(err_msg)\n confirm = None\n\n return confirm", "def confirm(prompt=None, resp=False):\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def confirm(prompt=None, resp=False):\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print('please enter y or n.')\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def confirm(prompt=None, resp=False):\n\n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n\n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = 
input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def keep_going(text=\"Do you wish to continue? Answer Y or N.\"):\n answer = input(text)\n\n if answer == 'Y':\n print(\"The script is now running....\")\n else:\n print(\"You have chosen to quit this program\")\n raise SystemExit", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def get_confirmation(self, message):\n\t\tassert isinstance(message, str)\n\n\t\tif not message:\n\t\t\traise AssertionError(\"asd\")\n\t\tuser_input = raw_input(\"{}\\n>\".format(message)).lower()\n\t\twhile True:\n\t\t\tif self.is_boolean_state(user_input):\n\t\t\t\treturn self.get_boolean_state(user_input)\n\t\t\tuser_input = raw_input(\"Please type 'n' for no, or 'y' for yes:\\n>\").lower()", "def question(self, message=\"Do you wish to proceed?\", title=\"Question\", cancel=False):\n if cancel:\n instructions = \"y = yes, n = no, c = cancel\"\n options = ['y', 'n', 'c']\n else:\n instructions = \"y = yes, n = no, c = cancel\"\n options = ['y', 'n']\n print(title)\n print(message)\n print(instructions)\n answer = input()\n while answer not in options:\n print(\"Sorry, I can't interpret that answer\")\n print(message)\n print(instructions)\n answer = input()\n if answer == 'y': return \"Yes\"\n if answer == 'n': return \"No\"\n if answer == 'c': return \"Cancel\"", "def decision(question):\n return click.confirm(question, show_default=True)", "def cancel():\n global confirmation, output1, place_for_enter\n output1.delete(1.0, END)\n confirmation.after(1, confirmation.destroy)\n place_for_enter.delete(0, END)", "def _confirm_prompt(message, prompt=\"\\nAre you sure? [y/yes (default: no)]: \",\n affirmations=(\"Y\", \"Yes\", \"yes\", \"y\")):\n answer = input(message + prompt)\n return answer in affirmations", "def confirm(question, default_choice='yes'):\n valid = {'yes':True, 'y':True, 'ye':True, 'no':False, 'n':False}\n default_choice = str(default_choice).lower()\n if default_choice == 'none':\n prompt = ' [y/n] '\n elif default_choice == 'yes':\n prompt = ' [Y/n] '\n elif default_choice == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('invalid default answer: \"%s\"' % default_choice)\n\n while True:\n print(str(question) + prompt)\n choice = input().lower()\n if default_choice != 'none' and choice == '':\n return valid[default_choice]\n elif choice in valid:\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def confirm(s=None, default=False):\n\n if s:\n s = '{} (y/n): '.format(s)\n else:\n s = 'Continue? (y/n): '\n answer = input(s).strip().lower()\n return answer.startswith('y') if answer else default", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. 
Exiting...\")\n exit(0)\n else: return", "def confirm_choice(\n choice: Text, default: Optional[bool] = True, abort: Optional[bool] = True\n) -> bool:\n if default:\n hint = \"Y/n\"\n else:\n hint = \"y/N\"\n answer = input(f\"{choice} [{hint}]: \")\n\n value = None\n if answer.lower() in [\"y\", \"yes\"]:\n value = True\n\n if answer.lower() in [\"n\", \"no\"]:\n value = False\n\n if not answer:\n value = default\n\n if value is None:\n print(\"Invalid answer\")\n return confirm_choice(choice=choice, default=default, abort=abort)\n\n if abort and not value:\n raise RuntimeError(\"Aborting\")\n\n return value", "def test_prompt_msg_confirm_blank_default_yes(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": True\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [Yn]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], True)", "def ask_yes_no(question):\n answer = None\n while answer not in (\"y\",\"n\"):\n answer = input(question).lower()\n return answer", "def ask_yes_no(question):\r\n\tresponse = None\r\n\twhile response not in (\"y\", \"n\"):\r\n\t\tresponse = input(question).lower()\r\n\treturn response", "def confirm_action(message: str) -> bool:\n return input(message)[0:1] in \"Yy\"", "def confirm(self, prompt, default):\n raise NotImplementedError(NotImplementedMessage)", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def ask_continue():\n i = input(\"Please ensure your System Dependencies are met. Continue? [y/N] \")\n if i in (\"\", \"N\", \"n\"):\n out_error(\"Please install system dependencies to continue\")\n exit(1)", "def prompt_for_exit():\n g.message = c.r + \"Press ctrl-c again to exit\" + c.w\n g.content = generate_songlist_display()\n screen_update()\n\n try:\n userinput = input(c.r + \" > \" + c.w)\n\n except (KeyboardInterrupt, EOFError):\n quits(showlogo=False)\n\n return userinput", "def _yes_no_select(question):\n while True:\n response = input(question + \" [y/n] \")\n if response in [\"y\", \"yes\"]:\n return True\n elif response in [\"n\", \"no\"]:\n return False\n else:\n print(\"\\nPlease select y or n\\n\")", "def confirm(message: str = \"Confirm?\", suffix: str = \" (y/n) \") -> bool:\n session = create_confirm_session(message, suffix)\n return session.prompt()", "def ask_user( prompt ):\n answer = raw_input( prompt )\n if answer.lower() in [\"y\",\"yes\"]:\n return True\n else:\n return False", "def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)", "def confirm():\n\t\traise NotImplementedError", "def ask(prompt):\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)", "def prompt_user(prompt: str) -> bool:\n positive_response = {'yes', 'y', 'ye', '', 'ok'}\n negative_response = {'no', 'n'}\n\n while True:\n answer = input(prompt).lower()\n if answer in positive_response:\n return True\n elif answer in negative_response:\n return False\n else:\n print(\"Please respond with 'yes' or 'no'\\n\", file=sys.stderr)", "def ask_yes_no(message=\"\", title=None):\n return dialog(\"ask_yes_no\", message=message, title=title)", "def test_prompt_msg_confirm_blank_default_no(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n 
\"ask\": \"result\",\n \"confirm\": False\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [yN]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], False)", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def no_going_back(confirmation):\r\n if not confirmation:\r\n confirmation = 'yes'\r\n\r\n return valid_response(\r\n 'This action cannot be undone! '\r\n 'Type \"%s\" or press Enter to abort: ' % confirmation,\r\n str(confirmation))", "def _prompt(prompt):\n return raw_input(\"%s [yes or no]: \" % prompt) == \"yes\"", "def confirm_prompt(prompt, stream=sys.stdout):\n return ask_prompt(prompt, options=[\"y\", \"n\"], stream=stream) == \"y\"", "def confirm(prompt_str, default=False):\r\n if default:\r\n prompt = '%s [Y/n]: ' % prompt_str\r\n else:\r\n prompt = '%s [y/N]: ' % prompt_str\r\n\r\n response = valid_response(prompt, 'y', 'yes', 'yeah', 'yup', 'yolo')\r\n\r\n if response is None:\r\n return default\r\n\r\n return response", "def ask_yesno(prompt):\n more = input(prompt)\n while more not in [\"y\", \"n\"]:\n print(\"I beg your pardon!\")\n more = input(prompt)\n return more == 'y'", "def AskYesNoCancel(question, default = 0, yes=None, no=None, cancel=None, id=262):\n\n raise NotImplementedError(\"AskYesNoCancel\")", "def handle_yes_no_input(prompt):\n user_input = input(prompt).upper()\n\n # Handling bad input\n while user_input not in [\"Y\", \"N\"]:\n user_input = input(f\"\\\"{user_input}\\\" is not a valid input. Please enter \\\"Y\\\" or \\\"N\\\": \")\n\n return user_input == \"Y\"", "def prompt_user(videos, confirmation=False):\r\n if not confirmation:\r\n print(\"Found videos:\")\r\n print(\"\\n\".join(videos))\r\n question = \"Are you sure you would like to upload these videos? [Y/n]\"\r\n confirmation = raw_input(question).lower() in ('', 'y', 'yes')\r\n return confirmation", "def prompt_yes_no(message, color):\n\tquestions = [inquirer.List('choice',\n\t message=color + Style.BRIGHT + message + Fore.BLUE,\n\t choices=[' Yes', ' No'],\n\t ),\n\t ]\n\n\tanswers = inquirer.prompt(questions)\n\treturn answers.get('choice').strip().lower() == 'yes'", "def takeInAndConfirmUserInput():\n validInput = False\n userInput = \"\"\n while validInput != True:\n userInput = input(\"~ \")\n\n print(f\"you have written {userInput}, is this correct? y/[n]\")\n\n confirmation = input(\"~ \")\n\n if confirmation.lower() == \"y\":\n validInput = True\n\n return userInput", "def confirm(text, app, version, modules=None, default_yes=False):\n print(text)\n print(' Directory: %s' % os.path.basename(app.app_dir))\n print(' App ID: %s' % app.app_id)\n print(' Version: %s' % version)\n print(' Modules: %s' % ', '.join(modules or app.modules))\n if default_yes:\n return raw_input('Continue? [Y/n] ') not in ('n', 'N')\n else:\n return raw_input('Continue? 
[y/N] ') in ('y', 'Y')", "def bool_prompt(text):\n switcher = {\n \"y\": True,\n \"yes\": True\n }\n user_input = input(text).lower()\n return switcher.get(user_input, False)", "def ask(self, question):\n if self.options.yes:\n return True\n\n result = False\n while True:\n print(question + ' [y/n] ')\n response = sys.stdin.readline()\n if response:\n if response[0].lower() == 'y':\n result = True\n break\n elif response[0].lower() == 'n':\n break\n print('Please type \"y\" for yes or \"n\" for no')\n return result", "def check_or_exit(msg):\n while True:\n user_input = raw_input(\"%s (y/n): \" % msg).lower()\n if user_input in ['y', 'yes']:\n print\n return\n if user_input in ['n', 'no']:\n print\n print_warning(\"Please complete the required steps and then \"\n \"re-run the script.\")\n sys.exit(1)", "def yes_or_no(prompt):\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n\n return response", "def user_input():\n ans = input('Continue? : y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def test_prompt_msg_confirm_capital_valid(self):\n with mock.patch('__builtin__.raw_input', return_value=\"Y\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": False\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [yN]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], True)", "def prompt(question):\n print('\\n')\n while True:\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[:1] == 'y':\n return True\n if reply[:1] == 'n':\n return False", "def confirm_exit(self):\n return True", "def runAskYesNoCancelDialog(\n self,\n c: Cmdr,\n title: str,\n message: str=None,\n yesMessage: str=\"&Yes\",\n noMessage: str=\"&No\",\n yesToAllMessage: str=None,\n defaultButton: str=\"Yes\",\n cancelMessage: str=None,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setWindowTitle(title)\n # Creation order determines returned value.\n yes = dialog.addButton(yesMessage, ButtonRole.YesRole)\n no = dialog.addButton(noMessage, ButtonRole.NoRole)\n cancel = dialog.addButton(cancelMessage or 'Cancel', ButtonRole.RejectRole)\n if yesToAllMessage:\n dialog.addButton(yesToAllMessage, ButtonRole.YesRole)\n if defaultButton == \"Yes\":\n dialog.setDefaultButton(yes)\n elif defaultButton == \"No\":\n dialog.setDefaultButton(no)\n else:\n dialog.setDefaultButton(cancel)\n try:\n c.in_qt_dialog = True\n dialog.raise_() # #2246.\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n # val is the same as the creation order.\n # Tested with both Qt6 and Qt5.\n return {\n 0: 'yes', 1: 'no', 2: 'cancel', 3: 'yes-to-all',\n }.get(val, 'cancel')", "def sigint_handler(*args):\n # if QMessageBox.question(None, '', \"Are you sure you want to quit?\",\n # QMessageBox.Yes | QMessageBox.No,\n # QMessageBox.No) == QMessageBox.Yes:\n QApplication.quit()", "def yesButton(self):\n \n self.answer=self.yesMessage.lower()\n self.top.destroy()", "def test_ask_yes_no_1(self, input_mock):\n response = basic.ask_yes_no()\n self.assertTrue(response)", "def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\", \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == 
None:\n\tprompt = \" [y/n/q] \"\n elif default == \"yes\":\n\tprompt = \" [Y/n/q] \"\n elif default == \"no\":\n\tprompt = \" [y/N/q] \"\n elif default == \"quit\":\n\tprompt = \" [y/n/Q] \"\n else:\n\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n\tsys.stdout.write(question + prompt)\n\tchoice = raw_input().lower()\n\tif default is not None and choice == '':\n\t return default\n\telif choice in valid.keys():\n\t return valid[choice]\n\telse:\n\t sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")", "def confirm():\n end_loop = False\n while not end_loop:\n confirmation = input(\"\"\"Would you like to continue with your choice?\n[1] No [2] Yes\nEnter a number please: \"\"\")\n if not confirmation or confirmation.isspace():\n print(\"You have not entered anything!\")\n try_again()\n elif confirmation.isnumeric() == True:\n if 0 < int(confirmation) < 3:\n if int(confirmation) == 1:\n confirmation = False\n return confirmation\n else:\n confirmation = True\n return confirmation\n end_loop = True\n else:\n print(\"You have not entered a valid number. Please enter a number between 1 and 2.\")\n else:\n print(\"Please enter a number only.\")\n try_again()", "def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)", "def query_yes_no_quit(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\",\n \"quit\":\"quit\", \"qui\":\"quit\", \"qu\":\"quit\", \"q\":\"quit\"}\n if default == None:\n prompt = \" [y/n/q] \"\n elif default == \"yes\":\n prompt = \" [Y/n/q] \"\n elif default == \"no\":\n prompt = \" [y/N/q] \"\n elif default == \"quit\":\n prompt = \" [y/n/Q] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes', 'no' or 'quit'.\\n\")", "def prompt_yes_no(question, default):\n again = 'Unknown response.'\n if default.lower() in ('y', 'yes'):\n options = '(Y/n): '\n elif default.lower() in ('n', 'no'):\n options = '(y/N): '\n else:\n raise ValueError('default must be \"y\", \"yes\", \"n\", or \"no\"')\n\n response = input(' '.join((question, options))).lower()\n while response not in ('y', 'yes', 'n', 'no', ''):\n response = input(' '.join((again, question, options))).lower()\n if response == '':\n return default\n return response", "def sigint_handler(*args):\n sys.stderr.write('\\r')\n if QMessageBox.question(None, '', \"Are you sure you want to quit?\",\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No) == QMessageBox.Yes:\n QApplication.quit()\n # qApp.quit()", "def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)", "def test_ask_yes_no_2(self, input_mock):\n response = basic.ask_yes_no()\n self.assertFalse(response)", "def prompt_yes_no(message, color, invert=False):\n questions = [\n inquirer.List(\n \"choice\",\n message=color + Style.BRIGHT + message + Fore.BLUE,\n choices=(\" No\", \" Yes\") if invert else (\" Yes\", \" No\"),\n )\n ]\n\n answers = inquirer.prompt(questions)\n if answers:\n return 
answers.get(\"choice\").strip().lower() == \"yes\"\n else:\n sys.exit(1)", "def confirm(action, default=None, skip=False):\n MAX_ITERATIONS = 3\n if skip:\n return default\n else:\n defaults = {\n None: ('y','n'),\n True: ('Y','n'),\n False: ('y','N'),\n }\n y, n = defaults[default]\n prompt = u('{action}? ({y}/{n})').format(**locals()).encode('utf-8')\n choice = None\n try:\n if default is None:\n cnt = 1\n while not choice and cnt < MAX_ITERATIONS:\n choice = raw_input(prompt)\n cnt += 1\n else:\n choice = raw_input(prompt)\n except KeyboardInterrupt:\n return None\n if choice in ('yes', 'y', 'Y'):\n return True\n if choice in ('no', 'n', 'N'):\n return False\n if default is not None:\n return default\n return None", "def get_prompt_yes_or_no(msg_input):\n\n msg_output = \"\"\n msg_code = 2\n yes = set(['yes', 'y', 'ye', ''])\n no = set(['no', 'n'])\n\n msg_answer = raw_input(msg_input).lower()\n if msg_answer in yes:\n msg_code = 0\n elif msg_answer in no:\n msg_code = 1\n msg_output = \"Ok, aborting...\"\n else:\n msg_code = 2\n msg_output = \"Please respond with 'yes' or 'no'.\"\n\n return msg_code, msg_output", "def ask(question=WARNING_DIFF):\n\t\t\tfd = sys.stdin.fileno()\n\n\t\t\toldterm = termios.tcgetattr(fd)\n\t\t\tnewattr = termios.tcgetattr(fd)\n\t\t\tnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n\t\t\ttermios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n\t\t\toldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\t\t\tself.stdout.write(question)\n\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfirstCharacter = sys.stdin.read(1)\n\t\t\t\t\t\treturn forceUnicode(firstCharacter) in (u\"y\", u\"Y\")\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\ttermios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\t\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)", "def validate_action(self, message=\"This action may delete data from the database. This action cannot be undone.\\nDo you wish to continue? (Y/N): \"):\n \n while True:\n print('\\n\\n')\n inp = input(message)\n \n if (inp.upper() == 'Y'):\n return True\n elif (inp.upper() == 'N'):\n return False\n \n print(\"Invalid input. 
Try again\")", "def runAskYesNoDialog(self,\n c: Cmdr, title: str, message: str=None, yes_all: bool=False, no_all: bool=False,\n ) -> str:\n if g.unitTesting:\n return None\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n # Creation order determines returned value.\n yes = dialog.addButton('Yes', ButtonRole.YesRole)\n dialog.addButton('No', ButtonRole.NoRole)\n # dialog.addButton('Cancel', ButtonRole.RejectRole)\n if yes_all:\n dialog.addButton('Yes To All', ButtonRole.YesRole)\n if no_all:\n dialog.addButton('No To All', ButtonRole.NoRole)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Warning)\n dialog.setDefaultButton(yes)\n if c:\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n val = dialog.exec() if isQt6 else dialog.exec_()\n finally:\n c.in_qt_dialog = False\n else:\n dialog.raise_()\n val = dialog.exec() if isQt6 else dialog.exec_()\n # val is the same as the creation order.\n # Tested with both Qt6 and Qt5.\n return_d = {0: 'yes', 1: 'no'}\n if yes_all and no_all:\n return_d [2] = 'yes-all'\n return_d [3] = 'no-all'\n elif yes_all:\n return_d [2] = 'yes-all'\n elif no_all:\n return_d [2] = 'no-all'\n return return_d.get(val, 'cancel')", "def ask_to_continue():\n\n bored = raw_input(\"Do you want another suggestion?(yes/no) \").lower()\n\n if bored == 'no':\n print\n print \"Great have fun!\"\n return False\n\n return True", "def yes_no_dialog(self, message):\n reply = QMessageBox.question(self, \"Are you sure?\",\n message, QMessageBox.Yes, QMessageBox.Cancel)\n\n if reply == QMessageBox.Yes:\n return True\n else:\n return False", "def promptyn(msg: str, default: Optional[bool] = None) -> bool:\n\n while True:\n yes = \"Y\" if default else \"y\"\n if default or default is None:\n no = \"n\"\n else:\n no = \"N\"\n confirm = prompt(\"%s [%s/%s]\" % (msg, yes, no), \"\").lower()\n if confirm in (\"y\", \"yes\"):\n return True\n elif confirm in (\"n\", \"no\"):\n return False\n elif not confirm and default is not None:\n return default", "def yesno_cancel(\n question, title=None, bitmap=None, yes=None, no=None, cancel=None, checkbox=None, checked=None\n):\n\n if title is None:\n title = _('Yes or no?')\n if yes is None:\n yes = _(\"Yes\")\n if no is None:\n no = _(\"No\")\n if cancel is None:\n cancel = _(\"Cancel\")\n if checkbox is None:\n checkbox = _(\"Apply to all\")\n\n return msg_dialogs.prompt3msg(question, title, bitmap, yes, no, cancel, checkbox, checked)", "def read_yes_no(prompt):\n ans = input(str(prompt) + ' [Y/n] ').lower()\n if ans in ['', 'y', 'yes']:\n return True\n else:\n return False", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def ask_for_confirmation(df: pd.DataFrame) -> bool:\n print(df.to_string())\n answer = input(r'Start the scan? y/[n]: ')\n return True if answer == 'y' else False", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass", "def test_prompt_msg_noask_confirm_fails(self):\n self.expected['failed'] = True\n self.expected['msg'] = \"Unexpected 'confirm' in non-question prompt.\"\n\n self.assertEquals(\n self.prompt._prompt(self.response, {\n \"say\": \"Hello World\",\n \"confirm\": True\n }),\n self.expected\n )" ]
[ "0.7569344", "0.7401378", "0.72859263", "0.7283516", "0.7266474", "0.719268", "0.71785426", "0.7086956", "0.70464075", "0.7021681", "0.6976385", "0.69672275", "0.69638824", "0.6946273", "0.6888155", "0.6809539", "0.6780994", "0.67785114", "0.66649485", "0.6664532", "0.6661782", "0.66520536", "0.6636097", "0.6519618", "0.65183264", "0.6500076", "0.6486283", "0.6461148", "0.6452029", "0.64101785", "0.64016825", "0.6399888", "0.63829315", "0.6380877", "0.6380849", "0.63682413", "0.63681436", "0.63663596", "0.6321782", "0.63100606", "0.63066137", "0.6258747", "0.62529725", "0.62440544", "0.6230077", "0.6222206", "0.6209998", "0.6203887", "0.6192816", "0.61876935", "0.6186979", "0.6180998", "0.61717516", "0.6167907", "0.61481506", "0.61295927", "0.6126983", "0.6123972", "0.6118938", "0.61127865", "0.6080667", "0.60631657", "0.6059547", "0.6052171", "0.6050829", "0.6043869", "0.6030351", "0.60180247", "0.60165346", "0.59993976", "0.5992133", "0.5961614", "0.59435505", "0.59435266", "0.59369326", "0.59274447", "0.5917486", "0.59095037", "0.5904631", "0.58935803", "0.5885693", "0.5885668", "0.5875868", "0.58746785", "0.58737296", "0.5869515", "0.58633995", "0.58570737", "0.58470845", "0.58390445", "0.5837847", "0.58255285", "0.58116394", "0.58098626", "0.5805425", "0.5796985", "0.579058", "0.5779257", "0.5773276", "0.5766068" ]
0.62040895
47
Read a string from standard input, but prompt to standard error. The trailing newline is stripped.
def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover if file is sys.stdout: return input(prompt) try: stdin = sys.stdin except AttributeError: raise RuntimeError("stderr_input: lost sys.stdin") file.write(prompt) try: flush = file.flush except AttributeError: pass else: flush() try: file.softspace = 0 # type: ignore except (AttributeError, TypeError): pass line = stdin.readline() if not line: # inputting an empty line gives line == '\n' raise EOFError elif line[-1] == '\n': return line[:-1] return line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def safe_input(prompt=\"\"):\n\n\ttry:\n\t\tresult = input(prompt)\n\t\treturn result\n\texcept KeyboardInterrupt:\n\t\tsys.exit()\n\texcept:\n\t\treturn \"\"", "def get_input(prompt):\n try:\n try:\n return raw_input(prompt)\n except NameError:\n return input(prompt)\n except EOFError:\n return ''", "def _input(str=''):\n print(str, end='', flush=True)\n return stdin.readline().rstrip('\\n')", "def input_or_error(stream=sys.stdin):\n line = readline_strip(stream)\n if not line: raise EOFError(\"End of input\")\n return line", "def prompt_str_input(prompt_name: str, get_user_input: GetInputFunc) -> str:\n try:\n return str(get_user_input(f\"type in {prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))", "def safe_input():\n try:\n input(\"Please enter something: \")\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()", "def clean_input(prompt):\n try:\n return input(prompt)\n # There is a general handling of KeyboardInterrupt in main() but\n # here it leads to a cleaner exit as the option to quit is returned.\n except KeyboardInterrupt:\n return 'Quit'", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def rawInput(string):\n if os.name == \"posix\":\n tcflush(sys.stdin, TCIFLUSH)\n return input(string)", "def get_input(prompt):\n # type: (str) -> str\n return raw_input(prompt)", "def rlinput(prompt, prefill=''):\n if \"readline\" not in sys.modules:\n # For example on Windows\n return input(prompt)\n else:\n readline.set_startup_hook(lambda: readline.insert_text(prefill))\n try:\n return input(prompt)\n finally:\n readline.set_startup_hook()", "def safe_input(display_string):\n\n try:\n x = raw_input(display_string)\n except NameError:\n x = input(display_string)\n\n return x", "def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer", "def pseudo_raw_input(self, prompt):\n\n if self.use_rawinput:\n try:\n line = sm.input(prompt)\n except EOFError:\n line = 'EOF'\n else:\n self.stdout.write(prompt)\n self.stdout.flush()\n line = self.stdin.readline()\n if not len(line):\n line = 'EOF'\n else:\n if line[-1] == '\\n': # this was always true in Cmd\n line = line[:-1]\n return line", "def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)", "def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'", "def prompt(prompt, validator=(lambda x: True), hint=None):\n user_input = input(prompt)\n while not validator(user_input):\n user_input = input(prompt)\n return user_input", "def input_with_timeout(prompt: Optional[str] = None, timeout: float = 36000.0) -> str:\n # use of sys.stdin and sys.stdout to mimic the builtin input based on\n # 
https://github.com/python/cpython/blob/baf7bb30a02aabde260143136bdf5b3738a1d409/Lib/getpass.py#L129\n if prompt:\n sys.stdout.write(prompt)\n sys.stdout.flush()\n\n line = misc.readline_with_timeout(timeout, prompt)\n\n if not line:\n raise EOFError\n return line.rstrip('\\n')", "def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def _get_input(question: str) -> str:\n print(question)\n sys.stdout.flush()\n user_input = sys.stdin.readline()\n user_input = user_input.strip()\n return user_input", "def input_(self) -> str:\n\n # Try to input through the prefered medium, but revert to\n # backup if need to and log any errors found, for example:\n # logging.error(\"Problem!\")\n\n return IO.stdin()", "def test_prompt_setInput_stringio_valid(self):\n instr = StringIO.StringIO()\n self.prompt.setInput(instr)\n\n self.assertEquals(instr, self.prompt._instr)\n self.assertEquals(instr.getvalue(), \"\")\n\n with mock.patch('__builtin__.raw_input', return_value='mocked input') as mockinput:\n result = self.prompt._prompt({}, {\n 'say': 'test',\n 'ask': 'varname'\n })\n\n self.assertEquals(result['ansible_facts']['varname'], 'mocked input')", "def prompt_for_input(prepend_prompt=''):\n if not prepend_prompt == '':\n prepend_prompt += ' '\n return raw_input(prepend_prompt + '> ').strip()", "def getstring(message = \"Enter a value: \"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\treturn raw_input(message)", "def get_string_input():\n string_input = input('Please enter string: ')\n return string_input", "def _handle_stdin(self, line):\r\n return input(line.replace(STDIN_PROMPT, \"\"))", "def prompt(msg, default=NO_DEFAULT, validate=None):\n while True:\n response = input(msg + \" \").strip()\n if not response:\n if default is NO_DEFAULT:\n continue\n return default\n if validate is None or validate(response):\n return response", "def read_user_input(self):\n\n self.commandline = raw_input(\"Enter the string you want to parse\\n\")", "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def pseudo_raw_input(self, prompt):\n\n # Deal with the vagaries of readline and ANSI escape codes\n safe_prompt = self._surround_ansi_escapes(prompt)\n\n if self.use_rawinput:\n try:\n if sys.stdin.isatty():\n line = sm.input(safe_prompt)\n else:\n line = sm.input()\n if self.echo:\n sys.stdout.write('{}{}\\n'.format(safe_prompt, line))\n except EOFError:\n line = 'eof'\n else:\n if self.stdin.isatty():\n # on a tty, print the prompt first, then read the line\n self.poutput(safe_prompt, end='')\n self.stdout.flush()\n line = self.stdin.readline()\n if len(line) == 0:\n line = 'eof'\n else:\n # we are reading from a pipe, read the line to see if there is\n # anything there, if so, then decide whether to print the\n # prompt or not\n line = self.stdin.readline()\n if len(line):\n # we read something, output the prompt and the something\n if self.echo:\n self.poutput('{}{}'.format(safe_prompt, line))\n else:\n line = 'eof'\n return line.strip()", "def input(prompt=\"Input\"):\n \n __PyInputHelper__.userInput = 
None\n \n __PyInputHelper__.showAlertWithPrompt(prompt)\n \n while (__PyInputHelper__.userInput == None):\n \n if (threading.currentThread() in ignoredThreads):\n return \"\"\n \n continue\n \n userInput = __PyInputHelper__.userInput\n __PyInputHelper__.userInput = None\n return str(userInput)", "def prompt_string(prompt=\"Enter a value\",\n default=None):\n _new = None\n while True:\n try:\n _new = str(input(f\"{prompt}? [{str(default)}]: \")) # nosec\n break\n except ValueError:\n print(\"Sorry, I didn't understand that.\")\n continue\n except KeyboardInterrupt:\n break\n return default if _new in [None, ''] else _new", "def input_wrapper(msg):\n userinput = input(msg)\n if userinput != 'q':\n return userinput\n else:\n sys.exit()", "def cont():\n\n try:\n input = raw_input()\n except Exception:\n pass", "def get_input_string():\n return input(\"Enter input string: \")", "def clean_input(prompt='Error'): # A special input function that will reject a\r\n # user's input of text when a number is requested -- if no prompt is\r\n # specified in the program, it will display \"Error\"\r\n text = True\r\n phrase = '0'\r\n while text:\r\n phrase = input(prompt + '\\n')\r\n try: # Adapted from an example in the ThinkPython textbook (15.7) -\r\n # Checks whether the input is a number, positive or negative. If\r\n # not, rejects the input and user gets to try again\r\n float(phrase)\r\n text = False\r\n except ValueError:\r\n print(\"Error: Non-Numeric Entry Detected\")\r\n # if phrase.isnumeric(): # Checks for a positive number (negative\r\n # rejected as well as text) - replaced with superior form from textbook\r\n # example\r\n # return float(phrase) # Return the number the user entered\r\n # else:\r\n # print(\"Error: Non-Numeric Entry Detected\")\r\n return float(phrase) # Return the number the user entered\r", "def _input(msg):\n if sys.version_info.major >= 3:\n ans = input(msg)\n elif sys.version_info.major == 2:\n ans = raw_input(msg)\n else:\n raise Exception(\"Unsupported python version. 
Please upgrade to python 2 or higher.\")\n\n return ans", "def getInput(prompt):\n if platform.python_version().startswith('3'):\n userInput = input('%s ' % prompt).strip()\n if platform.python_version().startswith('2'):\n userInput = raw_input('%s ' % prompt).strip()\n return userInput", "def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()", "def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()", "def _get_string():\n result = sys.stdin.readline().rstrip('\\n')\n return result", "def text_input():\n return input(\">>>\")", "def read_input(args, parser):\n if args.text == sys.stdin:\n # check if stdin is empty\n stdin_ready, _, _ = select.select([sys.stdin], [], [], 0)\n if stdin_ready:\n return sys.stdin.read().strip()\n\n parser.print_help()\n sys.exit(1)\n\n return args.text", "def read_stdin():\n return \"\".join(sys.stdin.readlines()).strip()", "def get_manual_test_string():\n test_string = \"\"\n while test_string == \"\":\n test_string = input(\"String to test (type 'q' to exit): \")\n test_string = test_string.strip()\n\n if test_string == \"\":\n print (\"Error: You must provide some input for the system to reply.\")\n return test_string", "def input(self, prompt):\r\n return console_input(prompt)", "def get_input(message:str) -> str:\n response = \"\"\n try:\n response = input(message)\n except:\n pass \n return response", "def __alt_prompt(self, prompt_text: str):\r\n if self.__use_windows_prompt:\r\n sys.stdout.write(prompt_text)\r\n sys.stdout.flush()\r\n i = sys.stdin.readline()\r\n return i.strip()\r\n return input(prompt_text)", "def read_input():\n\n read = sys.stdin.readlines()\n\n text = ''\n for line in read:\n text += line\n\n return text", "def safe_input(response):\n try:\n return input(response)\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def get_user_input(self):\n return stdin.readline().strip()", "def verify_prompt(prompt: str, expected: str = \"y\"):\n input_ = input(prompt)\n if input_ != expected:\n raise ValidationError(f\"Expected {expected}, got {input_}\")", "def text_input(self, prompt, default=None):\n try:\n user_input = self(prompt)\n if default is not None and user_input == \"\":\n return default\n except InputDisabled:\n if default is not None:\n return default\n raise\n\n return user_input", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. 
Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def multiline_input(prompt):\n\n lines=[]\n print(prompt+\" (input will end after entering a blank line) : >\")\n while True:\n line=input()\n if not line.strip():\n break\n else:\n lines.append(line)\n\n return \"\\n\".join(lines)", "def prompt_base(prompt):\n return input(prompt + \": \")", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def test_prompt_ask_say_missing_valid(self):\n with mock.patch('__builtin__.raw_input', return_value='mocked input') as mockinput:\n result = self.prompt._prompt({}, {\n 'ask': 'varname'\n })\n\n self.assertEquals(result['ansible_facts']['varname'], 'mocked input')", "def rawInputWithCheck(prompt):\n proceed = False\n i = None\n while not(proceed):\n i = raw_input(prompt)\n print \"Is this correct?\"\n print ' '*3, repr(i)\n proceed = YNInput(' '*2)\n return i", "def inask(question: str) -> str:\n answer = input(question)\n return answer", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def ask(message, ofile=sys.stderr, ifile=sys.stdin, style=Fore.MAGENTA,\r\n noecho=False, accept_empty=True):\r\n if noecho and ifile != sys.stdin:\r\n raise ValueError(\"noecho option implies input from stdin\")\r\n\r\n while True:\r\n with ScopedColoredStream(ofile, style, flush_on_exit=True) as stream:\r\n stream.write(message)\r\n\r\n if noecho:\r\n ans = getpass.getpass(prompt=\"\", stream=ofile)\r\n else:\r\n ans = ifile.readline().rstrip(\"\\n\\r\")\r\n\r\n if not accept_empty and not ans.strip():\r\n continue\r\n return ans", "def input_timeout(prompt: str, t_timeout: [float, int] = 30, default: str = None) -> str:\n print(prompt, end=\" \")\n rlist, _, _ = select.select([sys.stdin], [], [], t_timeout)\n\n if not rlist:\n if default is None:\n raise RuntimeError(f\"No input received within {t_timeout}s!\")\n else:\n return default\n\n return sys.stdin.readline().strip()", "def s_input(prompt : str = \">\", accepted_inputs : list = [\"break\"], case_sensitive : bool = False, fail_message : str = \"\") -> str:\r\n\r\n user_input = \"\"\r\n first = True #For checking if the fail message should print or not\r\n while user_input not in accepted_inputs:\r\n if fail_message != \"\" and not first:\r\n print(fail_message) #Prints the assigned fail message if it isn't the first iteration\r\n user_input = input(prompt) #Gets user input\r\n if not case_sensitive:\r\n user_input = user_input.lower() #Sets the input to lower if needed\r\n first = False #Ensures that it is not the first iteration anymore\r\n return user_input", "def userInput(self, message):\n data = raw_input(\"%s\" % message)\n return str(data)", "def get_input(*args, **kw):\n if sys.version[0] == \"2\":\n return raw_input(*args, **kw)\n else:\n return input(*args, **kw)", "def textinput(self, title, prompt):\n return simpledialog.askstring(title, prompt)", "def 
_password_prompt(question: str, console: io.IO) -> str:\n console.tell(question)\n while True:\n password1 = console.getpass('Password: ')\n try:\n _password_validate(password1)\n except ValueError as e:\n console.error(e)\n continue\n password2 = console.getpass('Password (again): ')\n if password1 != password2:\n console.error('Passwords do not match, please try again')\n continue\n return password1", "def _get_string(self):\n result = sys.stdin.readline().rstrip('\\n')\n return result", "def _get_input(prompt, options, allow_new=False, reprompt_options=None):\n\n _lwr_opts = [x.lower() for x in options]\n if reprompt_options is None:\n reprompt_options = options\n\n while True:\n _resp = input(prompt).strip()\n\n # Check that input is one of the options\n try:\n i = _lwr_opts.index(_resp.lower())\n return options[i]\n except ValueError:\n if not allow_new:\n print(f'Response must be one of the following: {\", \".join(reprompt_options)}')\n\n if allow_new and _resp: # If have a non-empty string\n return _resp", "def ask_input(self, prompt):\n self._vim.command('call inputsave()')\n self._vim.command('let user_input = input(\"{} \")'.format(prompt))\n self._vim.command('call inputrestore()')\n response = self._vim.eval('user_input')\n self._vim.command('unlet user_input')\n return response", "def QueryStr(cls, varName: str) -> str:\n\n global userInput\n\n try:\n userInput = input(\"{}: \".format(varName.capitalize()))\n\n # Raises a ValueError if userInput CAN be recast as integer.\n if userInput.isdigit():\n raise ValueError\n\n except ValueError:\n # Reprompt user for valid entry.\n print(\"\\nPlease enter a valid {}.\".format(varName))\n cls.QueryStr(varName)\n\n except Exception:\n print(\"\\nOops something is buggy\")\n\n return userInput", "def prompt(self):\n return input(self.message + \": \").strip()", "def get_input(user_input):\n return input(user_input)", "def input_is(string,msg):\n user_input = raw_input(msg)\n sys.stdout.write('\\n')\n return user_input == string", "def input_str() -> str:\n\t\tinput_string = str(input('Enter your string: '))\n\t\treturn input_string", "def input_str() -> str:\n\tinput_string = str(input('Enter your string: '))\n\treturn input_string", "def input_with_default(prompt, default):\n response = raw_input(\"%s (Default %s) \"%(prompt, default))\n if not response:\n return default\n return response", "def get_user_string(message):\n while True:\n user_input = input('{}: '.format(message))\n # This is a bad way to check if the user input is not empty.\n # It will be True if the user enters spaces, tabs, etc.\n if user_input:\n return user_input\n else:\n print('You must enter something.')", "def raw_next_line() -> str:\n return input()", "def interact(prompt, rules, default_responses, aloud=False):\n # Read a line, process it, and print the results until no input remains.\n while True:\n try:\n # Remove the punctuation from the input and convert to upper-case\n # to simplify matching.\n _input = input(prompt)\n _input = remove_punct(_input.upper())\n if not _input:\n continue\n except:\n break\n \n #Stopping condition\n if _input.startswith('GOODBYE'):\n output_response(\"Oh, okay. It was nice talking to you. 
Goodbye.\", aloud)\n break\n output_response(respond(rules, _input, default_responses), aloud)", "def input(cls, prompt=''):\n text_in = cls.input_reference(prompt)\n corrected_text = cls.auto_correct(text_in)\n return corrected_text", "def __init__(self):\n self.the_input = raw_input().strip().replace(' ', '')\n if self.the_input == '':\n print ('No input detected')\n exit(1)", "def prompt(self):\n # TODO: fix null input\n print('Enter user input: ')\n userinput = input()\n print(f'User chose: {userinput}')\n return userinput", "def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? ')\n except EOFError:\n # User has cancelled\n return False\n\n return query", "def test_prompt_msg_trim_off_valid(self):\n with mock.patch('__builtin__.raw_input', return_value=\" trim value \") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"First Name\",\n \"ask\": \"first_name\",\n \"trim\": False\n })\n\n self.assertEquals(result['ansible_facts']['first_name'], ' trim value ')", "def validate_stdin(context, param, value):\n # check if input is a file or stdin\n if value.name == '<stdin>':\n # raise error if stdin is empty\n if sys.stdin.isatty():\n raise click.BadParameter('you need to pipe something to stdin')\n\n return value", "def prompt_with_options(prompt, default=None, options=None):\n\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n if options and value not in options:\n value = None\n elif default is not None:\n value = default\n\n return value", "def test_strip_prompt():\n string = \"\"\"MyRouter version 1.25.9\nmyhostname>\"\"\"\n connection = FakeBaseConnection(RESPONSE_RETURN=\"\\n\", base_prompt=\"myhostname>\")\n result = connection.strip_prompt(string)\n assert result == \"MyRouter version 1.25.9\"", "def noinput():\n env.prompt = False", "def input(msg: str):\n ret = input(msg)\n return ret", "def read(self):\n return raw_input('> ')", "def user_prompt(prompt, default=None):\n prompt = f\"\\n {prompt} [{default}] runs or type an amount: \"\n response = input(prompt)\n if not response and default:\n return default\n else:\n return response", "def AskString(prompt, default = \"\", id=261, ok=None, cancel=None):\n\n raise NotImplementedError(\"AskString\")", "def clean(self):\n self._prompt.read()", "def get_user_input(prompt):\n while True:\n user_input = input(prompt)\n try:\n tmp = int(user_input)\n return tmp\n except ValueError:\n print('Not a number')", "def clear_in():\r\n c = sys.stdin.read(1)\r\n while(c!='\\n'):\r\n c = sys.stdin.read(1)" ]
[ "0.7439654", "0.723748", "0.70752895", "0.6913516", "0.68963534", "0.6866671", "0.68438375", "0.6834515", "0.67756915", "0.6763464", "0.6721034", "0.6602405", "0.65703905", "0.65552646", "0.65077746", "0.64992315", "0.64948124", "0.64894426", "0.64843607", "0.64575666", "0.64274603", "0.64274603", "0.637787", "0.6321883", "0.63008523", "0.62755066", "0.62584275", "0.6256506", "0.6252478", "0.6226008", "0.62244815", "0.62242645", "0.6194994", "0.6193931", "0.6165745", "0.61599666", "0.6158351", "0.61570907", "0.61389315", "0.61235964", "0.61171836", "0.610471", "0.610471", "0.60929424", "0.6076909", "0.60593176", "0.60222584", "0.60214996", "0.59880805", "0.59755903", "0.594717", "0.5941537", "0.5925745", "0.5892634", "0.58836186", "0.58804846", "0.5847604", "0.5845857", "0.58403623", "0.5839431", "0.5824776", "0.5808183", "0.5803081", "0.57365876", "0.5728411", "0.5725833", "0.5718631", "0.5709192", "0.57079965", "0.5687192", "0.56758225", "0.5674612", "0.56577265", "0.56420946", "0.563817", "0.5610801", "0.56071836", "0.55811644", "0.55803716", "0.5575318", "0.5568745", "0.55676377", "0.554906", "0.5541227", "0.5529277", "0.55210745", "0.55196196", "0.55190736", "0.5491859", "0.54638183", "0.5453246", "0.5452457", "0.54521656", "0.545178", "0.5450831", "0.5446928", "0.5437915", "0.5424032", "0.5421008", "0.53935933" ]
0.76236886
0
Prompts a user for input. If the user aborts the input by sending an interrupt signal, this
def choice( options: Union[List[str], Mapping[str, str]], text: str = '', default: Optional[str] = None, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, start_index: int = 0 ) -> Union[str, int]: # TODO: completer for numbers? type_: click.ParamType if isinstance(options, Mapping): # (Y/I/N/O/D/Z) [default=N] text = f"{text} ({'/'.join(options.keys())})" type_ = click.STRING for choice, descripton in options.items(): click.echo(f" {choice} : {descripton}") else: type_ = click.IntRange(start_index, len(options) + 1 - start_index) for idx, descripton in enumerate(options): idx += start_index click.echo(f" [{idx}] {descripton}") if default is not None and show_default: text += f" [default={default}]" while True: selection = prompt( text=text, default=default, type=type_, prompt_suffix=prompt_suffix, show_default=False, err=err, ) if isinstance(options, Mapping): selection = selection.strip().upper() if selection not in options: click.echo(f"Please enter a valid option.") else: return selection else: return selection - start_index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def cont():\n\n try:\n input = raw_input()\n except Exception:\n pass", "def clean_input(prompt):\n try:\n return input(prompt)\n # There is a general handling of KeyboardInterrupt in main() but\n # here it leads to a cleaner exit as the option to quit is returned.\n except KeyboardInterrupt:\n return 'Quit'", "def safe_input():\n try:\n input(\"Please enter something: \")\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def _WaitForAnyUserInput() -> None:\n _get_input('Press any key to continue')", "def prompt_for_exit():\n g.message = c.r + \"Press ctrl-c again to exit\" + c.w\n g.content = generate_songlist_display()\n screen_update()\n\n try:\n userinput = input(c.r + \" > \" + c.w)\n\n except (KeyboardInterrupt, EOFError):\n quits(showlogo=False)\n\n return userinput", "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def ask_input(self, prompt):\n self._vim.command('call inputsave()')\n self._vim.command('let user_input = input(\"{} \")'.format(prompt))\n self._vim.command('call inputrestore()')\n response = self._vim.eval('user_input')\n self._vim.command('unlet user_input')\n return response", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def input_for_user_selection(self):\n user_input = \"\"\n while user_input not in range(0, len(self.users)):\n print(\"Pick user from above, or type 'cancel'\")\n user_input = input()\n if user_input == \"cancel\":\n raise ValueError\n user_input = int(user_input)\n return user_input", "def checkInterrupt():\n if wasInterrupted():\n raise KeyboardInterrupt()", "def wait_for_input(self):\n if self._dont_enter_interactive_mode:\n return\n stop = False\n while True:\n print(\">>> \", end='')\n try:\n command_str = input()\n except EOFError:\n print(\"Exiting interactive mode\")\n break\n stop = self.interpret_command(command_str)\n if stop:\n print(\"Exiting interactive mode\")\n break", "def input_wrapper(msg):\n userinput = input(msg)\n if userinput != 'q':\n return userinput\n else:\n sys.exit()", "def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)", "def wait() -> None:\n\n process_input(input())", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. 
Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def wait_for_input(self):\n pass", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)", "def noinput():\n env.prompt = False", "def handle_input():\n\n # wait for user input and get timeout or character to process\n char = read_input()\n\n # handle user input\n if not is_input_valid(char):\n # No valid input, keep waiting for input\n return True\n\n # if terminal size is not valid, stop here\n if not nuqql.config.WinConfig.is_terminal_valid():\n show_terminal_warning()\n return True\n\n # if terminal resized, resize and redraw active windows\n if char == curses.KEY_RESIZE:\n nuqql.conversation.resize_main_window()\n return True\n\n # pass user input to active conversation\n for conv in nuqql.conversation.CONVERSATIONS:\n if conv.is_active():\n conv.process_input(char)\n return True\n\n # if no conversation is active pass input to active list window\n if nuqql.win.MAIN_WINS[\"list\"].state.active:\n # list window navigation\n nuqql.win.MAIN_WINS[\"input\"].redraw()\n nuqql.win.MAIN_WINS[\"log\"].redraw()\n nuqql.win.MAIN_WINS[\"list\"].process_input(char)\n return True\n\n # list window is also inactive -> user quit\n return False", "def handle_input(sock):\n\tprint(\"Type message, enter to send. 
'q' to quit\")\n\twhile True:\n\t\tmsg = input() #Blocks\n\t\tif msg == 'q':\n\t\t\tprint('Shut Down Client')\n\t\t\tsock.shutdown(socket.SHUT_RDWR)\n\t\t\tsock.close()\n\t\t\tbreak\n\t\ttry:\n\t\t\ttincanchat.send_msg(sock,msg) #Blocks until sent\n\t\texcept(BrokenPipeError,ConnectionError):\n\t\t\tbreak", "def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()", "def wait_for_enter(field_data=\"\"):\n try:\n input(f\"{field_data}\\n\" f\"Press the 'ENTER' key to continue\")\n except KeyboardInterrupt:\n pass", "def safe_input(prompt=\"\"):\n\n\ttry:\n\t\tresult = input(prompt)\n\t\treturn result\n\texcept KeyboardInterrupt:\n\t\tsys.exit()\n\texcept:\n\t\treturn \"\"", "def waitenterpressed(message = \"Press ENTER to continue...\"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\traw_input(message)\n\treturn 0", "def s_input(prompt : str = \">\", accepted_inputs : list = [\"break\"], case_sensitive : bool = False, fail_message : str = \"\") -> str:\r\n\r\n user_input = \"\"\r\n first = True #For checking if the fail message should print or not\r\n while user_input not in accepted_inputs:\r\n if fail_message != \"\" and not first:\r\n print(fail_message) #Prints the assigned fail message if it isn't the first iteration\r\n user_input = input(prompt) #Gets user input\r\n if not case_sensitive:\r\n user_input = user_input.lower() #Sets the input to lower if needed\r\n first = False #Ensures that it is not the first iteration anymore\r\n return user_input", "def keep_going(text=\"Do you wish to continue? Answer Y or N.\"):\n answer = input(text)\n\n if answer == 'Y':\n print(\"The script is now running....\")\n else:\n print(\"You have chosen to quit this program\")\n raise SystemExit", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def handle_input(self, key):\n if key == 'Q' or key == 'q':\n if(self.proc is not None):\n self.proc.send_signal(signal.SIGINT)\n\n raise urwid.ExitMainLoop()\n if key == 'R' or key == 'r':\n self.model.running = True\n self.run()\n if key == 'P' or key == 'p':\n self.togglePause()", "def Wait(p_question: str):\n input(p_question)\n return", "def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? 
')\n except EOFError:\n # User has cancelled\n return False\n\n return query", "def __exit_condition(data_logger):\n try:\n while True:\n raw_input(\"\") # FIXME: is raw_input the right approach\n if CLOSE:\n raise KeyboardInterrupt()\n\n except (KeyboardInterrupt, EOFError):\n sys.stdin.close()\n data_logger.stop()", "def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running = False\n return answer", "def stop(self):\n command = input(\"Enter anything to finish (or 'exit' to cancel)>>>\")\n return command != 'exit'", "def input(prompt=\"Input\"):\n \n __PyInputHelper__.userInput = None\n \n __PyInputHelper__.showAlertWithPrompt(prompt)\n \n while (__PyInputHelper__.userInput == None):\n \n if (threading.currentThread() in ignoredThreads):\n return \"\"\n \n continue\n \n userInput = __PyInputHelper__.userInput\n __PyInputHelper__.userInput = None\n return str(userInput)", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def haltExec(self, input_pin=0):\n if not 0 <= input_pin < 2:\n raise(ValueError('`input_pin` [{0}] must be between 0 and 2'\n ''.format(input_sig)))\n cmd_string = 'H{0}'.format(input_sig)\n return self.sendRcv(cmd_string)", "def stdin_thread(self):\n while True:\n if not self.is_running():\n time.sleep(0.1)\n continue\n msg = self._stdin_queue.get()\n if msg is None:\n break # Ask to stop\n self._say(msg)", "def quit():\n while True:\n try:\n choice = input('press q to quit \\n r to restart')\n choice = choice.lower() # sanitize inputs before comparision\n\n except TypeError:\n print('Please enter q to quit or r to restart')\n if choice not in ('q', 'r'):\n continue\n else:\n break\n if choice == 'q':\n return True\n elif choice == 'r':\n return False", "def input(self):\n try:\n temp = ord(raw_input())\n self.tape.replace(temp)\n except:\n print \"Error -002\"\n raise", "def get_input():\n return getch()", "def receive_interrupt_request(self, _: EmptyMsg):\n self.renderer.interrupt()", "def signal_handler(signal, frame): \n import signal\n import sys\n from time import localtime, strftime\n time = strftime(\"%H:%M:%S\", localtime())\n sel = raw_input('\\n\\n%s: Paused. 
Press return to resume, or type exit to quit: \\n' % time)\n if sel.startswith('e') or sel.startswith('E'):\n sys.exit(0)\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation resumed.\\n' % time", "def inask(question: str) -> str:\n answer = input(question)\n return answer", "def input(self, prompt):\r\n return console_input(prompt)", "def on_KeyboardInterrupt(player):\n print(\"paused by KeyboardInterrupt\")\n player.edit()", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n self.again = False", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def switch_input(cls):\n try:\n assert globals()[\"input\"]\n cls.restore_input()\n except KeyError:\n cls.override_input()", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)", "def HandleInput(self, input):\r\n if input.IsKeyDown(K_UP, once=True):\r\n self.selected_option -= 1\r\n if self.selected_option < 0:\r\n self.selected_option = len(self.options) - 1\r\n if input.IsKeyDown(K_DOWN, once=True):\r\n self.selected_option += 1\r\n if self.selected_option >= len(self.options):\r\n self.selected_option = 0\r\n \r\n # Selection\r\n if input.IsKeyDown(K_RETURN, once=True):\r\n self.SelectOption()\r\n \r\n # If they hit ESC, leave bartering\r\n if input.IsKeyDown(K_ESCAPE, once=True):\r\n Log('Quick removed barter from dialogue.')\r\n self.dialogue.SelectLastConversation()", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. 
Try again.\")\n continue\n else:\n break\n\n return value", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def prompt_user():\n print()\n while True:\n print('Please choose one of the following options:')\n print(\"1: Send a Thank You\")\n print(\"2: Create a report\")\n print(\"3: Send letters to everyone\")\n print(\"4: Match donations\")\n print(\"5: Quit\")\n try:\n return int(input(\"Option: \"))\n except ValueError as e:\n print(\"***INVALID Option Selected***\")", "def getInput(self):\n\t\tkeyPressed = self.screen.getch()\n\t\tif keyPressed == 113:\t\t# <escape>\n\t\t\tself.terminate()\n\t\t\tself.running = False\n\t\treturn keyPressed \t\t# return key for (possible) further action in calling program", "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def signal_handler(signal, _): \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)", "def instruction_in(self, register):\n if len(self.input_buffer) == 0:\n user_input = raw_input() + '\\n'\n self.input_buffer = deque(user_input)\n\n char = self.input_buffer.popleft()\n value = ord(char)\n\n self.set_register(register, value)", "def interact(self):\n print('Ready to interact on socket connected with {}.'.format(self.remote_addr))\n try:\n # get initial input from user\n print('Enter input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n while True:\n if data.startswith('exit'):\n print('[*] Closing remote shell.')\n self.close()\n break\n # wait for response from target host\n recv_len = 1\n response = ''\n while recv_len:\n data = self.remote_socket.recv(4096)\n recv_len = len(data)\n response += data.decode()\n if recv_len < 4096:\n break\n print(response)\n # get further input from user\n print('Enter further input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n except Exception as e:\n print(e)\n print('[*] Closing remote shell.')\n self.close()", "def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()", "def interrupt(self):\r\n self.interrupting = True", "def input(self, message=''):\r\n from javax.swing import JOptionPane\r\n return JOptionPane.showInputDialog(frame, message)", "def ask(question=WARNING_DIFF):\n\t\t\tfd = sys.stdin.fileno()\n\n\t\t\toldterm = termios.tcgetattr(fd)\n\t\t\tnewattr = termios.tcgetattr(fd)\n\t\t\tnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n\t\t\ttermios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n\t\t\toldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\t\t\tself.stdout.write(question)\n\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfirstCharacter = sys.stdin.read(1)\n\t\t\t\t\t\treturn forceUnicode(firstCharacter) in (u\"y\", u\"Y\")\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\ttermios.tcsetattr(fd, termios.TCSAFLUSH, 
oldterm)\n\t\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)", "def loop(self):\n\t\twhile (self.quit == 0):\n\t\t\ttry:\n\t\t\t\tuserInput = raw_input(\"> \")\n\t\t\t\tself.processCommand(userInput)\n\t\t\texcept EOFError:\n\t\t\t\tsys.exit()\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.exit()", "def ask_for_interface():\n return input(\"Interface name : \")", "def _interrupt(self, signum: int, frame: Optional[Any]) -> None:\n if self._in_task(frame):\n raise KeyboardInterrupt\n else:\n self._interrupted = True\n self._ready_tasks.interrupt()", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(1) # no match", "def enter():\n input(\"\\nClick Enter to continue \")", "def confirm_prompt(prompt):\n while True:\n print(prompt, end=' [Y/n]: ')\n\n if not os.isatty(sys.stdout.fileno()):\n print(\"Not running interactively. Assuming 'N'.\")\n return False\n pass\n\n r = input().strip().lower()\n if r in ['y', 'yes', '']:\n return True\n elif r in ['n', 'no']:\n return False", "def prompt(self):\n # TODO: fix null input\n print('Enter user input: ')\n userinput = input()\n print(f'User chose: {userinput}')\n return userinput", "def check_or_exit(msg):\n while True:\n user_input = raw_input(\"%s (y/n): \" % msg).lower()\n if user_input in ['y', 'yes']:\n print\n return\n if user_input in ['n', 'no']:\n print\n print_warning(\"Please complete the required steps and then \"\n \"re-run the script.\")\n sys.exit(1)", "def safe_input(response):\n try:\n return input(response)\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def pause():\n global pause_continue\n if pause_continue:\n return\n line = input(\n \"Paused. 'q' to quit, 'c' to continue without pausing, anything else to step.\"\n )\n if line:\n if line[0] == \"q\":\n exit(0)\n if line[0] == \"c\":\n pause_continue = True", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def prompt_user(prompt: str) -> bool:\n positive_response = {'yes', 'y', 'ye', '', 'ok'}\n negative_response = {'no', 'n'}\n\n while True:\n answer = input(prompt).lower()\n if answer in positive_response:\n return True\n elif answer in negative_response:\n return False\n else:\n print(\"Please respond with 'yes' or 'no'\\n\", file=sys.stderr)", "def control(message, the_function):\n while True:\n user_choice = input(message)\n if user_choice in ('Y', 'y'):\n the_function()\n elif user_choice in ('N', 'n'):\n print(\"exiting program.....\")\n print(\"Have a nice day!\")\n break\n else:\n print(\"Not a valid option, try again\")", "def _handle_interrupts(signal_number, current_stack_frame):\n print(\" Interrupted!\\n\", file=sys.stderr)\n _display_help()\n sys.exit(0)", "def alert_on_error(error_msg: str, prompt_user: bool):\n print(error_msg)\n g.alerts.send(error_msg)\n if prompt_user:\n while True:\n user_response = input(\"Continue execution? 
[Y/N]\\n\").upper()\n if user_response == \"Y\":\n break\n elif user_response == \"N\":\n raise KeyboardInterrupt\n else:\n print(\"Please type in 'Y' or 'N' as a response.\")", "def get_user_input(self):\n while not self.suspended:\n input = raw_input()\n input = input.split('|')\n if input[0] in ['exit', 'quit', 'kill']:\n self.broadcast('kill')\n self.suspended = True\n for client in self.clients.values():\n client.socket.close()\n self.s.close() # Have to connect to socket to exit server.\n sock = socket(AF_INET, SOCK_STREAM)\n port = bind_to_random(sock)\n sock.connect((str(self.ip), self.port))\n elif len(input) > 1:\n msg = '|'.join(['#server']+input[1:])\n if input[0][:1] == '@':\n destination = input[0][1:].lower()\n if destination == 'server':\n print msg\n elif destination == 'all':\n self.broadcast(msg)\n else:\n client = self.clients.get(destination, None)\n if client:\n client_send(client.socket, msg)\n else:\n print 'Destination not active'\n else:\n print msg", "def waitenter(times=1):\n\n # For each time\n for _ in range(times):\n # Ask for user input\n input(\"\")", "def _confirm(message):\n result = ''\n while result not in ('y', 'n'):\n try:\n result = raw_input('%s Continue (y/n)? ' % message)\n except EOFError:\n result = 'n'\n return result == 'y'", "def deny():\n raise InterruptEvent", "def inp(text):\r\n input(text)", "def exit_prompt(message=''):\r\n if message != '': print(str(message))\r\n input('\\nPress [Enter] to exit...')\r\n sys.exit()", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def takeInAndConfirmUserInput():\n validInput = False\n userInput = \"\"\n while validInput != True:\n userInput = input(\"~ \")\n\n print(f\"you have written {userInput}, is this correct? y/[n]\")\n\n confirmation = input(\"~ \")\n\n if confirmation.lower() == \"y\":\n validInput = True\n\n return userInput", "def textinput(self, title, prompt):\n return simpledialog.askstring(title, prompt)", "def prompt(prompt, validator=(lambda x: True), hint=None):\n user_input = input(prompt)\n while not validator(user_input):\n user_input = input(prompt)\n return user_input", "def user_input():\n ans = input('Continue? 
: y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def interrupt(self):\n raise NotImplementedError", "def get_input(label):\n result = input(label)\n sounds.play_input_beep()\n return result", "def raw_input(self, prompt=''):\r\n \r\n newQueue = Queue()\r\n \r\n self.alert.append(newQueue)\r\n \r\n def requestItem(prompt=''):\r\n out = newQueue.get()\r\n return out\r\n \r\n return requestItem", "def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)", "def rawInputWithCheck(prompt):\n proceed = False\n i = None\n while not(proceed):\n i = raw_input(prompt)\n print \"Is this correct?\"\n print ' '*3, repr(i)\n proceed = YNInput(' '*2)\n return i", "def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'", "def get_input():\n letters = input('Enter letters, Enter to quit:\\n')\n return letters", "def keyboard_interrupt_handler(interrupt_signal, frame):\n print(\"Scanning finished\")\n print(\"KeyboardInterrupt ID: {} {} has been caught.\".format(interrupt_signal, frame))\n exit(1)" ]
[ "0.7103245", "0.6709058", "0.6595114", "0.6572123", "0.65524346", "0.6543129", "0.65140307", "0.6374105", "0.63740534", "0.631756", "0.6300449", "0.628559", "0.6279168", "0.6268421", "0.62003326", "0.61359113", "0.6018758", "0.6006392", "0.5985524", "0.5977991", "0.5903809", "0.5901102", "0.58932143", "0.58821493", "0.5881889", "0.58528554", "0.5851489", "0.5829887", "0.58218426", "0.580879", "0.5799902", "0.5769742", "0.57581633", "0.57563305", "0.5753157", "0.5742028", "0.5738671", "0.5709427", "0.5626534", "0.5598273", "0.55768156", "0.55691", "0.5550456", "0.5545746", "0.5534817", "0.55345047", "0.55338264", "0.5528914", "0.5516103", "0.55097866", "0.5506788", "0.5506088", "0.5502953", "0.54993415", "0.54712605", "0.54712605", "0.54697394", "0.5463613", "0.544483", "0.54409194", "0.5439392", "0.5437502", "0.5433869", "0.54304296", "0.54197", "0.5418252", "0.5416434", "0.541478", "0.5401092", "0.5387482", "0.53832775", "0.5381311", "0.5373347", "0.5348624", "0.53456473", "0.5344623", "0.53398114", "0.53249764", "0.53244877", "0.53225887", "0.5320025", "0.531944", "0.5319373", "0.5300253", "0.528626", "0.5285827", "0.528339", "0.52782756", "0.5273595", "0.52722824", "0.52722716", "0.52668864", "0.5265775", "0.5263341", "0.5262927", "0.52520883", "0.52468044", "0.5242886", "0.52378786", "0.5235992", "0.5229826" ]
0.0
-1
Return canonical form for control state.
def canonical_ctrl_state(ctrl_state, num_qubits): if not num_qubits: return '' if isinstance(ctrl_state, CtrlAll): if ctrl_state == CtrlAll.One: return '1' * num_qubits return '0' * num_qubits if isinstance(ctrl_state, int): # If the user inputs an integer, convert it to binary bit string converted_str = f'{ctrl_state:b}'.zfill(num_qubits)[::-1] if len(converted_str) != num_qubits: raise ValueError( f'Control state specified as {ctrl_state} ({converted_str}) is higher than maximum for {num_qubits} ' f'qubits: {2 ** num_qubits - 1}' ) return converted_str if isinstance(ctrl_state, str): # If the user inputs bit string, directly use it if len(ctrl_state) != num_qubits: raise ValueError( f'Control state {ctrl_state} has different length than the number of control qubits {num_qubits}' ) if not set(ctrl_state).issubset({'0', '1'}): raise ValueError(f'Control state {ctrl_state} has string other than 1 and 0') return ctrl_state raise TypeError('Input must be a string, an integer or an enum value of class State')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_state(self):\n return self.cleaned_data['state'].upper()", "def state(self) -> str:", "def native_value(self) -> str:\n if isinstance(self._state, Enum):\n return self._state.name.lower()\n return self._state.lower()", "def reflect_state(self, s):\n s[2:8] = reflect_control_vector(s[2:8])\n s[11:17] = reflect_control_vector(s[11:17])\n return s", "def state(self):\n return str(self)", "def state(self):\r\n return str(self)", "def state_raw(self):\n return self._state_raw", "def state(self):\n\n\t\treturn str(self)", "def input(self):\n return ''.join([state[1] for state in self.condensed_input_states])", "def canonical_code(self) -> Optional[pulumi.Input['StatusConditionCanonicalCode']]:\n return pulumi.get(self, \"canonical_code\")", "def logic_program_form(self):\r\n s = ''\r\n return s", "def normalize_state(self):\n self.state = 2 * (self.state - 0.5)", "def get_initial_state(self) -> str:\n return ''", "def getState( self, cCtrlName ):\n return self.getControlModelProperty( cCtrlName, \"State\" )", "def get_state_s(self, lower = True):\r\n\r\n state_s = STATE_STRINGS[self._state - 1]\r\n state_s = state_s.lower() if lower else state_s\r\n return state_s", "def __str__(self):\n return ''.join(str(e) + ' ' for e in self.state)", "def get_human_state(self):\n return ReferralState(self.state).label", "def state(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"state\")", "def canonicalize(self):\n return _libsbml.ASTNode_canonicalize(self)", "def state(self) -> str:\n return self._state", "def state(self) -> str:\n return self._state", "def state(self) -> str:\n return self._state", "def state(self) -> str:\n return self._state", "def state(self) -> str:\n return self._state", "def get_state(self, state):\n return state", "def state(self) -> str:\n try:\n state_bytes: bytes | None = self._redis.get(self._namespace(\"state\"))\n except RedisError:\n self.logger.error(\n \"RedisError: falling back to default circuit state\", exc_info=True\n )\n return self._fallback_circuit_state\n\n state = self._fallback_circuit_state\n if state_bytes is not None:\n state = state_bytes.decode(\"utf-8\")\n else:\n # state retrieved from redis was missing, so we re-initialize\n # the circuit breaker state on redis\n self._initialize_redis_state(self._fallback_circuit_state)\n\n return state", "def __getstate__(self):\n state = composites.Composite.__getstate__(self)\n state[\"o\"] = None\n return state", "def canonical(gra):\n can_key_dct = canonical_keys(gra, backbone_only=False)\n return relabel(gra, can_key_dct)", "def state(self) -> str | None:\n return self._state", "def getInitialState(self):\r\n return self.originalState", "def initialState(self):\n return \"\"", "def __getstate__(self):\n state = self.__dict__.copy()\n self.__cleanState__(state)\n return state", "def stateString(self):\n return self._mdp.stateString(self._cur_state);", "def logic_program_form(self):\n #TODO\n return ''", "def __getstate__(self):\n state = composites.Composite.__getstate__(self)\n return state", "def __str__(self):\n return \"\".join(list(map(lambda row: ''.join(row), self.state)))", "def state_string(self):\n return AddressStates._to_string(self.state)", "def get_initial_state(self):\n # type: () -> str\n return 'tia_diff'", "def logic_program_form(self):\r\n #TODO\r\n return ''", "def control_type(self):\n return self._control_type", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def state(self):\n # type: () -> string_types\n return self._state", "def 
canonical(self):\r\n if not hasattr(self, '_canonical'):\r\n self._canonical = conf.lib.clang_getCanonicalCursor(self)\r\n\r\n return self._canonical", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def state(self, state: str) -> None:", "def _state_container_prefix(self):\n return self._name_camel_plural", "def cntrl(self):\n return self._cntrl", "def canonicalize(self):\n self.rules = canonicalize_grammar(self,self.empty)\n self.is_canonical = True", "def normalize_instance_state(self, instance_state):\n return InstanceState.UNKNOWN", "def make_state_dict(self):\r\n state_dict = {c.TRANSITION_IN: self.transition_in,\r\n c.TRANSITION_OUT: self.transition_out,\r\n c.NORMAL: self.normal_update}\r\n\r\n return state_dict", "def __invert__(self):\n return self.wc", "def __invert__(self):\n return self.wc", "def state(self):\n return {self._reverse_mapping[k]: v for k, v in enumerate(self._state)}", "def states_initial(self):\n return self.states(\"Initial = YES\")", "def __getstate__(self):\n return self._", "def make_state_dict(self):\n state_dict = {c.TRANSITION_IN: self.transition_in,\n c.TRANSITION_OUT: self.transition_out,\n c.NORMAL: self.normal_update}\n\n return state_dict", "def state(self) -> str | None:\n return self._values.get(self.value_type)", "def getCheckBoxState( self, cCtrlName ):\n oControl = self.getControl( cCtrlName )\n return oControl.getState();", "def get_state_display(self, obj):\n return obj.get_state_display()", "def __getstate__(self):\n\n return self.get_DER()", "def __repr__( self ):\n\n return self.__class__.__name__ + \"( \" + repr(self.state) + \")\";", "def short_form(self):\n return normalize_pred_string(self.string)", "def state_name(self):\n return self.state.name if self.state else None", "def get_input_capacitance(self):\n return self.gate_c(self.nmos_width+self.pmos_width)", "def state_initial(self):\n return self.states_initial()[0]", "def gate_logic(self):\r\n return self._gate_logic", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def state(self) -> str:\n return pulumi.get(self, \"state\")", "def __getstate__(self):\n state = Object.__getstate__(self)\n state['_strain'] = set()\n return state", "def getstate(self):\n\t\tif not self._input: raise 
PlumberExceptions.PipeTypeException(self)\n\t\treturn self._state.ustate", "def successor_state(self):\n return self._successor_state", "def transposition_string(self, state):\n return state.__str__()", "def _get_state(self):", "def _repr_(self):\n return \"Transition from %s to %s: %s\" % (repr(self.from_state),\n repr(self.to_state),\n self._in_out_label_())", "def __str__(self):\n \n t=self.boolVals[:]\n t.reverse()\n bitstring=\"\"\n for i in range(len(t)):\n if t[i]:\n bitstring +=\"1\"\n else:\n bitstring +=\"0\"\n \n return bitstring", "def get_final_machine_state(self):\n\t\treturn self.machine_state", "def silly(self) -> str:\n print(f\"Getting {self._name}'s State\")\n return self._state", "def state(self):\n if self._state is None:\n return None\n\n if self._sensor_type in [ATTR_CYCLE1_START, ATTR_CYCLE2_START, ATTR_TIME]:\n if self._state[0] == 255:\n return \"Disabled\"\n return '{:02d}:{:02d}'.format(self._state[0], self._state[1])\n elif self._sensor_type == ATTR_STATUS:\n return STATUS_CHOICES[self._state]\n\n return self._state", "def state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"state\")", "def state(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"state\")" ]
[ "0.6335245", "0.5661792", "0.56419164", "0.56038505", "0.55594033", "0.55442035", "0.5506558", "0.54213786", "0.54014575", "0.5394251", "0.5330471", "0.5314586", "0.52940786", "0.5290036", "0.52667534", "0.5218633", "0.5182664", "0.5157008", "0.5146271", "0.511659", "0.511659", "0.511659", "0.511659", "0.511659", "0.5090097", "0.5071018", "0.50652236", "0.50629073", "0.5046554", "0.5044131", "0.50358164", "0.50235975", "0.49986187", "0.49928614", "0.49704525", "0.49605343", "0.49346015", "0.4922591", "0.49179748", "0.49077004", "0.48899734", "0.4884234", "0.48771903", "0.48759407", "0.48724538", "0.48594224", "0.4857814", "0.48552412", "0.48452634", "0.48413742", "0.48411244", "0.48411244", "0.48351148", "0.48308513", "0.48288622", "0.48105764", "0.47940627", "0.4791608", "0.47869962", "0.47867575", "0.47864968", "0.47849002", "0.47810414", "0.4779152", "0.47656006", "0.47612768", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47533646", "0.47521815", "0.47489625", "0.47415432", "0.4739375", "0.4729795", "0.47189465", "0.47169828", "0.4712273", "0.47074598", "0.4706356", "0.46972662", "0.46972662", "0.46972662", "0.46972662" ]
0.61829215
1
Return True if command cmd has a compute/uncompute tag.
def _has_compute_uncompute_tag(cmd): for tag in cmd.tags: if tag in [UncomputeTag(), ComputeTag()]: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isCmdStandalone(tgen):\n features = getattr(tgen, 'features', [])\n otherFeatures = set(features) - set(('runcmd', ))\n return not otherFeatures and getattr(tgen, 'rule', None) is None", "def isOp(self):\n return True", "def is_cmd(self, name):\n \n return name in self.cmds", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def is_instruction(self):\n return False", "def is_compute(self, nb_iterations):\n return nb_iterations % self.nb_iterations_between_compute == 0", "def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def is_empty():\n try:\n command(\"T\")\n except EppException:\n return False\n else:\n return True", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def has_negative_control(cmd):\n return get_control_count(cmd) > 0 and '0' in cmd.control_state", "def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)", "def has_openstack_compute(labels):\n if not labels:\n return False\n\n for label in labels:\n if label.label_key == helm_common.LABEL_COMPUTE_LABEL and label.label_value:\n return helm_common.LABEL_VALUE_ENABLED == label.label_value.lower()\n\n # We haven't found the openstack compute node key. 
Return False\n return False", "def fingertip_no_recompute(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_recompute\", False)", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False", "def isComputed(self) -> bool:\n ...", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True", "def has_commands(self) -> bool:\n return len(self.commands) > 0", "def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False", "def is_instruction(self):\n return True", "def is_empty(self) -> bool:\n return self.command is None and not self.required", "def has_flag(flag, cmd):\n return bool(next((x for x in cmd if x.startswith(flag)), False))", "def is_command(oin, env, pred_name: YPredName, arg: Any=None):\n return (env.check_predicate(obj, pred_name, arg) for obj in oin)", "def is_computing(self):\n raise NotImplementedError", "def is_command(self, text):\n return text.split(' ', 1)[0].startswith(\"!\")", "def _is_pop_command(self):\n return self._match_memory_pattern(\"pop\")", "def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False", "def _is_push_command(self):\n return self._match_memory_pattern(\"push\")", "def is_valid_command(command):\n return is_get(command) or is_insert(command) or is_update(command) or is_delete(command) or is_showall(command) or is_search(command)", "def has_command_with_name(self, command_name):\n return command_name in self.commands", "def is_pure(self) -> bool:\r\n return self.is_valid and np.all([x[\"operation\"].is_pure for x in self.operations_by_name.values()])", "def is_trusted_portion_of_utask(command_name):\n task_type = COMMAND_TYPES[command_name]\n # Postprocess and preprocess tasks are executed on tworkers, while utask_mains\n # are executed on uworkers. 
Note that the uworker_main command will be used to\n # execute uworker_main, while the name of the task itself will be used to\n # request execution of the preprocess step.\n return task_type in (PostprocessTask, UTask)", "def assumed_state(self):\n return self._command_state is False", "async def contains(self, operation: Operation) -> bool:\n return operation.instance_name in self.operations", "def __bool__(self):\n return any(self.smask)", "def __commandExists(self, command, cmdtype):\n try:\n # method exists\n if hasattr(self, self.__getFullCommandName(command, cmdtype)):\n # command handler type exists\n if self.__commandHandlerTypeExists(cmdtype):\n return True\n else:\n return False\n else:\n return False\n # any key does not exist\n except KeyError:\n return False", "def is_empty(self):\n return len(self.commands) == 0", "def isSetOperation(self):\n return _libsbml.FluxBound_isSetOperation(self)", "def is_config(command): \n if command.startswith('<') and command.endswith('>') and \\\n ('WRITE' not in command) and ('READ' not in command):\n return True\n else:\n return False\n # end if", "def is_resumable(self, tag=\"current\"):\n\n flag_file = os.path.join(self._dir_name, tag, \"flag.p\")\n if os.path.isfile(flag_file):\n return True\n else:\n return False", "def initialized(self):\n return len(self.ops) > 0", "def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__", "def _has_nc_config():\n return _has_prog(\"nc-config\")", "def has_tag(self, tag):\n return tag in self.tags", "def has_tag(self, tag):\n return tag in self.tags", "def validate_command(command):\n return command in list(VALID_COMMANDS.keys())", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def hasMACCommands(self):\n return hasattr(self, 'commands') and len(self.commands) > 0", "def need_context_computation(self):\n\n # We check that symbols preceding the completion is smthing like foo.bar.\n if re.match('.*\\..*', self.get_word_before()):\n return True\n\n\n return False", "def is_valid_command(self, string):\n return string[:3] == \"--!\"", "def needsProcessing(self):\n return self.isMarkdown() or self.hasMetadata()", "def verify_tag(tag):\n command = [\"git\", \"tag\", \"--points-at\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n tag_str = proc.stdout.readline()\n return tag_str.decode(\"utf-8\").rstrip() == tag", "def is_tag(self):\n return (self.__type & NODE_TAG) == NODE_TAG and not self.is_root", "def __is_active(self, command):\n return True", "def should_execute(self, message):\n\t\tif self.command_str is not None:\n\t\t\treturn message.content.startswith(\"{}{}\".format(cmd_prefix, self.command_str))\n\t\telse:\n\t\t\treturn False", "def _like_rnncell(cell):\n conditions = [hasattr(cell, \"output_size\"), hasattr(cell, \"state_size\"),\n hasattr(cell, \"zero_state\"), callable(cell)]\n return all(conditions)", "def is_cumulative(self):\n return self.name in (\n \"app.cpu\",\n \"app.uptime\",\n \"app.disk.bytes\",\n \"app.disk.requests\",\n \"app.mem.majflt\",\n \"app.io.wait\",\n )", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "def has_more_commands(self):\n return self._current_inst < len(self._lines) - 1", "def is_arithmetic_op(self):\r\n return self.value in [\"+\", \"-\"]", "def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return 
num_qubits <= 2", "def command_registered(self, command: str) -> bool:\n return command in self._commands", "def is_using(self, inst):\n if self.op_name in spirv.DECORATION_INSTRUCTIONS:\n return False\n if self.op_name in spirv.DEBUG_INSTRUCTIONS:\n return False\n if self.type_id == inst.result_id:\n return True\n for operand in self.operands:\n if operand == inst.result_id:\n return True\n return False", "def in_host():\n return not in_docker()", "def is_button(widget):\n # CEBALERT: document why try/except is needed\n try:\n button = 'command' in widget.config() and not hasattr(widget,'toggle')\n except T.TclError:\n button = False\n return button", "def not_in_image(self) -> bool:\n return not self.vector", "def _is_unary_op(op):\n if op.type == TokenType.BitwiseNot:\n return True\n return False", "def hasaccelerator():\n\n return torch.cuda.is_available() or torch.backends.mps.is_available() or bool(Models.finddevice())", "def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd", "def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass", "def has_expression(self):\n return self._expression is not None", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n return self.ctx.running_calc < 2 and self.can_restart()", "def has_side_effect(self):\n # XXX Need to handle OpExtInst correctly (it is conservative now)\n if self.result_id is None:\n return True\n return self.op_name in spirv.HAS_SIDE_EFFECT", "def has_compiled(self, phase='predict'):\n return self._graph_executor.has_compiled(phase)", "def _preprocess(self):\n if not super()._preprocess():\n return False\n\n # Format the arguments\n self._args.operation = self._args.operation.lower()\n\n # Check the arguments and generate the commands\n op = self._args.operation\n if op not in self.__operations:\n self._result.set_return_code(ReturnCode.INVALID_ARGUMENT)\n logger.error(\n 'Unsupported operation of NCCL test - benchmark: {}, operation: {}, expected: {}.'.format(\n self._name, op, ' '.join(list(self.__operations.keys()))\n )\n )\n return False\n else:\n self._bin_name = self.__operations[op]\n if not self._set_binary_path():\n return False\n\n command = os.path.join(self._args.bin_dir, self._bin_name)\n command += ' -b {} -e {} -f {} -g {} -c {} -n {} -w {}'.format(\n self._args.minbytes, self._args.maxbytes, str(self._args.stepfactor), str(self._args.ngpus),\n str(self._args.check), str(self._args.iters), str(self._args.warmup_iters)\n )\n self._commands.append(command)\n\n return True", "def supports_operation(self, operation: str) -> bool:\n return True", "def finishesCommand(self):\n\n return self.flag in ':fF'", "def check_commands(tags):\r\n return [tag for tag in tags.keys() if tag in COMMANDLIST]", "def is_command_valid(command):\n if not command:\n return False\n\n try:\n # call command silentyly\n with open(devnull, 'wb') as no_out:\n subprocess.call(command, stdout=no_out, stderr=no_out)\n except OSError:\n return False\n else:\n return True", "def __bool__(self) -> bool:\n return 
self._rpc is not None", "def has_more_commands(self):\n return not self.eof", "def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False", "def is_implemented(self, strcommand):\n result = ct.c_bool()\n command = ct.c_wchar_p(strcommand)\n self.lib.AT_IsImplemented(self.AT_H, command, ct.addressof(result))\n return result.value", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def is_cont_node():\n return False", "def is_valid_executes(args, skip=False):\n if is_valid_command(args) or skip:\n if args.executes is not False:\n return True\n return False", "def _msg_is_command(self, msg):\n return isinstance(msg, dict)", "def docker_available(): # type: () -> bool\n return bool(get_docker_command())", "def is_cmd_mailbox_idle(self):\n mb_cmd_word = self.microblaze.read(MAILBOX_OFFSET +\n MAILBOX_PY2IOP_CMD_OFFSET)\n return (mb_cmd_word & 0x1) == 0", "def _has_outdir(self, cmd):\n\n cmd = [self._tool_path, cmd, \"-h\"]\n helptext = self._command(cmd)\n\n return \"outdir\" in helptext", "def is_of_type(cmd):\r\n raise NotImplementedError()", "def isUnConditional(self) -> bool:\n ...", "def cmdDone(self, cmdData):\n # or the done bit and error bit\n return True if ((cmdData & i2cDoneMask) == i2cDoneBit) else False", "def is_valid_command(command):\n # TODO(etscrivner): Eventually we'd like to construct this dynamically from\n # a list of all available commands\n valid_commands = [\n 'add', 'append', 'decr', 'delete', 'flush_all', 'get', 'gets', 'incr',\n 'prepend', 'quit', 'replace', 'set', 'stats', 'verbosity', 'version',\n ]\n\n if not command:\n return False\n\n parts = command.split('\\r\\n')\n command_parts = parts[0].split(' ')\n\n command = command_parts[0]\n return command.strip().lower() in valid_commands", "def is_atomic(self):\n found = True\n if self.ant is not None:\n for p in self.ant:\n if p.conn != 'at':\n found = False\n if self.con is not None:\n for prop in self.con:\n if prop.conn != 'at':\n found= False\n return found", "def _is_desired_tag(self, tag):\n if self._tags is None:\n return True\n\n if self._ignore_namespace:\n for desired_tag in self._tags:\n if tag.localname == desired_tag.localname:\n return True\n else:\n for desired_tag in self._tags:\n if tag == desired_tag:\n return True\n\n return False", "def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )" ]
[ "0.581931", "0.5786073", "0.57732224", "0.5759559", "0.5728099", "0.5650557", "0.5638925", "0.5607169", "0.56052583", "0.55509794", "0.5506383", "0.55021065", "0.5499619", "0.54725796", "0.54692024", "0.546461", "0.5463064", "0.5448807", "0.5434303", "0.5420862", "0.5408913", "0.539978", "0.5390406", "0.53820294", "0.53612214", "0.5352319", "0.5343789", "0.53217447", "0.5299678", "0.52534163", "0.5238882", "0.52055115", "0.5191993", "0.51819605", "0.51633275", "0.51534235", "0.5144324", "0.51379424", "0.5134731", "0.5099121", "0.509177", "0.5067313", "0.5065212", "0.5064955", "0.5045218", "0.50439", "0.50429773", "0.50429773", "0.50429124", "0.5004885", "0.49939036", "0.49847656", "0.49833328", "0.49755692", "0.49731517", "0.49688557", "0.49558514", "0.49486583", "0.49391735", "0.49374142", "0.49334714", "0.4928381", "0.49280697", "0.49176246", "0.49130124", "0.49091625", "0.488923", "0.48826718", "0.4876803", "0.48494586", "0.48478714", "0.484182", "0.4835565", "0.4834973", "0.48205626", "0.4805194", "0.48038667", "0.48022977", "0.48000702", "0.4799032", "0.47962755", "0.47884846", "0.47868535", "0.47751814", "0.47721556", "0.47706082", "0.4769988", "0.476129", "0.4750547", "0.47433504", "0.47309852", "0.47303903", "0.47290927", "0.47290006", "0.47285587", "0.4726817", "0.47186193", "0.47131938", "0.47103208", "0.47094893" ]
0.9016012
0
Initialize the control engine.
def __init__(self, qubits, ctrl_state=CtrlAll.One): super().__init__() self._qubits = qubits self._state = ctrl_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def controls_setup(self):\n pass", "def _initialize(self):\n self.send_init_command()", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def do_init(self):\n\n pass", "def initialise(self):\n self.set_up()", "def on_init(self):\n self.controller = gameController.Controller()", "def init():", "def init(self):\n self.connect_to_switches()\n self.reset_states()", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def run_init(self):\n InitEditor(self.root, self)", "def __init__(self):\n self.modes = {}\n self.modelist = []\n self.mode = 'main'\n self.defs = {}\n events.bind(Key=self.dispatch)", "def init():\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self):\n\n ShowBase.__init__(self)\n controls.setup_mouse()\n self.tpp_camera = TPPCamera()\n\n try:\n self.world = World()\n except OSError:\n raise\n\n self.physics = Physics(self.world.player)\n base.taskMgr.add(self.__main_loop, \"__main_loop\")", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def _initControls(self):\n\n print \"DEBUG: Initializing Controls\"\n Game.Controls[pygame.K_a] = Game.MoveLeft\n Game.Controls[pygame.K_d] = Game.MoveRight\n Game.Controls[pygame.K_w] = Game.Jump\n Game.Controls[pygame.K_s] = Game.Duck\n Game.Controls[pygame.K_SPACE] = Game.Fly\n Game.Controls[pygame.K_j] = Game.Fire\n Game.Controls[pygame.K_ESCAPE] = Game.Quit\n\n Game.BoundControls.append(pygame.K_a)\n Game.BoundControls.append(pygame.K_d)\n Game.BoundControls.append(pygame.K_w)\n Game.BoundControls.append(pygame.K_s)\n Game.BoundControls.append(pygame.K_j)\n Game.BoundControls.append(pygame.K_SPACE)\n Game.BoundControls.append(pygame.K_ESCAPE)", "def initialize(self):\n self.Update()\n ViewportManager.updateAll()\n self.wxStep()\n ViewportManager.initializeAll()\n # Position the camera\n if base.trackball is not None:\n base.trackball.node().setPos(0, 30, 0)\n base.trackball.node().setHpr(0, 15, 0)\n\n # to make persp view as default\n self.perspViewMenuItem.Check()\n self.onViewChange(None, 3)\n\n # initializing direct\n if self.fStartDirect:\n base.startDirect(fWantTk = 0, fWantWx = 0)\n\n base.direct.disableMouseEvents()\n newMouseEvents = [\"_le_per_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.mouseEvents]\n base.direct.mouseEvents 
= newMouseEvents\n base.direct.enableMouseEvents()\n\n base.direct.disableKeyEvents()\n keyEvents = [\"_le_per_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.keyEvents]\n base.direct.keyEvents = keyEvents\n base.direct.enableKeyEvents()\n\n base.direct.disableModifierEvents()\n modifierEvents = [\"_le_per_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.modifierEvents]\n base.direct.modifierEvents = modifierEvents\n base.direct.enableModifierEvents()\n\n base.direct.cameraControl.lockRoll = True\n base.direct.setFScaleWidgetByCam(1)\n\n unpickables = [\n \"z-guide\",\n \"y-guide\",\n \"x-guide\",\n \"x-disc-geom\",\n \"x-ring-line\",\n \"x-post-line\",\n \"y-disc-geom\",\n \"y-ring-line\",\n \"y-post-line\",\n \"z-disc-geom\",\n \"z-ring-line\",\n \"z-post-line\",\n \"centerLines\",\n \"majorLines\",\n \"minorLines\",\n \"Sphere\",]\n\n for unpickable in unpickables:\n base.direct.addUnpickable(unpickable)\n\n base.direct.manipulationControl.optionalSkipFlags |= SKIP_UNPICKABLE\n base.direct.manipulationControl.fAllowMarquee = 1\n base.direct.manipulationControl.supportMultiView()\n base.direct.cameraControl.useMayaCamControls = 1\n base.direct.cameraControl.perspCollPlane = self.perspView.collPlane\n base.direct.cameraControl.perspCollPlane2 = self.perspView.collPlane2\n\n for widget in base.direct.manipulationControl.widgetList:\n widget.setBin('gui-popup', 0)\n widget.setDepthTest(0)\n\n # [gjeon] to intercept messages here\n base.direct.ignore('DIRECT-delete')\n base.direct.ignore('DIRECT-select')\n base.direct.ignore('DIRECT-preDeselectAll')\n base.direct.ignore('DIRECT-toggleWidgetVis')\n base.direct.fIgnoreDirectOnlyKeyMap = 1\n\n # [gjeon] do not use the old way of finding current DR\n base.direct.drList.tryToGetCurrentDr = False\n\n else:\n base.direct=None\n #base.closeWindow(base.win)\n base.win = base.winList[3]", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def __init__(self):\n # clears the console window\n if sys.platform in ('linux-i386','linux2'):\n os.system(\"clear\")\n elif sys.platform in ('win32','dos','ms-dos'):\n os.system(\"cls\")\n\n # print scripts info\n print self.WELCOME_MESSAGE\n\n # initialize all instance variables\n self.guiElements = {} # dictionary of gui elements (buttons, strings, sliders, ...)\n self.gui_events = [] # list of events\n self.gui_event_ids = {} # dictionary of event ids\n self.config = {} # configuration dictionary\n self.target = None # import or export\n self.callback = None # function to call when config gui is done\n self.texpathIndex = 0\n self.texpathCurrent = ''\n\n # reset GUI coordinates\n self.xPos = self.XORIGIN\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]\n\n # load configuration\n self.load()", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = 
aioweb.AppRunner(self.app)", "def _initComponent(self):\n\n self.optimizer = self._initOptimizer()\n self.scheduler = self._initScheduler()", "def __init__(self):\n BasicEngine.__init__(self)\n self._active_qubits = 0\n self._num_rotations = 0\n self._rotations = []", "def initialize(self):\n\t\tpass", "def init():\n # vs_reset(None)\n global controllers\n # create global valuaspace controller for ipython\n g.visit_tree_item.add(colorize_headlines_visitor)\n g.registerHandler('after-create-leo-frame', onCreate)\n g.plugin_signon(__name__)\n return True", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "def __init__(self, properties):\n super(Engine, self).__init__(self, constants.SYSTEM_PLUGIN_NAME)\n self.context._plugin_id = 0\n self._logger = self.context.get_logger()\n\n # Engine properties\n if not isinstance(properties, dict):\n self._properties = {}\n else:\n self._properties = properties.copy()\n\n #Init engine plugin UUID\n self._properties[constants.PROP_UID] = str(self.uid)\n\n # Next plugin Id (start at 1, as 0 is reserved for the engine itself)\n self._next_plugin_id = 1\n\n #Plugins dict pluginId->plugin\n self._plugins = {}\n self._plugins_lock = threading.RLock()\n\n self._event_dispatcher = EventDispatcher(self)\n self._start_level = 0\n self._state = states.STARTING\n self._logger.info(\"Engine successfully created\")", "def __init__(self):\n game_engine = get_gameengine()\n if game_engine is not None:\n self = game_engine\n else:\n ## The targeted frames per second\n self.target_fps = 200\n\n ## The start time\n self.time = time.time()\n\n ## A list of all registered game objects\n self.game_objects = list()\n\n ## A list of colliders\n self.colliders = list()\n\n ## Manage the user inputs\n self.input_manager = InputManager(self)\n\n ## Determines the state of the Game Engine\n self.running = False\n\n ## Variable to pause the Game Engine\n self.paused = False\n\n self.time_node = pm.PyNode('time1')\n # end if", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def initengine(self):\n\n\t\tif glutInit(sys.argv):\n\t\t\tglutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\n\n\t\t\t# get a 640 x 480 window\n\t\t\tglutInitWindowSize(640, 480)\n\n\t\t\t# the window starts at the upper left corner of the screen\n\t\t\tglutInitWindowPosition(0, 0)\n\t\t\tself.window = glutCreateWindow(\"test\")\n\t\t\tglutDisplayFunc(render)\n\t\t\tglutIdleFunc(doAnimationStep)\n\t\t\tglutKeyboardFunc(keyPressed)\n\t\t\tinitgl(640, 480)\n\t\t\tglutMainLoop()", "def _init(self):\n pass", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.screen = None\n self.engine = None\n self.engines = []\n self.i_e = InputEngine()\n self.e_e = EventEngine(self.i_e)", "def init(): \n\tset_verbosity()\n\t_set_threads()\n\t_set_heartbeat()\n\t#_set_storage()\n\t\n\tinit_targets()\n\t\n\tsend_heartbeat(start=True)\n\t\n\tinfo_msg = \"init plugin script\"\n\tlogger.info(info_msg)\n\n\tinit_plugin()\n\n\tinfo_msg = \"loaded %s plugin(s)\" 
%(len(kb.plugins.handle))\n\tlogger.info(info_msg)", "def init(self) -> None:\n ...", "def Init(self, config):\r\n pass", "def _initialize_engine(self, engine: ExecutionEngine) -> None:\n _logger.debug('Middleware %r initialized with engine: %r', self, engine)", "def init_controls(self):\n\n\n controls_keypress_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'a': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'q': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land_approach(),\n 'v': lambda: self.toggle_use_voice(),\n 't': lambda: self.toggle_tracking(),\n 'k': lambda: self.toggle_distance_mode(),\n 'm': lambda: self.toogle_manual_control(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n \n \n \n \n \n \n # '0': lambda: self.drone.set_video_encoder_rate(0),\n # '1': lambda: self.drone.set_video_encoder_rate(1),\n # '2': lambda: self.drone.set_video_encoder_rate(2),\n # '3': lambda: self.drone.set_video_encoder_rate(3),\n # '4': lambda: self.drone.set_video_encoder_rate(4),\n # '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'a': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'q': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n controls_keypress_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'q': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'a': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: 
self.drone.land(),\n 'p': lambda: self.palm_land(),\n 't': lambda: self.toggle_tracking(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n '0': lambda: self.drone.set_video_encoder_rate(0),\n '1': lambda: self.drone.set_video_encoder_rate(1),\n '2': lambda: self.drone.set_video_encoder_rate(2),\n '3': lambda: self.drone.set_video_encoder_rate(3),\n '4': lambda: self.drone.set_video_encoder_rate(4),\n '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'q': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'a': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n if self.kbd_layout == \"AZERTY\":\n self.controls_keypress = controls_keypress_AZERTY\n self.controls_keyrelease = controls_keyrelease_AZERTY\n else:\n self.controls_keypress = controls_keypress_QWERTY\n self.controls_keyrelease = controls_keyrelease_QWERTY\n self.key_listener = keyboard.Listener(on_press=self.on_press,\n on_release=self.on_release)\n self.key_listener.start()", "def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED", "def _initialise_run(self) -> None:", "def initialize(self):\n watch_tv = self.args['watch_tv']\n cleaning = self.args['cleaning']\n self.sensor_living = self.get_app('globals').sensor_living # type: Sensor\n self.sensor_bedroom = self.get_app('globals').sensor_bedroom # type: Sensor\n self.sensor_spare = self.get_app('globals').sensor_spare # type: Sensor\n self.listen_state(self.watching_tv, watch_tv, new=\"on\")\n self.listen_state(self.stop_watching, watch_tv, new=\"off\")\n self.listen_state(self.clean_on, cleaning, new='on')\n self.listen_state(self.clean_off, cleaning, new='off')", "def init(self):\n self._frame_idx = 0\n if self.initialization is not None:\n del self.initialization\n self.initialization = None\n\n if self.config.initialization is not None:\n self.initialization = INITIALIZATION.load(self.config.initialization, **self.__kwargs)\n self.initialization.init()\n\n if self.preprocessing is not None:\n del self.preprocessing\n self.preprocessing = None\n\n if self.config.preprocessing is not None:\n self.preprocessing = Preprocessing(self.config.preprocessing, **self.__kwargs)\n\n if self.odometry is None:\n assert self.config.odometry is not None\n self.odometry = ODOMETRY.load(self.config.odometry, **self.__kwargs)\n\n assert self.odometry is not None\n self.odometry.init()\n if self.loop_closure is None and self.config.loop_closure is not None:\n self.loop_closure = LOOP_CLOSURE.load(self.config.loop_closure, **self.__kwargs)\n if self.loop_closure is not None:\n self.loop_closure.init()\n if self.config.backend is not None:\n self.backend = BACKEND.load(self.config.backend, **self.__kwargs)\n if self.backend is not None:\n self.backend.init()\n else:\n logging.warning(\"[SLAMAlgorithm]Defined a Loop Closure Algorithm Without a Backend\")", "def on_start(self):\n self.init()", "def __init__(self):\n\n self.frameCount = 0\n 
self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def initialize(self): \r\n pass", "def onInit(self):\n pass", "def __init__(self):\n self._workload = None\n self._engine = Engine()", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def init(self):\n self.focus_modes = []\n for focus_mode in self['focusModes']:\n self.focus_modes.append(\\\n {'modeName': focus_mode.modeName,\n 'lensCombination': eval(focus_mode.lensCombination),\n 'lensModes': eval(focus_mode.lensModes),\n 'size': eval(focus_mode.size),\n 'message': eval(focus_mode.message),\n 'diverg': eval(focus_mode.divergence)})\n self.focus_motors_dict = {}\n\n focus_motors = []\n focus_motors = eval(self.getProperty('focusMotors'))\n\n for focus_motor in focus_motors:\n self.focus_motors_dict[focus_motor] = []\n\n #TODO\n self.motors_groups = [self.getObjectByRole(\"P14ExpTbl\"),\n self.getObjectByRole(\"P14KB\"),\n self.getObjectByRole(\"P14DetTrans\"),\n self.getObjectByRole(\"P14BCU\"),\n self.getObjectByRole(\"slitsMotors\")]\n \n\n if len(self.motors_groups) > 0:\n for motors_group in self.motors_groups:\n self.connect(motors_group,\n 'mGroupFocModeChanged',\n self.motor_group_focus_mode_changed)\n else:\n logging.getLogger(\"HWR\").debug('BeamFocusing: No motors defined')\n self.active_focus_mode = self.focus_modes[0]['modeName']\n self.size = self.focus_modes[0]['size']\n self.update_values()\n\n self.cmd_set_calibration_name = self.getCommandObject(\\\n 'cmdSetCallibrationName')\n try:\n self.cmd_set_phase = eval(self.getProperty('setPhaseCmd'))\n except:\n pass", "def __enter__(self):\n if len(self._qubits) > 0:\n engine = ControlEngine(self._qubits, self._state)\n insert_engine(self.engine, engine)", "def init(self):", "def init(self):", "def initialize(self) -> None:\n pass", "def initialize(self):\n return", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def autonomousInit(self):\n #self.timer.reset()\n #self.timer.start()\n pass", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def initialise(self):", "def initialize(self, cwrap):\n pass", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n 
self.init()", "def autonomousInit(self):\n self.globalInit()\n self.autonomous.start()", "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def __init__(self):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n self._format_default_functions()\n ## Check descriptormodel\n self._assert_correctness()", "def teleopInit(self):\n self.globalInit()\n self.teleop.start()", "def initialize(self):\n if not self._ready:\n self._real_initialize()\n self._ready = True", "def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def initialize(self):\n pass # pragma: no cover", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def setUp(self) -> None:\n self.engine = EvalHPOA()", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def _setup(self) -> None:\n # Call base implementation\n super()._setup()\n\n # Configure the low-level integrator\n engine_options = self.simulator.engine.get_options()\n engine_options[\"stepper\"][\"iterMax\"] = 0\n engine_options[\"stepper\"][\"dtMax\"] = min(0.02, self.step_dt)\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = False\n\n # Set maximum computation time for single internal integration steps\n if self.debug:\n engine_options[\"stepper\"][\"timeout\"] = 0.0\n else:\n engine_options[\"stepper\"][\"timeout\"] = 2.0\n\n # Enable logging of geometries in debug mode\n if self.debug:\n engine_options[\"telemetry\"][\"isPersistent\"] = True\n\n # Update engine options\n self.simulator.engine.set_options(engine_options)\n\n # Set robot in neutral configuration\n qpos = self._neutral()\n framesForwardKinematics(\n self.robot.pinocchio_model, self.robot.pinocchio_data, qpos)", "def __init__(self):\n\n self.controller = None\n\n self.game_running = False\n self.menu_view_running = False\n self.end_game_running = False", "def init(self) -> None:", "def initialize(self, *args, **kwargs):\n pass", "def _real_initialize(self):\n pass" ]
[ "0.6916761", "0.6699283", "0.6575671", "0.6537157", "0.6448048", "0.6433576", "0.6422153", "0.64024854", "0.64004904", "0.64004904", "0.64004904", "0.64004904", "0.64004904", "0.64004904", "0.64004904", "0.64004904", "0.63673574", "0.63242495", "0.6309775", "0.63037765", "0.62917405", "0.6270151", "0.6270151", "0.6269447", "0.6264018", "0.6262658", "0.6262582", "0.62610257", "0.62610257", "0.62610257", "0.6234333", "0.6234333", "0.6234333", "0.6234333", "0.6234333", "0.62294775", "0.62279606", "0.6223345", "0.6222628", "0.62192565", "0.62059355", "0.61918795", "0.618445", "0.61793935", "0.61702627", "0.61643714", "0.615424", "0.6148531", "0.61455405", "0.61378115", "0.6130229", "0.6107043", "0.6106852", "0.61052495", "0.60954016", "0.6089986", "0.60660166", "0.60640085", "0.6061074", "0.60515285", "0.6047289", "0.60446584", "0.6038252", "0.6037429", "0.60328245", "0.60325325", "0.60293436", "0.6025415", "0.6025415", "0.6024822", "0.6013867", "0.6010452", "0.6010452", "0.60098356", "0.6002352", "0.60016584", "0.59960175", "0.59937", "0.59937", "0.5981089", "0.5979302", "0.5974276", "0.59741074", "0.5954052", "0.5949854", "0.5930803", "0.5927828", "0.5920565", "0.5909367", "0.5909367", "0.59045696", "0.59045696", "0.59045696", "0.59045696", "0.5902274", "0.5899796", "0.5898325", "0.58976376", "0.58973074", "0.58971155", "0.5892977" ]
0.0
-1
Receive a list of commands.
def receive(self, command_list): for cmd in command_list: self._handle_command(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receive(self, command_list):\n for cmd in command_list:\n self._send_cmd_with_mapped_ids(cmd)", "def receive(self, command_list):\n for cmd in command_list:\n if not cmd.gate == FlushGate():\n self._add_cmd(cmd)\n\n # (try to) send on\n if not self.is_last_engine:\n if self._is_rotation(cmd):\n orig_cmd = cmd\n sequence = self._rotations.pop(0)\n for elem in sequence:\n self.send([elem])\n else:\n self.send([cmd])", "def communicate(self, commands):\n\n if not isinstance(commands, list):\n commands = [commands]\n\n self.socket.send_multipart([json.dumps(commands).encode('utf-8')])\n\n return self.socket.recv_multipart()", "async def _list_commands(self):\n message_cmds = \"regular commands:\\n\"\n tts_cmds = \"tts commands:\\n\"\n cur = self.conn.cursor()\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is true;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n tts_cmds += invoke[0] + ', '\n tts_cmds = tts_cmds[0:-2]\n cur.execute(\n \"SELECT invoke FROM message_commands WHERE istts is false;\")\n cmd_invokes = cur.fetchall()\n for invoke in cmd_invokes:\n message_cmds += invoke[0] + ', '\n message_cmds = message_cmds[0:-2]\n cur.close()\n await self.bot.say(message_cmds)\n await self.bot.say(tts_cmds)", "def process_commands(self, commands: List[str]):", "def receive(self, command_list):\n for cmd in command_list:\n if isinstance(cmd.gate, FlushGate):\n while self._stored_commands:\n self._run()\n self.send([cmd])\n else:\n self._stored_commands.append(cmd)\n # Storage is full: Create new map and send some gates away:\n if len(self._stored_commands) >= self.storage:\n self._run()", "def list_commands():\n print(' ')\n print('Chat Client Commands')\n print('-----------------------')\n print(\"Whisper: Send a online user a private message: /w username (message)\")\n print('Current Users: Get a list of all current online users: /users')\n print('File Transfer (Upload): Transfer a file to the server: /file (file path)')\n print('File Transfer (Download): Prints out the contents of a file: /file_download (file name)')\n print('File List: Lists all files currently stored on a server: /file_list')\n print('Save Username: Save your current username to the server to auto login at this ip address: /save')\n print('Exit: Close the client: quit or exit')\n print('Commands: Lists all commands for the Client: /help')\n print('Feed: Redisplay all messages: /feed')\n print('-----------------------')\n print(' ')", "async def list_command(ctx, cmd:str=False):\n if cmd == False: # for quick list of commands\n ls = await get_quick_list(ctx)\n await ctx.send(embed=ls)\n if cmd == \"all\" or cmd == \"commands\":\n ls = await get_list(ctx.message.author, 1)\n sent_list = await ctx.send(embed=ls)\n await sent_list.add_reaction(EMOJI_FAST_REVERSE)\n await sent_list.add_reaction(EMOJI_LEFT_ARROW)\n await sent_list.add_reaction(EMOJI_RIGHT_ARROW)\n await sent_list.add_reaction(EMOJI_FAST_FORWARD)\n elif cmd == \"states\":\n states_list = await get_state_list()\n list = assemble_embed(\n title=\"List of all states\",\n desc=\"\\n\".join([f\"`{state}`\" for state in states_list])\n )\n await ctx.send(embed=list)\n elif cmd == \"events\":\n events_list = [r['eventName'] for r in EVENT_INFO]\n list = assemble_embed(\n title=\"List of all events\",\n desc=\"\\n\".join([f\"`{name}`\" for name in events_list])\n )\n await ctx.send(embed=list)", "def list_command(ctx: Any) -> None:\n pass", "def run(self, commands: list[str]):\n ...", "def list_commands(self, ctx):\n return 
self.daemon.list_actions()", "def run_commands(self, commands, check_rc=True):\n output = None\n queue = list()\n responses = list()\n\n def run_queue(queue, output):\n try:\n response = to_list(self.send_request(queue, output=output))\n except Exception as exc:\n if check_rc:\n raise\n return to_text(exc)\n\n if output == 'json':\n response = [json.loads(item) for item in response]\n return response\n\n for item in to_list(commands):\n cmd_output = 'text'\n if isinstance(item, dict):\n command = item['command']\n if 'output' in item:\n cmd_output = item['output']\n else:\n command = item\n\n # Emulate '| json' from CLI\n if command.endswith('| json'):\n command = command.rsplit('|', 1)[0]\n cmd_output = 'json'\n\n if output and output != cmd_output:\n responses.extend(run_queue(queue, output))\n queue = list()\n\n output = cmd_output\n queue.append(command)\n\n if queue:\n responses.extend(run_queue(queue, output))\n\n return responses", "def recv(self, *messages):\n for message in messages:\n self.input.put(message)", "async def list(self, *args, **kwargs):\n return f\"Command list: {', '.join(self.get_commands())}\"", "def cmd_list(self):\n rc = self.socket_command('list', False)\n return rc", "def issue_cmds(self, cmds):\n output = []\n for cmd in cmds:\n output.append(self.send_and_parse(cmd))\n return output", "def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []", "def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")", "def commands(self) -> List[Command]:\n return []", "def run_commands(ip_address, user, password, commandList, platform, buffer=5000):\n print \"Configuring \" + ip_address\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n remote_conn_pre.connect(ip_address, username=user, password=password)\n remote_conn = remote_conn_pre.invoke_shell()\n if platform == \"cisco\":\n remote_conn.send(\"enable\\n\")\n time.sleep(1)\n remote_conn.send(password+'\\n')\n time.sleep(1)\n commands = commandList.split('\\n')\n for com in commands:\n remote_conn.send(com+'\\n')\n time.sleep(1)\n output = remote_conn.recv(buffer)\n #print output", "async def send_commands(ans: Message):\n await ans.answer(all_commands)", "def send_commands(self, commands=None):\n commands = commands or []\n command_list = {}\n for command in commands:\n command_list[command.id] = {\n 'speed': command.speed, 'direction': command.direction\n }\n data = {'commands': command_list}\n state = self._post(data)\n status = state['status'].lower()\n print(\"status: {}\".format(status))\n if status == 'error':\n print(\"message: {}\".format(state['message']))\n elif status == 'finished':\n print(\"finished! 
Score: {} Watch result at: {}\".format(state['score'], state['visualization']))\n if 'requests' not in state:\n state['requests'] = []\n for elevator_data in state.get('elevators', []):\n if 'buttons_pressed' not in elevator_data:\n elevator_data['buttons_pressed'] = []\n\n return state", "async def listcommands(self, ctx):\n\t\twith open('custom_commands.json', 'r') as f:\n\t\t\tcommands = json.load(f)\n\t\t\toutput = \", \".join([*commands])\n\t\t\tawait ctx.send(f\"```List of custom commands:\\n{output}```\")", "def get_all_commands():\n\n session_attributes = {}\n card_title = \"All Commands\"\n speech_output = \"You can ask for a synonym, antonym, rhyme, definition, part of speech, syllables, or frequency of a word by saying something like 'synonym for happy'. You can also ask for a random synonym, antonym, definition, or rhyme by saying something like 'random synonym for happy'. If you want all of them, say something like 'all synonyms for happy.'\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, or frequency of a word! Or say 'all commands' to get hear all commands.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))", "def _parse_cmds(self):\n lst = self.inbuffer.split('\\n')\n # leave trailing text (not terminated by \\n) in inbuffer\n self.inbuffer = lst.pop(-1)\n if lst:\n for cmd in lst:\n self.cmds.append(cmd)", "async def _c_list(self, ctx):\n command_list = self.database.get_guild_commands(ctx.guild.id)\n if len(command_list) == 0:\n await ctx.send(\"This server has no custom commands\")\n return\n out = \"```\\nServer Commands:\\n\"\n for command in command_list:\n out += f\"{command.name}: {command.text}\\n\"\n out += \"```\"\n await ctx.send(out)", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def sendCMDlist(self): \n\n if self.cmdlist:\n for cmd in self.cmdlist:\n try:\n tmp = self.FixLineEndingsForWindows(cmd)\n charssent= self.leicasocket.send(tmp)\n # we actually need to make sure\n # we sent the whole string by comparing charssent.\n if charssent != len(tmp):\n print \"Error sending commands\"\n raise CAMSendCharsError\n except:\n print \"error sending command\", cmd\n return False\n time.sleep(self.delay) # wait some time between sending each line\n self.emptyCMDlist()\n time.sleep(self.delay)", "def cmd_list(args):", "def receive(self, lines):\n if isinstance(lines, str):\n lines = [lines]\n for l in lines:\n self.client.line_received(l)", "def do_list(self, cmd):\n\t\tif self.client.client is None:\n\t\t\tself.stdout.write(\"Error: Not connected!\\n\")\n\t\t\treturn\n\t\tif not self.ingroup:\n\t\t\tself.stdout.write(\"Error: Not in a group!\\n\")\n\t\t\treturn\n\t\tstats = self.client.list()\n\t\tself.stdout.write(\n\t\t\t\" Type |From Pid |From Port| To Pid | To Port | Recv | Send \\n\"\n\t\t\t)\n\t\tself.stdout.write(\"---------+\"*6+\"---------\\n\")\n\t\tfor l in stats:\n\t\t\ttext = (\n\t\t\t\t(\"{:>9}|\"*7)[:-1]\n\t\t\t\t).format(*l).replace(\"None\", \"----\")\n\t\t\tself.stdout.write(text+\"\\n\")\n\t\tself.stdout.write(\"\\n\")", "def test_CommandList():\n from paradrop.confd.command import CommandList\n\n 
clist = CommandList()\n clist.append(20, \"b\")\n clist.append(20, \"c\")\n clist.append(10, \"a\")\n\n commands = list(clist.commands())\n assert commands == [\"a\", \"b\", \"c\"]", "def get_available_commands(self, caller):\n # commands = [{\"name\":\"LOOK\", \"cmd\":\"look\", \"args\":self.dbref}]\n commands = [{\"name\":\"LOOT\", \"cmd\":\"loot\", \"args\":self.dbref}]\n return commands", "def get_commands(self, view=None):\n return self._get(\"commands\", ApiCommand, True,\n params = view and dict(view=view) or None)", "def _execute_impl(self, commands):\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((self.host, self.port))\n for c in commands:\n conn.sendall(c)\n conn.recv(4096)\n conn.close()", "def list_commands(self, ctx):\n commands = self._iter_commands()\n return commands.keys()", "def send_and_parse(self, cmd):\n\n lines = self.__send(cmd)\n messages = self.__protocol(lines)\n return messages", "def get_commands(self):\n return list(self.commands.values())", "def list_commands(self, context):\n\t\treturn self.commands.keys()", "def commands():", "def get_available_commands(self, caller):\n # commands = [{\"name\":\"LOOK\", \"cmd\":\"look\", \"args\":self.dbref}]\n commands = [{\"name\":\"TALK\", \"cmd\":\"talk\", \"args\":self.dbref}]\n return commands", "def read_commands_from_console():\n cmds = []\n print 'Please provide commands for REA Robot. One command per line. An empty line finishes the input:'\n while True:\n line = raw_input()\n if line:\n cmds += [line.upper()]\n else:\n break\n\n return cmds", "def list_commands(self):\n response = self.do_command('list_commands')\n stripped = [s for s in (t.strip() for t in response.split(\"\\n\"))]\n return [s for s in stripped if is_well_formed_gtp_word(s)]", "def get_commands(self):\n\t\treturn list(self.command_handlers.keys())", "def execute_commands(self, commands):\n for cmd in commands:\n self.action_list[cmd](commands[cmd])\n if cmd == 'r':\n break", "def getCommands(self):", "def _i2c_read_words_from_cmd(\n self, command: List[int], delay: float, reply_size: int\n ) -> List[int]:\n with self._device:\n self._device.write(bytes(command))\n time.sleep(delay)\n if not reply_size:\n return None\n crc_result = bytearray(reply_size * (_SGP30_WORD_LEN + 1))\n self._device.readinto(crc_result)\n # print(\"\\tRaw Read: \", crc_result)\n result = []\n for i in range(reply_size):\n word = [crc_result[3 * i], crc_result[3 * i + 1]]\n crc = crc_result[3 * i + 2]\n if self._generate_crc(word) != crc:\n raise RuntimeError(\"CRC Error\")\n result.append(word[0] << 8 | word[1])\n # print(\"\\tOK Data: \", [hex(i) for i in result])\n return result", "def commands(server_object, client, address, command_args):\n\n\t#: Import inspect so that we can get the docstring.\n\timport inspect\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the commands, and what they do.\n\tfor command in server_object.client_command_list.keys():\n\n\t\tmsg += \"\\n/\" + command + \" - \"\n\n\t\t#: Get the docstring\n\t\tdocstring = inspect.getdoc(server_object.client_command_list[command][0])\n\n\t\t#: Ignore the portion containing the permission level.\n\t\tdocstring = docstring[:docstring.index(\"Permission_level\")]\n\n\t\tmsg += docstring.strip()\n\t\tmsg += \"\\n\"\n\n\tclient.send(msg.encode())", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for 
i in command_aliases:\n self.commands.append(i)", "def listen_commands(self):\n self._consumer_rabbit_connection = BlockingConnection(ConnectionParameters(self._rabbit_host))\n self._consumer_rabbit_channel = self._consumer_rabbit_connection.channel()\n\n # Listen buy/sell orders from external system\n self._listen_queue(QueueName.CMD_BUYSELL, self.on_cmd_buysell)\n self._listen_queue(QueueName.MSG_RAW, self.on_raw_msg)\n # self._logger.info(f\"Declaring rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.queue_declare(queue=QueueName.CMD_BUYSELL, durable=True, auto_delete=True)\n # self._logger.info(f\"Consiming to rabbit queue {QueueName.CMD_BUYSELL}\")\n # self._consumer_rabbit_channel.basic_consume(QueueName.CMD_BUYSELL, self.on_cmd_buysell,\n # consumer_tag=\"WebQuikBroker\")\n self._consumer_rabbit_channel.start_consuming()", "def receive_captured_list(self):\n reply = self.socket.recv(4096)\n print(\"Pokemon capturados\")\n print(reply[1:].decode())", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)", "def list_commands(self, ctx):\n commands = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n commands.append(filename[4:-3])\n commands.sort()\n return commands", "def main_list(args):\n return list_commands(args.directory)", "def run(self):\n print(self, end='')\n command = input()\n command_list = []\n while command != \"end\":\n command = command.strip()\n command_list.append(command)\n print(self, end='')\n command = input()\n return command_list", "def multi_command_stop(self):\n\n log.debug(\n 'Stopping multi-command message for device \"{0}\"'.format(self.name))\n\n if self.multi_command is None:\n raise ValueError('Multi-command message not started.')\n elif not self.multi_command:\n # No commands.\n return []\n\n commands = self.multi_command\n # This ensures that write and ask will not buffer the real message.\n self.multi_command = None\n\n # Only commands not starting with \"*\" get a \":\" prefix.\n commands = [cmd if cmd[0] == '*' else ':' + cmd for cmd in commands]\n message = ';'.join(commands)\n\n if self.responses_expected:\n result = self.ask(message)\n\n # FIXME: What if the response contains a meaningful \";\" somewhere?\n return result.split(';', self.responses_expected - 1)\n else:\n self.write(message)\n\n return []", "def _read_request_commands(self):\n request_string = self._read_socket()\n self._xml_logger.info(request_string.replace('\\r', '') + \"\\n\\n\")\n requests = RequestsParser.parse_request_commands(request_string)\n self._logger.debug(requests)\n return requests", "def list_commands(self, ctx: Context) -> List[str]:\n return self.docs_command.all_commands", "def recvCommand(self):\n return", "def _list(self, irc, msg, args):\n # TODO: write _list; use local.punny modules print/list if avail\n pass", "def do_list_commands(self):\n result = \"\\n\".join(self.commands.keys())\n return result, True", "def commands(self) -> typing.List[str]:\n return self._values.get(\"commands\")", "def _receive_multiple(self, what, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag_multiple(what)\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n ' --print --address ' + address +\n ' ' + tag_string\n )\n\n try:\n client = subprocess.Popen(cmd, shell=False,\n 
stdout=subprocess.PIPE)\n\n # client.communicate is blocking\n raw_out = client.communicate()\n # print(f'DEBUG enip _receive_multiple {raw_out}: ', raw_out)\n\n # value is stored as first tuple element\n # between a pair of square brackets\n values =[]\n raw_string = raw_out[0]\n split_string = raw_string.split(b\"\\n\")\n for word in split_string:\n values.append(word[(word.find(b'[') + 1):word.find(b']')])\n values.pop()\n return values\n\n except Exception as error:\n print('ERROR enip _receive_multiple: ', error)", "def load_command_list(filename=None):\n contents = None\n if filename:\n logger.debug('Attempting to read commands from \"{}\"'.format(filename))\n with open(filename, 'r') as fp:\n contents = fp.read().strip()\n\n if not contents:\n contents = ''\n\n # Split data as lines (ignore empty)\n return [l.strip().upper() for l in contents.split('\\n') if l.strip() != '']", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "async def list(self, ctx):\n message = '\\n'.join(sorted(self.etrigs['etrigs'].keys(), key=str.lower))\n message = '```http\\n{}\\n```'.format(message)\n await ctx.send(message)", "def get_commands(self):\r\n return self._commands", "def grab_command_list(index_url):\n r = requests.get(index_url)\n r.raise_for_status()\n soup = bs4.BeautifulSoup(r.text, 'lxml')\n main_list = soup.ul\n commands = []\n base_url = os.path.dirname(index_url)\n for item in main_list.children:\n if not isinstance(item, bs4.element.Tag):\n continue\n cmd_tag = item.find_all('a')[0]\n name = cmd_tag.string\n page = cmd_tag.attrs['href']\n c = CondorCmd(name, url=os.path.join(base_url, page))\n commands.append(c)\n return commands", "def list_commands(self) -> dict[str, str] | None:\n try:\n return cast(dict[str, str], self._client.list_commands(self._alias))\n except PyNUTError as err:\n _LOGGER.error(\"Error retrieving supported commands %s\", err)\n return None", "def send_and_parse(self, cmd):\n if not self.is_alive:\n # try reconnecting once if the connection is not alive\n self._connect()\n\n self.ssh.sendline(cmd)\n self.ssh.prompt()\n results = []\n data = self.ssh.before.decode('ascii').split('\\r\\n')\n for item in data[:-1]:\n if cmd.count(item.strip()) == 0:\n results.append(item)\n return results", "def list_commands(self, ctx): # noqa\n return self.commands.keys()", "def send_command(self, command):\n stdin, stdout, stderr = self.ssh_client.exec_command(command)\n return stdout.readlines()", "def parse_commands(self) -> list:\n\n command = self.path.split(\"?\")[1]\n commands = command.split(\"&\")\n\n return commands", "def commands(self, flags=DEFAULT_FLAGS):\n return self._commands(flags)", "def recv_input_locally(self):\n\n while self.commands.poll():\n command = self.commands.recv()\n self.local_commands.append(command)\n\n while self.audio_play.poll():\n frame = self.audio_play.recv()\n self.local_audio_play.append(frame)", "def _commands(self) -> Dict[str, List[str]]:\r\n pass", "def run_backlog_commands(self):\n if not hasattr(self, 'commands') or self.commands == '':\n print('{BLUE}No commands for {f_name}, skipping.{NOCOLOR}'.format(**colors, **self))\n else:\n self.mqtt.connect(self.mqtt_host)\n backlog_topic = '{c_topic}/backlog'.format(**self)\n # Join 
all command/payload pairs together with semicolons. If the\n # payload is a tasmota GPIO, use the value of the enumeration.\n backlog_payload = '; '.join(['{c} {p}'.format(c=i['command'], p=get_gpio(i['payload']) if 'GPIO' in i['payload'] else i['payload']) for i in self.commands]) + '; restart 1'\n print('{BLUE}Sending {topic} {payload}{NOCOLOR}'.format(topic=backlog_topic, payload=backlog_payload, **colors))\n self.mqtt.publish(backlog_topic, backlog_payload)\n self.mqtt.disconnect()", "def evecommands(self, irc, msg, args):\n desc = \"\\n\".join((\"EVESpai commands:\",\n \"{0} {1}\".format(ircutils.bold(\"'evecommands'\"), \"List available commands.\"),\n \"{0} {1}\".format(ircutils.bold(\"'pos [<system>]'\"), \"Lists all POSes.\"),\n \"{0} {1}\".format(ircutils.bold(\"'evetime'\"), \"Get current time on Tranquility.\"),\n \"{0} {1}\".format(ircutils.bold(\"'whereis <character>'\"), \"List the location and currently boarded ship of <character>.\"),\n \"{0} {1}\".format(ircutils.bold(\"'cache <calltype>'\"), \"List the cache time of given call type.\"),\n \"{0} {1}\".format(ircutils.bold(\"'whoat <system>'\"), \"List characters and their ships in <system>. If --all is given, ignore the max lines limitation.\"),\n \"{0} {1}\".format(ircutils.bold(\"'ship <shiptype>'\"), \"List characters in <shiptype>.\"),\n \"{0} {1}\".format(ircutils.bold(\"'chars <user>'\"), \"List all cha)racters belonging to <user>\"),\n \"{0} {1}\".format(ircutils.bold(\"'price [--location=(<solarsystem>|<region>)] <typeName>'\"), \"List buy/sell/volume of <type> in <location>, defaults to Jita.\"),\n \"{0} {1}\".format(ircutils.bold(\"'markets'\"), \"List all price indexed markets.\"),\n \"{0} {1}\".format(ircutils.bold(\"'player <character>'\"), \"List username of those who own *<character>*\")))\n\n for line in desc.splitlines():\n irc.reply(line.strip(), prefixNick=False)", "def multiple_messages(self, messages):\n for message in messages:\n cmd = '{}serverMessage \"{}\"'.format(self.console, Commands.aquote(message))\n self.write_command(cmd)", "def get_commands(self):\r\n return list(filter(None, self._commands.keys()))", "def run_cmd_list(commands, work_dir=None):\n if not isinstance(commands, list):\n raise TypeError(\"commands must be a list\")\n for command in commands:\n for line in run_cmd(command, work_dir):\n yield line", "def parse_commands(command_list: List[str]) -> List[str]:\n return [' '.join(x.split('-')) for x in command_list]", "def iterate(self):\n byte = self.sp.read()\n if not byte:\n return\n data = ord(byte)\n received_data = []\n handler = None\n if data < START_SYSEX:\n # These commands can have 'channel data' like a pin nummber appended.\n try:\n handler = self._command_handlers[data & 0xF0]\n except KeyError:\n return\n received_data.append(data & 0x0F)\n while len(received_data) < handler.bytes_needed:\n received_data.append(ord(self.sp.read()))\n elif data == START_SYSEX:\n data = ord(self.sp.read())\n handler = self._command_handlers.get(data)\n if not handler:\n return\n data = ord(self.sp.read())\n while data != END_SYSEX:\n received_data.append(data)\n data = ord(self.sp.read())\n else:\n try:\n handler = self._command_handlers[data]\n except KeyError:\n return\n while len(received_data) < handler.bytes_needed:\n received_data.append(ord(self.sp.read()))\n # Handle the data\n try:\n handler(*received_data)\n except ValueError:\n pass", "def run_ssh_cmd_list(host, commands, work_dir=None, username=None,\n key_filename=None):\n if not isinstance(commands, list):\n raise 
TypeError(\"commands must be a list\")\n\n ssh = ssh_conn.connect(host, username, key_filename)\n\n for command in commands:\n for line in run_ssh_cmd(host, command, work_dir, username,\n key_filename, ssh):\n yield line\n\n ssh.close()", "def commands(self, commands):\n\n self._commands = commands", "def run_commands(self):\n processes = []\n\n i = 0\n ## get list of commands\n commands = self.get_commands()\n cnum = multiprocessing.cpu_count()\n\n while len(commands)>0:\n while len(processes)<cnum-1:\n c = commands.pop()\n i+=1\n print \"command #\",i, c\n ## run commands\n processes.append((i,subprocess.Popen(c, shell=True)))\n\n for j,p in processes:\n if p.poll() is not None:\n print j, \" status: \", p.poll()\n processes.remove((j,p))\n break\n else:\n time.sleep(10)\n return", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "async def test_commands(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/command\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"command.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.commands()\n\n assert response\n assert isinstance(response, List)\n\n assert response[0]\n assert isinstance(response[0], models.CommandItem)", "def run_commands(self, commands):\n if isinstance(commands, str):\n commands = commands.split('\\n')\n for item in commands:\n if item.strip().startswith(\"#\"):\n continue\n should_exit, result = self.cmd_with_result(item)\n if not result:\n break", "def send_cmds(self, cmds):\r\n self.socket.sendall(cmds)", "def get_commands(self):\n return self._commands", "async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:\n num_repeats = kwargs[ATTR_NUM_REPEATS]\n\n for _ in range(num_repeats):\n for single_command in command:\n await self.coordinator.roku.remote(single_command)\n\n await self.coordinator.async_request_refresh()", "def get_commands():\n # Continuously prompt the user for commands, yielding them as we receive\n # them, and finishing when the \"q\"(uit) command is entered.\n while True:\n cmd = raw_input(\"Command: \").lower()\n if cmd == 'q':\n break\n yield cmd", "def commands(self):\n if 'Commands' in self:\n return self['Commands']\n\n text = self['Body']\n commands = []\n cmd = [\"\", \"\"]\n isCmd = False\n isArg = False\n isComment = False\n for i in range(len(text)):\n # don't parse the commented lines\n # ignore everyline starting with '>'\n if text[i] == '>':\n j = i-1\n while text[j] in (' ', '\\t'):\n j -= 1\n if text[j] == '\\n':\n isComment = True\n elif text[i] == '\\n':\n isComment = False\n if isComment:\n if isArg:\n cmd[1] += text[i]\n continue\n\n if text[i-1] != '\\\\' and text[i:i+2] == '._' and (isCmd or isArg):\n isArg = False\n commands.append(cmd)\n cmd = [\"\", \"\"]\n elif isCmd:\n if text[i] == ' ':\n isArg = True\n isCmd = False\n else:\n cmd[0] += 
text[i]\n elif isArg:\n if text[i:i+3] in ('\\\\._', '\\\\_.'):\n pass\n else:\n cmd[1] += text[i]\n elif text[i-1] != '\\\\' and text[i-1:i+1] == '_.':\n isCmd = True\n\n return commands", "async def _command_dispatcher(self):\n # sysex commands are assembled into this list for processing\n sysex = []\n\n while True:\n try:\n next_command_byte = await self.read()\n # if this is a SYSEX command, then assemble the entire\n # command process it\n if next_command_byte == PrivateConstants.START_SYSEX:\n while next_command_byte != PrivateConstants.END_SYSEX:\n await asyncio.sleep(self.sleep_tune)\n next_command_byte = await self.read()\n sysex.append(next_command_byte)\n await self.command_dictionary[sysex[0]](sysex)\n sysex = []\n await asyncio.sleep(self.sleep_tune)\n # if this is an analog message, process it.\n elif 0xE0 <= next_command_byte <= 0xEF:\n # analog message\n # assemble the entire analog message in command\n command = []\n # get the pin number for the message\n pin = next_command_byte & 0x0f\n command.append(pin)\n # get the next 2 bytes for the command\n command = await self._wait_for_data(command, 2)\n # process the analog message\n await self._analog_message(command)\n # handle the digital message\n elif 0x90 <= next_command_byte <= 0x9F:\n command = []\n pin = next_command_byte & 0x0f\n command.append(pin)\n command = await self._wait_for_data(command, 2)\n await self._digital_message(command)\n # handle all other messages by looking them up in the\n # command dictionary\n elif next_command_byte in self.command_dictionary:\n await self.command_dictionary[next_command_byte]()\n await asyncio.sleep(self.sleep_tune)\n else:\n # we need to yield back to the loop\n await asyncio.sleep(self.sleep_tune)\n continue\n except Exception as ex:\n # A error occurred while transmitting the Firmata message, message arrived invalid.\n if self.log_output:\n logging.exception(ex)\n else:\n print(ex)\n await self.shutdown()\n\n await self.serial_port.close()\n\n print(\"An exception occurred on the asyncio event loop while receiving data. Invalid message.\")\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.close()\n loop.stop()\n sys.exit(0)", "def gets(con,*keys):\n keyss=\" \".join(keys)\n # print(keyss)\n thecommand=\"get {keyss}\\r\\n\".format(keyss=keyss)\n con.send(thecommand.encode('utf-8'))\n # http://stackoverflow.com/questions/2716788/reading-http-server-push-streams-with-python\n # need a readline like\n # f = con.makefile(\"rb\") # converts a socket to a file\n response={}\n while 1:\n (akey,avalue)=parse_value(con)\n # print(\"got akey\",akey,avalue)\n if (akey==None):\n # f.close()\n break\n else:\n response[akey]=avalue\n return response" ]
[ "0.7666051", "0.7365854", "0.6984754", "0.69481695", "0.6923602", "0.6804654", "0.6704374", "0.6583639", "0.6574708", "0.6483976", "0.6443524", "0.6436", "0.64315933", "0.64236295", "0.6422955", "0.63806427", "0.63790524", "0.6377476", "0.63279116", "0.6277306", "0.62516063", "0.625115", "0.62159145", "0.6208946", "0.6208143", "0.6181727", "0.6179454", "0.61721987", "0.61287814", "0.61091584", "0.60995793", "0.6093464", "0.60817415", "0.6013025", "0.60109293", "0.5996816", "0.5992649", "0.599239", "0.59916246", "0.59915334", "0.5984932", "0.59323984", "0.59320146", "0.5926919", "0.59121704", "0.59075624", "0.59066224", "0.58972406", "0.5884115", "0.58840644", "0.5883168", "0.58664757", "0.58664757", "0.58664757", "0.58664757", "0.58545977", "0.5842515", "0.5837814", "0.58347267", "0.58316594", "0.58181185", "0.581752", "0.58125174", "0.5799989", "0.57965755", "0.5782433", "0.5769845", "0.5765243", "0.5755841", "0.57481635", "0.574814", "0.57446295", "0.57440263", "0.57358116", "0.5731842", "0.5729544", "0.5722464", "0.5718729", "0.571686", "0.57065606", "0.57065", "0.5705831", "0.5698649", "0.568506", "0.5677784", "0.5670659", "0.5665866", "0.5661408", "0.5655878", "0.56431645", "0.56347865", "0.56329316", "0.5618729", "0.56103975", "0.56000704", "0.5596714", "0.559104", "0.5588443", "0.5584888", "0.5581515" ]
0.8273917
0
Enter a controlled section.
def __init__(self, engine, qubits, ctrl_state=CtrlAll.One):
    self.engine = engine
    if isinstance(qubits, tuple):
        raise TypeError('Control qubits must be a list, not a tuple!')
    if isinstance(qubits, BasicQubit):
        qubits = [qubits]
    self._qubits = qubits
    self._state = canonical_ctrl_state(ctrl_state, len(self._qubits))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enter():\n pass", "def enter(self):\r\n self.turnOffLightboxes()\r\n self.installShortcutKeys()\r\n\r\n # Set parameter set node if absent\r\n self.selectParameterNode()\r\n self.editor.updateWidgetFromMRML()\r\n \r\n # If no segmentation node exists then create one so that the user does not have to create one manually\r\n if not self.editor.segmentationNodeID():\r\n newSegmentationNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLSegmentationNode')\r\n slicer.mrmlScene.AddNode(newSegmentationNode)\r\n self.editor.setSegmentationNode(newSegmentationNode)\r\n masterVolumeNodeID = self.getDefaultMasterVolumeNodeID()\r\n self.editor.setMasterVolumeNodeID(masterVolumeNodeID)", "def state_chosen_enter(cfg, app, win):", "def visit_section(self, node):\n self.section_level += 1\n self.body.append(self.starttag(node, \"section\"))", "def add_new_section(self, name, context=...):\n ...", "def _section_write(self, sname, stitle, selname):\n if not self.args.old_galaxy:\n this_sect = etree.SubElement(self.inputs, 'section', name=sname, title=stitle, expanded='False')\n when_yes = None\n else:\n this_sect = etree.SubElement(self.inputs, 'conditional', name=sname)\n this_sect_sel = etree.SubElement(this_sect, 'param', name=selname, type='select',\n label=stitle)\n opt_yes = etree.SubElement(this_sect_sel, 'option', value='yes')\n opt_yes.text = 'yes'\n opt_no = etree.SubElement(this_sect_sel, 'option', value='no', selected='true')\n opt_no.text = 'no'\n when_yes = etree.SubElement(this_sect, 'when', value='yes')\n return this_sect, when_yes", "def state_choose_enter(cfg, app, win):", "def enter(self, identifier):\n self.current.enter(identifier)", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def state_print_enter(cfg, app, win):", "def enter(self):\n self.indent += 1", "def enter_case(self, case):\n print \" \" + case.how_to_behave + '.'", "def on_mouse_enter(self, event):\n global controller\n controller = self\n if len(self.tasks) == 1:\n # for k, v in self.tasks.items():\n # self.task = v\n self.task = next(iter(self.tasks.values()))\n ht = self.task.helptext()\n self.set_help_text(ht)\n self.task.start(self.x, self.y)\n else:\n # show selection of available tasks\n self.set_help_text(None)", "def handle_panel_update(self, section_dict):", "def state_preview_enter(cfg, app, win):", "def section_start(self, header_str):\n # we should remove any newline character from the header\n header_str = header_str.replace('\\n|\\r', '')\n if self.active_section is None:\n self.active_section = Section(header_str, proc=self.proc)\n self.sections.append(self.active_section)\n else:\n self.active_section = self.active_section.subsection(header_str)", "def configure(self, section):", "def add_section(self, section_name: str) -> None:\n pass", "def add_section(self, section_name: str) -> None:\n pass", "def sectionSelect():\r\n self.MyInput = self.buttonValue.get() # gets the value of the radiobutton that the user selected.\r\n if self.MyInput == 1: # if it was the first radio button, prepare for part a\r\n \r\n self.textLabel1.grid(row=1, column = 0, sticky = E) # add and position the relevant widgets for this section of the simulation\r\n self.tickBox1.grid(row=1, column = 1, sticky = E)\r\n self.textLabel4.grid(row=2, column = 0, sticky = E)\r\n self.tickBox4.grid(row=2, column = 1, sticky = E)\r\n self.textLabel2.grid(row=1, column = 2, sticky = E)\r\n self.textInput2.grid(row=1, column = 3, sticky = E)\r\n self.textLabel3.grid(row=1, column = 4, sticky = 
E) \r\n self.textInput3.grid(row=1, column = 5, sticky = E)\r\n self.textLabel5.grid(row=2, column = 2, sticky = E) \r\n self.textInput5.grid(row=2, column = 3, sticky = E) \r\n self.textLabel6.grid(row=2, column = 4, sticky = E) \r\n self.textInput6.grid(row=2, column = 5, sticky = E)\r\n self.textLabel7.grid(row=3, column = 0, sticky = E) \r\n self.tickBox7.grid(row=3, column = 1, sticky = E)\r\n self.textLabel10.grid(row=4, column = 0, sticky = E) \r\n self.tickBox10.grid(row=4, column = 1, sticky = E)\r\n self.textLabel8.grid(row=3, column = 2, sticky = E) \r\n self.textInput8.grid(row=3, column = 3, sticky = E)\r\n self.textLabel9.grid(row=3, column = 4, sticky = E) \r\n self.textInput9.grid(row=3, column = 5, sticky = E) \r\n self.textLabel11.grid(row=5, column = 0, sticky = E) \r\n self.tickBox11.grid(row=5, column = 1, sticky = E)\r\n self.textLabel13.grid(row=6, column = 0, sticky = E) \r\n self.tickBox13.grid(row=6, column = 1, sticky = E)\r\n self.textInput12.grid(row=5, column = 2, sticky = W)\r\n self.textInput14.grid(row=6, column = 2, sticky = W)\r\n self.textLabel15.grid(row=7, column = 0, sticky = E)\r\n self.textInput15.grid(row=7, column = 1, sticky = E)\r\n \r\n self.textLabel16.grid_forget() #remove these widgets\r\n self.textLabel17.grid_forget()\r\n \r\n self.inputErrorText.set(\"\") #set these text labels to empty\r\n self.outputText.set(\"\")\r\n \r\n #here we add in suggested starting conditions for this section, the user is free to change them.\r\n self.tickBox1.select() #select polar coordinates radiobutton\r\n self.coordinate.set(0)\r\n coordinateSelect()\r\n self.textInput2Variable.set(\"400\")\r\n self.textInput3Variable.set(\"0\")\r\n self.tickBox10.select()\r\n self.circular.set(1)\r\n circularSelect()\r\n self.tickBox11.select()\r\n self.durationType.set(0)\r\n durationTypeSelect()\r\n self.textInput12Variable.set(\"3\")\r\n self.textInput15Variable.set(\"1\")\r\n \r\n elif self.MyInput == 2: # if it is the second radio button then prepare for part b\r\n selectionFrame1.grid_rowconfigure(1, weight = 1) # here we readjust the rows and columns so that we can centre the widgets\r\n selectionFrame1.grid_rowconfigure(2, weight = 1)\r\n selectionFrame1.grid_rowconfigure(3, weight = 1)\r\n selectionFrame1.grid_columnconfigure(0, weight = 1)\r\n selectionFrame1.grid_columnconfigure(1, weight = 1)\r\n \r\n self.textLabel16.grid(row=1, column = 0, sticky = E) # add these widgets\r\n self.textLabel17.grid(row=2, column = 0, sticky = E)\r\n self.textLabel15.grid(row=3, column = 0, sticky = E)\r\n self.textInput15.grid(row=3, column = 1, sticky = W)\r\n \r\n self.textInput14.grid_forget() # remove these widgets\r\n self.textInput12.grid_forget()\r\n self.tickBox13.grid_forget()\r\n self.textLabel13.grid_forget()\r\n self.tickBox11.grid_forget()\r\n self.textLabel11.grid_forget()\r\n self.textInput9.grid_forget()\r\n self.textLabel9.grid_forget()\r\n self.textInput8.grid_forget()\r\n self.textLabel8.grid_forget()\r\n self.tickBox10.grid_forget()\r\n self.textLabel10.grid_forget()\r\n self.tickBox7.grid_forget()\r\n self.textLabel7.grid_forget()\r\n self.textInput6.grid_forget()\r\n self.textLabel6.grid_forget()\r\n self.textInput5.grid_forget()\r\n self.textLabel5.grid_forget()\r\n self.textInput3.grid_forget()\r\n self.textLabel3.grid_forget()\r\n self.textInput2.grid_forget()\r\n self.textLabel2.grid_forget()\r\n self.tickBox4.grid_forget()\r\n self.textLabel4.grid_forget()\r\n self.tickBox1.grid_forget()\r\n self.textLabel1.grid_forget()\r\n 
self.textLabel15.grid_forget()\r\n self.textInput15.grid_forget()\r\n \r\n self.inputErrorText.set(\"\")\r\n self.outputText.set(\"\")\r\n \r\n elif self.MyInput == 3: # if part b - manual is selected \r\n self.textLabel1.grid(row=1, column = 0, sticky = E) #add these widgets\r\n self.tickBox1.grid(row=1, column = 1, sticky = E)\r\n self.textLabel4.grid(row=2, column = 0, sticky = E)\r\n self.tickBox4.grid(row=2, column = 1, sticky = E)\r\n self.textLabel2.grid(row=1, column = 2, sticky = E)\r\n self.textInput2.grid(row=1, column = 3, sticky = E)\r\n self.textLabel3.grid(row=1, column = 4, sticky = E) \r\n self.textInput3.grid(row=1, column = 5, sticky = E)\r\n self.textLabel5.grid(row=2, column = 2, sticky = E) \r\n self.textInput5.grid(row=2, column = 3, sticky = E) \r\n self.textLabel6.grid(row=2, column = 4, sticky = E) \r\n self.textInput6.grid(row=2, column = 5, sticky = E)\r\n self.textLabel8.grid(row=3, column = 0, sticky = E) \r\n self.textInput8.grid(row=3, column = 1, sticky = E)\r\n self.textLabel9.grid(row=3, column = 2, sticky = E) \r\n self.textInput9.grid(row=3, column = 3, sticky = E) \r\n self.textLabel13.grid(row=4, column = 0, sticky = E) \r\n self.textInput14.grid(row=4, column = 1, sticky = W)\r\n self.textLabel15.grid(row=5, column = 0, sticky = E)\r\n self.textInput15.grid(row=5, column = 1, sticky = E)\r\n \r\n self.textLabel16.grid_forget() #remove these widgets\r\n self.textLabel17.grid_forget()\r\n self.textLabel7.grid_forget()\r\n self.tickBox7.grid_forget()\r\n self.textLabel10.grid_forget()\r\n self.tickBox10.grid_forget()\r\n self.textLabel11.grid_forget()\r\n self.tickBox11.grid_forget()\r\n self.textInput12.grid_forget()\r\n self.tickBox13.grid_forget()\r\n \r\n self.inputErrorText.set(\"\")\r\n self.outputText.set(\"\")\r\n \r\n self.tickBox1.select() #add these suggested starting conditions\r\n self.coordinate.set(0)\r\n coordinateSelect()\r\n self.textInput2Variable.set(\"7000\")\r\n self.textInput3Variable.set(\"270\")\r\n self.circular.set(0)\r\n circularSelect()\r\n self.textInput8Variable.set(\"0\")\r\n self.textInput9Variable.set(\"7569.7\")\r\n self.durationType.set(1)\r\n durationTypeSelect()\r\n self.textInput14Variable.set(\"941760\")\r\n self.textInput15Variable.set(\"1\")\r\n self.textInput15Variable.set(\"50\")", "def enter(self):\n\t\tself._translate(True)\n\t\tinputCore.manager.emulateGesture(keyboardHandler.KeyboardInputGesture.fromName(\"enter\"))", "def state_processing_enter(cfg, app, win):", "def test_section_change(self):\n form = ComplaintActions(\n initial=self.initial_values,\n data={**self.initial_values, 'assigned_section': 'VOT'}\n )\n\n self.assertEqual(form.errors, {})\n self.assertCountEqual(form.get_actions(), [\n ('Assigned section:', 'Updated from \"ADM\" to \"VOT\"')\n ])", "def enter():\n from pynput.keyboard import Key, Controller\n kb = Controller()\n kb.press(Key.enter)\n kb.release(Key.enter)", "def do_section(parser, token, template='parts/section.html', end='endsection'):\n bits = token.split_contents()[1:]\n if len(bits) is 0:\n title, attrs = '', {}\n elif len(bits) is 1:\n title, attrs = bits[0], {}\n elif len(bits) % 2 is 0:\n raise template.TemplateSyntaxError(\"Your attributes don't match up: %s\" % ', '.join(bits[1:]))\n else:\n title = bits[0]\n attrs = dict(zip(bits[1::2], bits[2::2]))\n nodelist = parser.parse((end,))\n parser.delete_first_token()\n return SectionNode(template, title, attrs, nodelist)", "def _output_section_write(self):\n if not self.args.old_galaxy:\n self.output_sect = 
etree.SubElement(self.inputs, 'section', name='output_opt', title='Additional Output Parameters', expanded='False')\n else:\n self.output_sect = etree.SubElement(self.inputs, 'conditional', name='output_opt')\n self.output_sect_sel = etree.SubElement(self.output_sect, 'param', name='output_opt_sel', type='select',\n label='Additional output parameters?')\n self.opt_yes = etree.SubElement(self.output_sect_sel, 'option', value='yes')\n self.opt_yes.text = 'yes'\n self.opt_no = etree.SubElement(self.output_sect_sel, 'option', value='no', selected='true')\n self.opt_no.text = 'no'\n self.when_yes = etree.SubElement(self.output_sect, 'when', value='yes')", "def data_editor(self, section_num):\n section_num = int(section_num)\n if 1 <= section_num <= 11:\n a_tuple = self.__teaColumns[section_num-1]\n entered_data = self.tView.input_w_default(5, self.__teaholder[a_tuple[1]]) # 5 is a text prompt num\n newdata = self.data_verifier(section_num, entered_data)\n if newdata is not None:\n self.__teaholder[a_tuple[1]] = newdata\n self.__edited_sect = 1\n else:\n self.__edited_sect = 0", "def enterEvent(self, event):\n self.parent().parent().setHelpText(self.help_text)", "def _writeSection(self, sectionName, options):\n return True", "def render(self):\n self.env.render()\n #input(\"Press enter to take a step \")", "def _write_section_start(section_name, fobj):\n\n fobj.write(string.capwords(section_name, '_') + '\\n')", "def entered(self, mover):\n pass", "def add_section(self) -> None:\n\n if self.rows:\n self.rows[-1].end_section = True", "def load_sections():\n pass", "def on_enter(self):\n raise NotImplemented(\"on_enter method should be implemented.\")", "def section_end(self):\n if self.active_section:\n self.active_section = self.active_section.get_parent()\n else:\n print(\n \"Section end called without active section. 
This is caused \" +\n \"by multiple section_end() calls.\"\n )", "def section_reset(self):\n self.active_section = None", "def enter():\n input(\"\\nClick Enter to continue \")", "def add_section(self, section):\n if section.lower() == \"default\":\n raise ValueError, 'Invalid section name: %s' % section\n\n if section in self._sections:\n raise DuplicateSectionError(section)\n self._sections[section] = self._dict()", "def add_section(self, section: NetSection) -> None:\n if section.name in self.sections:\n self.logger.warning(\"Overriding section {}\".format(section.name))\n self.sections[section.name] = section", "def start_module():\n\n # your code\n title = \"\\nHuman Resources manager\\n\"\n list_option = ['Show Table', 'Add to table', 'Remove from Table via ID', \n 'Update record via ID', 'Get Oldest Person', 'Get closest to average']\n\n exit_message = \"Go back to the main menu\"\n while True:\n ui.print_menu(title, list_option, exit_message)\n inputs = ui.get_inputs([\"Please enter a number: \"], \"\")\n option = inputs[0]\n if option == \"1\":\n show_table(table)\n elif option == \"2\":\n add(table)\n elif option == \"3\":\n return_inputs = ui.get_inputs(['ID'],'Enter the key of the corresponding record you want removed.')\n remove(table,return_inputs[0])\n elif option == \"4\":\n return_inputs = ui.get_inputs(['ID'],'Enter the key of the corresponding record you want to update.')\n update(table,return_inputs[0])\n elif option == \"5\":\n print('\\n')\n get_oldest_person(table)\n elif option == \"6\":\n print('\\n')\n get_persons_closest_to_average(table)\n elif option == \"0\":\n break\n else:\n raise KeyError(\"There is no such option.\")", "def add_section(self, text: str) -> None:\n\n tag = r'''\\newpage\n \\section{%s}''' % (text)\n self.doc = self.doc + tag", "def enter(self, thing):\n self.in_thing.append(thing)\n return self\n # if thing == 'cave':\n # if self.finished_places == 5:\n # self.finished_places += 1", "def state_finish_enter(cfg, app, win):", "def import_control_section(self, filename_suffix='run'):\n pass", "def entry(self) -> None:\n\n assert self.shelfname\n\n self.handle_success('entered-task')\n\n self.load_shelf(self.shelfname, state=GuestState.READY)", "def enter(self):\n\t\t# Make sure parameter node exists and observed\n\t\tself.initializeParameterNode()\n\t\tself.active = True\n\t\tself.onPlanChange()", "def add_section(self, section, lyrics):\n self.sections[section] = lyrics", "def enter(self):\n for child in self.children:\n child.enter()\n self.enabled = True", "def enter(self):\n LOGGER.debug(\"State 'open' entered\")", "def test_enable_section_by_progress_linear_flow(self):\n\n self.launchSurvey(\"test_progress_value_source_section_enabled_no_hub\")\n\n self.assertInBody(\"Section 1 Question 1\")\n self.post({\"s1-b1-q1-a1\": 1})\n\n self.assertInBody(\"Section 1 Question 2\")\n self.post({\"s1-b2-q1-a1\": 1})\n\n self.assertInBody(\"Section 2 Question 1\")\n self.post({\"s2-b1-q1-a1\": 1})", "def set_section_data(self, section_name, value):\n section_name = JSONSchema.format_section_name(section_name)\n self._sections[section_name] = self._json_schema.check_section_value(section_name, value)\n return True", "def set_section(self, new_section, updating=False):\n SECTION_QUERY = \"\"\"UPDATE Section SET num_students = %s, comment1 = %s, comment2 = %s WHERE course_name = %s, semester = %s, section_id = %s, year = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Section (course_name, semester, section_id, num_students, comment1, comment2, year) 
VALUES (%s, %s, %s, %s, %s, %s, %s)\"\"\"\n\n if updating:\n self.db_cursor.execute(SECTION_QUERY,\n (new_section.num_students, new_section.comment1, new_section.comment2, new_section.course_name, new_section.semester, new_section.section_id, new_section.year))\n else:\n self.db_cursor.execute(SECTION_QUERY,\n (new_section.course_name, new_section.semester, new_section.section_id,\n new_section.num_students, new_section.comment1, new_section.comment2, new_section.year))\n self.db_connection.commit()", "def state_capture_enter(cfg, app, win):", "def crfform_section(self, crfform_section):\n\n self._crfform_section = crfform_section", "def hit_enter():\n keyboard.press_and_release('Enter')", "def section_id(self, section_id):\n\n self._section_id = section_id", "def henhouseDisplayMenu () :\r\n print('1.Predict egg production')\r\n print('2.Display needs')\r\n print('0.Exit henhouse management')\r\n print()\r\n print('Please choose an option from the above menu')", "def __getitem__(self, section_id):", "def wrap_up_section(section_type, section, output_handle):\n if section_type != 'LKG':\n return\n read_1, read_2 = get_paired_reads(section)\n output_handle.write('\\t'.join([read_1, read_2]) + '\\n')", "def addSection(self, name, fontstyle=None):\n if self.isFirstSection:\n self.isFirstSection = False\n else:\n self.menu.addSeparator() # menu.add(JSeparator()) ???\n\n label = JLabel(name)\n label.setLocation(4, 4)\n if fontstyle is not None:\n label.font = fontstyle\n self.applyStyle(label)\n self.menu.add(label)", "def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def write_section(self, fhandle, sect):\n fhandle.write(\"[%s]\\n\" % sect)\n for opt in sorted(self.file_parser.options(sect)):\n fhandle.write('{0} = {1}\\n'.format(opt, self.file_parser.get(sect, opt)))", "def enter(self):\n # lets not immediately run the animation\n assert self.notify.debugStateCall(self)\n self.node.postFlatten()\n # for some reason phaseIvals must be created here, doesn't work in __init__\n self.createPhaseIntervals()\n AnimatedProp.AnimatedProp.enter(self)\n\n # make it look like the other props by forcing pose 0\n defaultAnim = self.node.getAnimControl('anim')\n numFrames = defaultAnim.getNumFrames()\n self.node.pose('phase0', 0)\n self.accept(\"%sZeroPhase\" % self.propString, self.handleNewPhase)\n self.accept(\"%sZeroIsRunning\" % self.propString, self.handleNewIsRunning)\n self.startIfNeeded()", "def _add_section(self, section, before_section=None):\n inserted_before_other = False\n\n if before_section is not None:\n if before_section in self._sections:\n # If before_section was already introduced, we simply need to\n # insert the new section on its position, which will put it\n # exactly behind before_section.\n idx = self._sections.index(before_section)\n self._sections.insert(idx, section)\n inserted_before_other = True\n else:\n # If before_section hasn't been introduced yet, we know we need\n # to insert it after section when it's finally added to the\n # menu. So, we preserve that info in the _after_sections dict.\n self._after_sections[before_section] = section\n\n # Append section to the list of sections because we assume\n # people build menus from top to bottom, i.e. 
they add its\n # upper sections first.\n self._sections.append(section)\n else:\n self._sections.append(section)\n\n # Check if section should be inserted after another one, according to\n # what we have in _after_sections.\n after_section = self._after_sections.pop(section, None)\n\n if after_section is not None:\n if not inserted_before_other:\n # Insert section to the right of after_section, if it was not\n # inserted before another one.\n if section in self._sections:\n self._sections.remove(section)\n\n index = self._sections.index(after_section)\n self._sections.insert(index + 1, section)\n else:\n # If section was already inserted before another one, then we\n # need to move after_section to its left instead.\n if after_section in self._sections:\n self._sections.remove(after_section)\n\n idx = self._sections.index(section)\n idx = idx if (idx == 0) else (idx - 1)\n self._sections.insert(idx, after_section)", "def print_section(self, s):\n section = s.upper()\n\n self.print_newline()\n self.print_newline()\n self._write('%s\\n' % section)\n self._write('%s\\n' % ('-' * len(section)))\n self.print_newline()", "def on_key_press(self, key: str):\n if key == \"down\":\n self.selection_index += 1\n self.selection_index %= len(self.OPTIONS)\n self.draw()\n elif key == \"up\":\n self.selection_index -= 1\n self.selection_index %= len(self.OPTIONS)\n self.draw()\n elif key == \"enter\":\n self.OPTIONS[self.selection_index][1]()\n self.draw()", "def enter_moves(self, event):\n self._solution = self.input_move.get()", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def start(self):\n op = self.menu()\n self.opcoes(op)\n if op != \"q\" and op != \"w\":\n self.start()", "def state_wait_enter(cfg, app, win):", "def test_section_save(self):\n res = self.client().post('/api/v1/sections/', headers={'Content-Type': 'application/json'},\n data=json.dumps(self.section))\n self.assertEqual(res.status_code, 201)", "def setup_unrelated_section(enrollment_open=True):\n call_command(\"createtestuser\")\n\n # set up section\n mentor_user = User.objects.create(\n username=\"testmentor\",\n first_name=\"Test\",\n last_name=\"Mentor\",\n email=\"testmentor@berkeley.edu\",\n )\n if enrollment_open:\n cs61a = Course.objects.create(\n name=\"CS61A\",\n title=\"Structure and Interpretation of Computer Programs\",\n permitted_absences=2,\n enrollment_start=now_minus(30),\n section_start=now_minus(15),\n enrollment_end=now_plus(15),\n valid_until=now_plus(30),\n is_restricted=True, # restricted course\n )\n else:\n cs61a = Course.objects.create(\n name=\"CS61A\",\n title=\"Structure and Interpretation of Computer Programs\",\n permitted_absences=2,\n enrollment_start=now_plus(16),\n section_start=now_plus(30),\n enrollment_end=now_plus(45),\n valid_until=now_plus(60),\n is_restricted=True, # restricted course\n )\n mentor = Mentor.objects.create(user=mentor_user, course=cs61a)\n\n section = Section.objects.create(\n mentor=mentor, capacity=5, description=\"test section\"\n )\n Spacetime.objects.create(\n section=section,\n day_of_week=\"Monday\",\n start_time=\"11:00:00\",\n duration=\"01:00:00\",\n location=\"Cory 400\",\n )\n Spacetime.objects.create(\n section=section,\n day_of_week=\"Tuesday\",\n start_time=\"14:00:00\",\n duration=\"01:00:00\",\n location=\"Soda 380\",\n )", "def on_pre_enter(self):\n Logger.info('Application: Changed to the Settings screen.')", "def add_section(self, section):\n if self.has_section(section):\n 
raise DuplicateSectionError(section)\n self._dict[section] = {}", "def enter_press_log_show(self, event): # makes it so you can use enter instead of having to press the button\r\n if event.keycode == 13:\r\n self.show_game(self.game_number.get())", "def put(self, cmd_names, section, key, value, env=DEFAULT_ENV):\n\n if not self.document:\n self._read()\n # Empty document prepare the initial structure.\n self.document.update({env: {self._to_key(cmd_names): {section: {key: value}}}})\n # Only update appropriate key value pairs within a section\n self.document[env][self._to_key(cmd_names)][section].update({key: value})", "def enter(self, t):\n super(Agent, self).enter(t)\n\n if self._module is not None:\n self._module.enter(t)", "def house_entry():\n choices = {\"\\033[1;32mLeave\\033[0m house\" : [\"leave\"], \"\\033[1;32mEnter\\033[0m living room\" : [\"enter\", \"living\"], \"\\033[1;32mLook\\033[0m in \\033[1;32mmirror\\033[0m\" : [\"look\", \"mirror\"]}\n print_pause(\"\\nYou're standing in the entryway of the farmhouse.\")\n print_pause(\"The rug in here is somewhat threadbare but clean, and\"\n \" really ties the room together.\")\n print_pause(\"A mirror hangs on the wall to the right\")\n action = valid_input(choices)\n if action == \"Leave house\":\n print_pause(\"You open the front door and exit the house.\")\n farmhouse()\n elif action == \"Enter living room\":\n print_pause(\"Pausing only to admire the rug one more time (it\"\n \" really does tie the room together) you\"\n \" walk into the living room.\")\n house_livingroom()\n elif action == \"Look in mirror\":\n if \"look in mirror\" in actions:\n print_pause(\"Look - there are more important things to do today\"\n \" than to stand around looking at yourself in\"\n \" the mirror all day.\")\n house_entry()\n else:\n print_pause(\"This mirror is clearly an antique, with beautiful \"\n \"hand-carved inlays and real gold leaf.\")\n print_pause(\"From what you can see, it might be time to schedule\"\n \" a haircut appointment.\")\n actions.append(\"look in mirror\")\n house_entry()", "def testInterpretingSection(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertEqual(4, pl.stdin)\n\n repl.runCommandLine('_')\n self.assertEqual(4, pl.stdin)\n\n repl.runCommandLine('[3, 6, 9]')\n self.assertEqual([3, 6, 9], pl.stdin)\n\n repl.runCommandLine(\"print('hello')\")\n self.assertEqual('hello', pl.stdin)\n\n repl.runCommandLine('echo hello too')\n self.assertEqual(['hello too'], pl.stdin)\n\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def start_module():\n\n table = data_manager.get_table_from_file(\"accounting/items.csv\")\n options = [\"Display a table\",\n \"Add record to table\",\n \"Remove record from table\",\n \"Update record\",\n \"Which year max\",\n \"Average amount per year\"]\n\n while True:\n ui.print_menu(\"Accounting menu\", options, \"Main menu\")\n option = ui.get_inputs([\"\"], \"Please enter a number: \")\n if option[0] == \"1\":\n show_table(table)\n elif option[0] == \"2\":\n table = add(table)\n elif option[0] == \"3\":\n id_ = ui.get_inputs([\"ID: \"], \"Please type ID to remove\")\n table = remove(table, id_)\n elif option[0] == \"4\":\n id_ = ui.get_inputs([\"ID: \"], \"Please type ID to remove\")\n table = update(table, id_)\n elif option[0] == \"5\":\n ui.print_result(which_year_max(table))\n elif option[0] == \"6\":\n year = ui.get_inputs([\"Year: \"], \"Please enter year: \")\n ui.print_result(avg_amount(table, year[0]), \"Averege amount is: \")\n elif 
option[0] == \"0\":\n break\n else:\n ui.print_error_message(\"There is no such option.\")", "def begin_set_key(self):\n\t\tself.setting_key = True\n\t\tself.player.current_level.screen_manager.set_controls(ControlSettingControls(self))", "def start(self):\n self.menu()", "def house_livingroom():\n choices = {\"Go to the \\033[1;32mkitchen\\033[0m\" : [\"kitchen\"], \"\\033[1;32mReturn\\033[0m to \\033[1;32mentryway\\033[0m\" : [\"return\", \"entryway\", \"entry\"], \"\\033[1;32mSit\\033[0m on the \\033[1;32mcouch\\033[0m\" : [\"sit\", \"couch\"],\n \"\\033[1;32mRead magazines\\033[0m\" : [\"read\", \"magazines\", \"read magazines\"], \"\\033[1;32mKnock\\033[0m on the \\033[1;32mbedroom\\033[0m door\" : [\"knock\", \"bedroom\"]}\n print_pause(\"\\nYou are standing in the living room of the farmhouse.\")\n print_pause(\"A comfy-looking couch sits next to a coffee table covered in\"\n \" fashion magazines.\")\n print_pause(\"To the left is what appears to be a small kitchen.\")\n print_pause(\"Directly ahead is a closed door.\")\n if \"make coffee\" not in actions:\n print_pause(\"From the loud snoring coming from it, this is\"\n \" probably the bedroom.\")\n if \"vase\" not in inventory and \"flowers in vase\" not in inventory:\n print_pause(\"An interesting-looking vase sits on a bureau against the\"\n \" right wall.\")\n choices.update({\"Pick up the \\033[1;32mvase\\033[0m\" : [\"vase\"]})\n if \"tray\" not in inventory:\n print_pause(\"A tea tray sits on the coffee table.\")\n choices.update({\"Pick up the \\033[1;32mtray\\033[0m\" : [\"tray\", \"tea tray\"]})\n action = valid_input(choices)\n if action == \"Go to the kitchen\":\n house_kitchen()\n elif action == \"Return to entryway\":\n house_entry()\n elif action == \"Sit on the couch\":\n print_pause(\"Well. Isn't this nice.\")\n print_pause(\"After awhile you get the vague sense that you're supposed\"\n \" to be doing something, so you stand up.\")\n house_livingroom()\n elif action == \"Pick up the tray\":\n print_pause(\"You pick up the tea tray. It's got a rather nice\"\n \" chrysanthemum inlay on it.\")\n inventory.append(\"tray\")\n elif action == \"Pick up the vase\":\n print_pause(\"You pick up the vase, which appears to be a rather cheap\"\n \" Ming dynasty reproduction.\")\n inventory.append(\"vase\")\n elif action == \"Read magazines\":\n if \"read magazines\" not in actions:\n print_pause(\"After several minutes of careful study you now\"\n \" know the secret to perfect lipstick, \"\n \" Beyonce's 15 top hair tips, and how to drive\"\n \" your man wild.\")\n print_pause(\"Congratulations.\")\n actions.append(\"read magazines\")\n else:\n print_pause(\"You've already read these magazines and learned all\"\n \" of its eldritch fashion tips.\")\n elif action == \"Knock on the bedroom door\":\n if \"make coffee\" not in actions:\n print_pause(\"You hammer on the bedroom door.\")\n print_pause(\"The only effect this seems to have in an increase in\"\n \" the volume and frequency of the snoring.\")\n print_pause(\"Some people just can't wake up without their\"\n \" morning coffee.\")\n elif \"flowers in vase\" in inventory and \"cup of coffee\" in inventory:\n house_bedroom(monster)\n elif \"flowers\" in inventory:\n if \"vase\" not in inventory:\n print_pause(\"You can't just give someone a handful\"\n \" of flowers.\")\n print_pause(\"Maybe arrange them in a nice vase with some water\"\n \" in it.\")\n else:\n print_pause(\"Those flowers will never survive without\"\n \" some water. 
Maybe you can get some from the\"\n \" kitchen.\")\n else:\n print_pause(\"You should probably come bearing gifts.\")\n print_pause(\"Flowers are always a nice present.\")\n print_pause(\"Maybe check the surrounding forest for\"\n \" some wildflowers.\")\n house_livingroom()", "def enter(self):\n self.num_enters += 1", "def on_acercade_activate(self, widget):\n try:\n variables.venacercade.show()\n except:\n print('error abrira acerca de')", "async def __aenter__(self):\n if not self._active:\n await self._setup()\n self._active = True\n self._entered += 1\n return self", "def start_module():\n\n exit_message = \"Back to main menu.\"\n\n list_options = [\"Show inventory list.\",\n \"Add new item.\",\n \"Remove record by id\",\n \"Update info about item.\",\n \"Get list of available items\",\n \"Get average durability by manufacturers\"]\n\n display_menu = True\n while display_menu is True:\n table = create_table_from_file()\n ui.print_menu(\"Inventory\", list_options, exit_message)\n user_choice = ui.get_inputs([\"Menu number: \"], \"Select action by menu number\")\n\n if user_choice[0] == \"1\":\n show_table(table)\n\n elif user_choice[0] == \"2\":\n table = add(table)\n write_to_file(table)\n\n elif user_choice[0] == \"3\":\n show_table(table)\n id_ = ui.get_inputs([\"Id: \"], \"Type id of record to remove\")\n table = remove(table, id_)\n write_to_file(table)\n\n elif user_choice[0] == \"0\":\n display_menu = False\n\n if len(table) > 0:\n\n if user_choice[0] == \"4\":\n id_ = ui.get_inputs([\"Id: \"], \"Type id of record to change\")\n table = update(table, id_)\n write_to_file(table)\n show_table(table)\n\n elif user_choice[0] == \"5\":\n list_of_items = get_available_items(table)\n ui.print_result(list_of_items, \"List of available items\")\n\n elif user_choice[0] == \"6\":\n average_durability = get_average_durability_by_manufacturers(table)\n ui.print_result(average_durability, \"Get average durability by manufacturers dctionary\")", "def start_module():\n\n \n menu_for_store = [\"Show Table\",\n \"Add\",\n \"Remove\",\n \"Update\",\n \"Item by Durability Time\",\n \"Average Durability Time by Manufacturer\",\n \"Back to main menu\"]\n \n ui.print_menu(\"Store Menu\", menu_for_store, \"Exit program\")\n \n inputs = ui.get_inputs([\"Please enter a number: \"], \"\")\n option = inputs[0]\n if option == \"1\":\n show_table(data_manager.get_table_from_file(\"inventory/inventory.csv\"))\n elif option == \"2\":\n add(data_manager.get_table_from_file(\"inventory/inventory.csv\"))\n elif option == \"3\":\n id_ = ui.get_inputs([\"Enter the ID to be Removed: \"], \"\")\n remove(data_manager.get_table_from_file(\"inventory/inventory.csv\"), id_)\n elif option == \"4\":\n id_ = ui.get_inputs([\"Enter the ID to be Updated: \"], \"\")\n update(data_manager.get_table_from_file(\"inventory/inventory.csv\"), id_)\n elif option == \"5\":\n year = ui.get_inputs([\"Enter the year of Durability: \"], \"Years of Durability\")\n result = get_available_items(data_manager.get_table_from_file(\"inventory/inventory.csv\"),year[0])\n ui.print_result(result,\"Items not exceeding \" + year[0] + \" years of durability\" + \"\\n\")\n ui.get_inputs([\"(0) Main Menu: \"],\"\")\n elif option == \"6\":\n label = \"Average Durability By Manufacturers: \" + \"\\n\"\n ui.print_result(get_average_durability_by_manufacturers(data_manager.get_table_from_file(\"inventory/inventory.csv\")),label)\n ui.get_inputs([\"(0) Main Menu: \"],\"\")", "def on_enter(event_data):\n pocs = event_data.model\n pocs.next_state = 
'parking'\n\n # Make sure it's safe, dark and light enough for flats\n is_safe = pocs.wait_for_twilight()\n if not is_safe:\n return\n\n if pocs.observatory.flat_fields_required:\n pocs.observatory.take_flat_fields()\n else:\n pocs.logger.debug('Skipping twilight flat fields.')\n\n # Check if we should keep taking flats\n if pocs.observatory.is_twilight and pocs.repeat_flats:\n pocs.logger.info(\"Taking another round of twilight flat fields\")\n pocs.next_state = \"twilight_flat_fielding\"\n\n # Check if the Sun is coming up and we need to park\n elif pocs.observatory.is_past_midnight:\n pocs.next_state = 'parking'\n\n # Check if we need to focus\n else:\n pocs.next_state = 'coarse_focusing'", "def run_enter(self, expanded, unexpanded) :\n\t\tif not expanded :\n\t\t\treturn self.errormessage('Needs a version id as an argument')\n\t\tvexist = self.toObject(self.__context, expanded[0])\n\t\tif not vexist :\n\t\t\treturn self.errormessage(\"Version <em><b>%s</b></em> doesn't exist\" % expanded[0])\n\t\telse :\n\t\t\tif not self.HasPerms(vexist, 'Join/leave Versions') :\n\t\t\t\treturn -1\n\t\t\tvexist.enter(self.__context.REQUEST, self.__context.REQUEST.RESPONSE)\n\n\t\t\t# get_transaction().commit(1) doesn't seem to do it !\n\n\t\t\t# don't be fucked by Zope's automatic redirection\n\t\t\tself.__context.REQUEST.RESPONSE.setStatus(200)\n\t\t\tself.htmlmessage(\"You'll be working in version %s at the end of the current transaction\" % self.ObjectPath(vexist))", "def display_return_car(self):\n self.is_return = True\n self.login_menu()", "def section(self, section_name):\n section = site_sections.get(section_name)\n if not section:\n if site_sections.get_section_names() == []:\n raise TypeError('class site_sections is not set up. Call autodoscover first.')\n section = site_sections.get(section_name)\n if not section:\n raise TypeError('Could not find section \\'{0}\\' in site_sections. 
You need to define a section class for this name in section.py.'.format(section_name))\n self._section = section()", "def get_section_number() -> int:\n section_num = input('Enter a section number (1 - 4): ')\n while not (section_num.isdigit() and wf.is_valid_section(int(section_num))):\n print('Invalid section number!')\n section_num = input('Enter a section number (1 - 4): ')\n return int(section_num)", "def enable(self):\n self.SetInteractive(1)", "def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0", "def _create_section(self, parent, sectionid, title=None, term=None):\n\n idb = nodes.make_id(sectionid)\n section = nodes.section(ids=[idb])\n parent.append(section)\n\n if term:\n if term != '**':\n section.append(nodes.term('', term))\n\n definition = nodes.definition()\n section.append(definition)\n\n return definition\n\n if title:\n section.append(nodes.title('', title))\n\n return section", "def edit_modal_section(request, subject_pk, item_pk, section_pk):\n subject = get_object_or_404(Subject, pk=subject_pk)\n item = get_object_or_404(Item, pk=item_pk)\n updateSection = False\n try:\n section = get_object_or_404(ModalSection, pk=section_pk)\n updateSection = True\n except:\n section = []\n\n print(request.POST)\n\n if request.method == \"POST\":\n if updateSection:\n section.title = request.POST.get('title')\n section.information = request.POST.get('information')\n if request.POST.get('collapse'):\n section.collapse = True\n else:\n section.collapse = False\n section.save()\n else:\n information = informationForm(request.POST)\n if information.is_valid():\n information_form = information.save(commit=False)\n\n information_form.save()\n item.modal.add(information_form)\n\n return redirect('modal', subject.id, item.id)\n\n\n context = {\n 'informationForm': informationForm,\n 'subject': subject,\n 'section': section,\n 'item': item,\n }\n\n return render(request, 'modal/edit_modal_section.html', context)", "def menu(self):\n ## This is a DICTIONARY, it's a list with custom index values. Python is cool.\n # Please feel free to change the menu and add options.\n print(\"\\n *** MENU ***\") \n menu = {\"c\": (\"Calibrate\", self.calibrate),\n \"d\": (\"Dance\", self.dance),\n \"h\": (\"Hold position\", self.hold_position),\n \"n\": (\"Navigate\", self.nav),\n \"o\": (\"Obstacle count\", self.obstacle_count),\n \"q\": (\"Quit\", self.quit),\n \"v\": (\"Veer\", self.slither)\n }\n # loop and print the menu...\n for key in sorted(menu.keys()):\n print(key + \":\" + menu[key][0])\n # store the user's answer\n ans = str.lower(input(\"Your selection: \"))\n # activate the item selected\n menu.get(ans, [None, self.quit])[1]()", "def start_edit(self):\n txt = self.model.get_current_line()\n self._line.original_widget = self._line_edit\n self._line_edit.set_edit_text(txt)\n self._line_edit.set_edit_pos(len(txt))\n self._top.set_focus(2)" ]
[ "0.63331175", "0.6145211", "0.6034579", "0.5901067", "0.5828898", "0.5818313", "0.578652", "0.57040715", "0.5648721", "0.56081504", "0.54907554", "0.54647225", "0.5463961", "0.5425503", "0.5409415", "0.5407325", "0.5397818", "0.5397088", "0.5397088", "0.5394315", "0.53922415", "0.5387689", "0.5365979", "0.5352392", "0.5322396", "0.5286742", "0.5283657", "0.5280768", "0.52491933", "0.52208847", "0.52158886", "0.52109647", "0.5208305", "0.5201707", "0.5188459", "0.5154146", "0.5146242", "0.51295465", "0.508117", "0.50763303", "0.506567", "0.5061749", "0.50145185", "0.5005177", "0.49958855", "0.49952683", "0.49934527", "0.49915326", "0.4990997", "0.4978657", "0.49708793", "0.49588656", "0.49568543", "0.49543476", "0.4944216", "0.49425778", "0.49417195", "0.49284318", "0.491503", "0.49104357", "0.4908044", "0.49019918", "0.48912385", "0.48880848", "0.48864508", "0.48813003", "0.48773226", "0.48756886", "0.48706946", "0.4866581", "0.48576483", "0.48533306", "0.4850819", "0.48434773", "0.4833708", "0.48280606", "0.482299", "0.48125127", "0.48123544", "0.4809123", "0.48069847", "0.4805196", "0.48050213", "0.47969222", "0.47936338", "0.47899354", "0.47898772", "0.47856802", "0.47837016", "0.4782411", "0.47811672", "0.47648072", "0.4764618", "0.47635284", "0.4756336", "0.47563136", "0.47556663", "0.4754604", "0.4753526", "0.474965", "0.47493514" ]
0.0
-1
Context manager enter function.
def __enter__(self):
    if len(self._qubits) > 0:
        engine = ControlEngine(self._qubits, self._state)
        insert_engine(self.engine, engine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __enter__(self):\n self._logger.debug(\"__enter__()\")\n self.install(\"PRE\")", "def enter():\n pass", "def main_thread_enter(self):\n ...", "def enter_context(self, cm):\n # We look up the special methods on the type to match the with\n # statement\n _cm_type = type(cm)\n _exit = _cm_type.__exit__\n result = _cm_type.__enter__(cm)\n self._push_cm_exit(cm, _exit)\n return result", "def run401_02():\n\n class Context:\n def __init__(self):\n print('__init__()')\n\n def __enter__(self):\n print('__enter__()')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # print(exc_type, exc_val, exc_tb)\n print('__exit__()')\n\n with Context():\n print('do something')", "def enter(self, t):\n super(Agent, self).enter(t)\n\n if self._module is not None:\n self._module.enter(t)", "def enter(self, identifier):\n self.current.enter(identifier)", "def activate(cls, ctx):\r\n if hasattr(ctx, '_on_context_exit'):\r\n raise cls.ContextError('Context actions registered outside this parse context arg active')\r\n\r\n try:\r\n cls._active.append(ctx)\r\n ctx._on_context_exit = []\r\n yield\r\n finally:\r\n for func, args, kwargs in ctx._on_context_exit:\r\n func(*args, **kwargs)\r\n del ctx._on_context_exit\r\n cls._active.pop()", "def context_started(self, cls, example):", "def __enter__(self):\n self.manual_enter()\n return self", "def enter(self):\n log.debug(\"Entering context creator for GetDoer\")\n if not self.node.isRunning():\n conf = configs.get(self.port)\n self.node = get_node(self.port, **conf)", "def __enter__(self):\n self.__within_context = True\n if not self.__initialized:\n self.__initialization__()\n return self", "def on_enter(self, userdata):\n pass", "def in_context(self):\n pass", "def enter(self):\n log.debug(f\"Entering context creator for PutDoer; node running {self.node.isRunning()}.\")\n if not self.node.isRunning():\n conf = configs.get(self.port)\n self.node = get_node(self.port, **conf)", "def __enter__(self):\n self.start()\n return self", "def __enter__(self):\n self.start()\n return self", "def state_processing_enter(cfg, app, win):", "def on_enter(self):\n raise NotImplemented(\"on_enter method should be implemented.\")", "def __enter__(self):\n print(self.msg)\n self.start = self()\n return self", "def __enter__(self):\r\n pass", "def enterScope(self, name):", "def __enter__(self):\n stdout('Starting {}{}'.format(self.name, ' on device {}'.format(self.device) if self.device else ''))\n stdout('')\n self.timer_start('script')\n\n if self.device:\n self._device_ctx.__enter__()\n\n return self", "async def __aenter__(self):\n if not self._active:\n await self._setup()\n self._active = True\n self._entered += 1\n return self", "def __enter__(self):\n pass", "def __enter__(self):\n pass", "def switch_context(self, context):\r\n self.context_stack.append(self.current_context)\r\n self.current_context = context", "def enter_request_context(self, request):\r\n context = {\r\n 'session': self.get_session_key(request),\r\n 'user_id': self.get_user_primary_key(request),\r\n 'username': self.get_username(request),\r\n }\r\n for header_name, context_key in META_KEY_TO_CONTEXT_KEY.iteritems():\r\n context[context_key] = request.META.get(header_name, '')\r\n\r\n context.update(contexts.course_context_from_url(request.build_absolute_uri()))\r\n\r\n tracker.get_tracker().enter_context(\r\n CONTEXT_NAME,\r\n context\r\n )", "def state_capture_enter(cfg, app, win):", "def enter_global_profile():\n # We set a global variable because otherwise the context object will be 
garbage\n # collected which will call __exit__ as soon as this function scope ends.\n global GLOBAL_PROFILE_CM\n\n if GLOBAL_PROFILE_CM:\n return # A global context already has been entered\n\n name = prefect.settings.get_active_profile(name_only=True)\n GLOBAL_PROFILE_CM = profile(name=name, initialize=False)\n GLOBAL_PROFILE_CM.__enter__()", "def __enter__(self, *args, **kwargs):\n for _, impl in self._client_impls.items():\n if hasattr(impl, '__enter__'):\n impl.__enter__(*args, **kwargs)\n # mark the current KeyVaultClient as _entered so that client implementations instantiated\n # subsequently will also have __enter__ called on them as appropriate\n self._entered = True\n return self", "def __enter__(self) -> None:\n raise NotImplementedError()", "def start(self) -> None:\n self.__enter__()", "def push_context(self):\n raise NotImplementedError()", "def _set_context(self, ctx):\n try:\n current_engine_name = self.parent.engine.name \n if sgtk.platform.current_engine(): \n sgtk.platform.current_engine().destroy()\n sgtk.platform.start_engine(current_engine_name, ctx.tank, ctx)\n except Exception, e:\n QtGui.QMessageBox.critical(self, \n \"Could not Switch!\", \n \"Could not change work area and start a new \" \n \"engine. This can be because the task doesn't \"\n \"have a step. Details: %s\" % e)\n return", "def on_start(self, ctx):\n pass", "def on_pre_enter(self):\n self.setup()\n self.start()", "def __enter__(self):\n self.run()\n return self", "def __enter__(self,*args,**kwargs):\n return self", "def __enter__(self):\n self.boot()\n self.process_resources()\n self.run()\n return self", "def __enter__(self):\n self.login()\n return self", "def __enter__(self):\n\n # Create a new context and add it to the stack\n try:\n self._contexts.append(HistoryManager())\n except AttributeError:\n self._contexts = [HistoryManager()]\n\n return self", "def gen_function_enter(self, args): # pragma: no cover\n raise NotImplementedError(\"Implement me!\")", "def _handler_direct_access_enter(self, *args, **kwargs):\n # Tell driver superclass to send a state change event.\n # Superclass will query the state. 
\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)\n \n self._sent_cmds = []", "def __enter__(self):\n raise NotImplementedError", "def _handler_direct_access_enter(self, *args, **kwargs):\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)\n\n self._sent_cmds = []", "def _handler_direct_access_enter(self, *args, **kwargs):\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)\n\n self._sent_cmds = []", "def __enter__(self):\n\t\tself.push()", "def enter_state(self):\r\n self._logger_.enter_state()\r\n return", "def __enter__(self):\n\t\treturn self", "def state_print_enter(cfg, app, win):", "def __enter__(self):\n self.push()", "def state_failsafe_enter(cfg, app, win):", "def push_context(self, ctx):\n self._tpl_context = ctx", "def context(self) -> CONTEXT:", "async def __aenter__(self):\n await self.start()", "def handle_context_missing(self):", "def mouse_enter(self):\n pass", "def beginScope():", "def cooked_mode(self) -> ContextManager[None]:", "def enter(self):\n\t\t# Make sure parameter node exists and observed\n\t\tself.initializeParameterNode()\n\t\tself.active = True\n\t\tself.onPlanChange()", "def enter_loop():\n\n # Save whether we are currently in a loop or not.\n global in_loop\n in_loop_stack.append(in_loop)\n\n # We are now processing a loop body.\n in_loop = True", "def __enter__(self) -> None:\n self.log = super().__enter__()", "def __enter__(self):\n self.new_session()\n return self", "def enter(self):\n LOGGER.debug(\"State 'open' entered\")", "def ctx():\n return None", "def state_chosen_enter(cfg, app, win):", "def _enter_function(self, function_name=None):\n self._comparison_counter = 0\n self._return_label_counter = 0\n self._current_function_name = function_name if function_name else self._vm_file_name", "def on_pre_enter(self):\n Logger.info('Application: Changed to the Return screen.')", "def enter(self, env):\n env = self._find_env(env, new=True)\n env.add_agents(self)", "def init_with_context(self, context):\n return super(MainMenu, self).init_with_context(context)", "def _start(self):", "def activate(self):\n pass", "def __enter__(self):\r\n return self", "def _handler_command_enter(self, *args, **kwargs):\n # Command device to update parameters and send a config change event.\n self._update_params()\n\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)", "def ev_windowenter(self, event: WindowEvent) -> None:", "def _context(name, func):\n\tpush_aspect(name, func)\n\tyield\n\tpop_aspect(name)", "def __enter__(self):\n self.initialize()\n return self", "def __enter__(self): # suppress(no-self-use)\n Task.nest_level += 1\n IndentedLogger.__enter__()", "def append_cursor_enter_callback(self):", "def __enter__(self):\r\n if not self._session:\r\n self.restart()\r\n return self", "def __enter__(self):\n self._source.__enter__()\n return self", "def ev_windowenter(self, event: tcod.event.WindowEvent) -> T | None:", "def state_choose_enter(cfg, app, win):", "def jump_enter(self, *args):\n return _ida_hexrays.vdui_t_jump_enter(self, *args)", "def switch(self, context):\n return", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def __init__(self):\n self._context = {}", "def activate(self):\n pass", "def __enter__(self):\n 
self.enode.get_shell('bash').send_command('scapy', matches=self.scapy_prompt)\n self.enode.get_shell('bash').send_command('import sys', matches=self.scapy_prompt)\n self.enode.get_shell('bash').send_command('sys.path.append(\".\")', matches=self.scapy_prompt)\n self.enode.get_shell('bash').send_command('sys.path.append(\"/tmp\")', matches=self.scapy_prompt)\n return self", "def setup_with_context_manager(testcase, cm):\n val = cm.__enter__()\n testcase.addCleanup(cm.__exit__, None, None, None)\n return val", "def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def context():\n return dict()", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}" ]
[ "0.73215747", "0.70834184", "0.6925852", "0.69022465", "0.6660981", "0.6605945", "0.6528679", "0.6526397", "0.65165555", "0.64868927", "0.64753264", "0.64307684", "0.6430514", "0.6430324", "0.6379566", "0.6323365", "0.6323365", "0.6295275", "0.6294278", "0.62788516", "0.6265315", "0.6260524", "0.62565124", "0.6235004", "0.62323713", "0.62323713", "0.62101996", "0.6189461", "0.6185391", "0.6173575", "0.6170922", "0.61592984", "0.6156547", "0.61311173", "0.606584", "0.6044158", "0.60359395", "0.60046315", "0.59925246", "0.5981953", "0.59708095", "0.5948152", "0.5937813", "0.59152746", "0.58925706", "0.58759844", "0.58759844", "0.58754444", "0.5861492", "0.58614445", "0.58467656", "0.5821491", "0.578883", "0.5781107", "0.57665145", "0.57565635", "0.57331896", "0.5704951", "0.57041657", "0.56883705", "0.5684728", "0.5674919", "0.5665325", "0.56592494", "0.56533056", "0.56512994", "0.56439084", "0.5635625", "0.5634735", "0.5634616", "0.5623662", "0.5621781", "0.56181836", "0.5596493", "0.559499", "0.5593959", "0.5586598", "0.558372", "0.55655694", "0.5561604", "0.5539698", "0.5534623", "0.5515141", "0.551493", "0.55141807", "0.55132115", "0.55125743", "0.5512535", "0.550282", "0.5500763", "0.55007446", "0.5499145", "0.5498966", "0.5498966", "0.5496309", "0.5472978", "0.5472978", "0.5470987", "0.5470987", "0.5470987" ]
0.59952956
38
Context manager exit function.
def __exit__(self, exc_type, exc_value, exc_traceback):
    # remove control handler from engine list (i.e. skip it)
    if len(self._qubits) > 0:
        drop_engine_after(self.engine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exit(context):\n return _nfc.exit(context)", "def __exit__(self, *args, **kwargs):\n\n pass", "def exit(self):\n pass", "def exit(self):\n self.current.exit()", "def __exit__(self, *args):\n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\n\n self.quit()", "def _exit(self, save_vars):\n raise NotImplementedError()", "def exit(self):\n logger.debug(\"EXIT\")", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, *args):\n self.stop()", "def __exit(self, *args):\n sys.exit(0)", "def exit(self):\n self.exit_flag = True", "def exit(self):\n super(Agent, self).exit()\n\n if self._module is not None:\n self._module.exit()", "def __exit__(self, *args):\n if self.teardown:\n super().__exit__(*args)", "def __exit__(self, type, value, traceback):\n context = self._contexts.pop()\n context.reset()", "def on_exit(self):\n pass", "def state_finish_exit(cfg, app, win):", "def exit(self):\n self.close()", "def __exit__(self):\n self._stop_all()", "def exit(self, _):\n try:\n self.execution_manager.close()\n except QMapError as e:\n print(e)\n raise urwid.ExitMainLoop()", "def exit(self):\n return self.__exit", "def exit(self) -> None:\n self.on_exit(None)", "def exit(self):\n print(\"\\n***************************** Exit Metafor *****************************\")", "def __exit__(self, type, value, tb):\t\t\n\t\tself.disconnectTeamserver()", "def exit():\n sys.exit(1)", "def close(self):\n self.exit()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def do_exit(self,*args):\r\n return True", "def __exit(self, exit_code=0):\r\n self._workspace_manager.stopAutoLoader()\r\n self._workspace_manager.stopReportManager()\r\n\r\n self._main_window.hide()\r\n print \"Closing Faraday...\"\r\n self._workspace_manager.saveWorkspaces()\r\n envs = [env for env in self._shell_envs.itervalues()]\r\n for env in envs:\r\n env.terminate() \r\n \r\n print \"stopping model controller thread...\"\r\n self._model_controller.stop()\r\n print \"stopping model controller thread...\"\r\n self.qapp.quit()\r\n print \"Waiting for controller threads to end...\"\r\n self._model_controller.join()\r\n \r\n return exit_code", "def main_thread_exit(self):\n ...", "def __exit__(self, type, value, traceback):\n\n self.close()", "def __del__(self):\n self.exit()", "def __exit__(self, exc_type, exc_value, traceback):\n if self._close_on_exit:\n self.close()", "def exit(self, *args):\n self.stop('all')\n sys.exit(1)", "def do_exit(self, arg):\n return self._leave(arg)", "def close():\n sys.exit()", "def do_exit(self, arg):\n self.db.close_db()\n print(\" \\\\o_ Bye-bye...\")\n print(\" / \")\n print(\"<\\\\\")\n sys.exit()", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n self.close()", "def exit(self):\n \t\troot.destroy()\n \t\tpass", "def __exit__(self, type_, value, 
traceback):\n self.close()", "def exit_client(self):\r\n\r\n sys.exit()", "def exit_handler():\n logger.debug(\"Application exit caught.\")\n save_state()", "def __exit__(self, type=None, value=None, traceback=None):\n self.stop()", "def __exit__(self, exc_type, exc_value, traceback):\n return self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n nvmlShutdown()", "def __exit__(self, exc_type, exc_val, exc_tb):\n pass", "def OnExit(self, event):\n \n print 'Cleaning up...'\n self.Destroy()", "def __exit__(self, exc_type, exc_val, exc_tb):\r\n pass", "def exitProgram():\n canvas.destroy()\n tool.destroy()\n code_editor.destroy()\n sys.exit()", "def __exit__(self, exc_type, exc_value, traceback):\n self._cleanup()", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exc_type, exc_value, traceback):\r\n pass", "def __exit__(self, exception_type, exception_value, traceback):\n QueuingContext._active_contexts.remove(self)", "def exitprogram():\n sys.exit()", "def exit_program():\n quit()", "def exit(self):\n self.client.logout(self.creds, self.environment)\n self.transport.close()", "def on_exit(self, userdata):\n pass", "def __exit__(self, exc_type, exc_value, traceback):\n pass", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exception_type, exception, traceback):\n self.close()", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def _exit() -> None:\n\n print(\n \"Thanks for using TbSET. \"\n \"See you next time!\\n\"\n )", "def state_processing_exit(cfg, app, win):", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.__del__()", "def __exit__(self, *excinfo):\n pass", "def __exit__(self, exc_type, exc_value, exc_traceback):\n\n self.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def __exit__(self, *args):\n self.close()\n return False", "def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n self.deinit()", "def do_exit(self, args):\n return -1", "def do_exit(self, _):\n return True", "def exit_loop(self):\n self.loop.exit()", "def __exit__(self, exec_type, exec_value, traceback):\n #TODO: probably should shut down the visualization server component gracefully here", "def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.stop()", "def __exit__(self, *ex_info):\n if self.device:\n self._device_ctx.__exit__(*ex_info)\n\n stdout('')\n stdout('Finished {0} in {1:0.1f}s'.format(self.name, self.timer_elapsed('script')))", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\r\n self.close()", "def __exit__(self, exception_type, exception_value, traceback):\n self._resource.__exit__(exception_type, exception_value, traceback)", "def click_Exit(self, event):\n exit()", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n pass", "def menuExit(self, event):\n \n self.onClose(event)\n return", "def __exit__(self, exception, value, trace):\n self.manual_exit()", "def exit_engine(self):\n self.stop_flag = True", "def on_exit(session):\n session.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n printy(\"Cleaning after myself...\")\n self.key.delete()\n if self.instance:\n self.instance.terminate()\n # wait for the machine to terminate\n self.wait_for_status(48)\n\n 
self.sec_grp.delete()\n os.remove(self.key.name + \".pem\")\n printy(\"Builder teardown complete\")", "def exit(self): \n self.teo_exchange_intent = self.teo_wallet\n self.withdraw_intent = self.euro_wallet\n\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)\n\n if self.teo_wallet + self.euro_wallet == 0:\n print('Agent exited: ', self.__class__.__name__)\n self.model.schedule.remove(self)", "def finalizeExit(self) -> None:\n base.graphicsEngine.removeAllWindows()\n if self.win is not None:\n print(\"Exiting KarelCraft app, bye!\")\n self.closeWindow(self.win)\n self.win = None\n self.destroy()\n sys.exit()", "def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):\n self.close()", "def exit(self):\n if self.debug:\n print(\"%s exit\" % self.name)\n self.stop()" ]
[ "0.79426813", "0.76409006", "0.7519487", "0.7436684", "0.7398033", "0.7334061", "0.7196403", "0.714664", "0.7124016", "0.7124016", "0.7095566", "0.70946825", "0.70733523", "0.70722145", "0.70669067", "0.7063392", "0.70594615", "0.70451325", "0.69709545", "0.69591916", "0.6957424", "0.6931819", "0.6931609", "0.6926434", "0.6925933", "0.6923723", "0.69231623", "0.6922556", "0.6922556", "0.6922556", "0.6922556", "0.6922556", "0.69171256", "0.69019854", "0.6898544", "0.68940866", "0.68917596", "0.68914896", "0.68746984", "0.68738765", "0.6865487", "0.6861937", "0.6844871", "0.6844871", "0.6844794", "0.6832307", "0.6832307", "0.6832307", "0.6832307", "0.68273646", "0.6823884", "0.682059", "0.68128973", "0.67932355", "0.67893624", "0.6788037", "0.6787443", "0.6783447", "0.67786133", "0.6777659", "0.6775439", "0.6773684", "0.6773684", "0.6773684", "0.67710954", "0.6771038", "0.67655134", "0.67547196", "0.6753467", "0.67514", "0.6748661", "0.6748661", "0.67432904", "0.6742059", "0.6731818", "0.6726409", "0.672617", "0.67196995", "0.67148006", "0.66930354", "0.6688179", "0.6679618", "0.6675092", "0.6671087", "0.66699135", "0.6658429", "0.66542655", "0.6641518", "0.66391414", "0.66362417", "0.66359395", "0.6629156", "0.66271365", "0.66268414", "0.6610231", "0.66072214", "0.6587899", "0.6586996", "0.6568896", "0.6568142", "0.6563627" ]
0.0
-1
Return the number of control qubits of the command object cmd.
def get_control_count(cmd): return len(cmd.control_qubits)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_commands(self):\n return len(self.commands)", "def count(self):\n return len(self._commands)", "def __len__(self):\n return len(self.commands)", "def length(self):\n return len(self._commands)", "def get_count_of_controls(self, recurse: bool) -> int:\n return len(list(self.get_all_controls(recurse)))", "def ctrlqueue_num_actions(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(2), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def num_controls(self):\n return len(self._controls)", "async def count(self, ctx):\r\n if ctx.invoked_subcommand is None:\r\n await self.bot.send_cmd_help(ctx)", "def num_qubits(self) -> int:\n return self._circuit.num_qubits", "def count_qubits(operator):\n # Handle FermionOperator.\n if isinstance(operator, FermionOperator):\n num_qubits = 0\n for term in operator.terms:\n for ladder_operator in term:\n if ladder_operator[0] + 1 > num_qubits:\n num_qubits = ladder_operator[0] + 1\n return num_qubits\n\n # Handle QubitOperator.\n elif isinstance(operator, QubitOperator):\n num_qubits = 0\n for term in operator.terms:\n if term:\n if term[-1][0] + 1 > num_qubits:\n num_qubits = term[-1][0] + 1\n return num_qubits\n\n # Handle MajoranaOperator.\n if isinstance(operator, MajoranaOperator):\n num_qubits = 0\n for term in operator.terms:\n for majorana_index in term:\n if numpy.ceil((majorana_index + 1) / 2) > num_qubits:\n num_qubits = int(numpy.ceil((majorana_index + 1) / 2))\n return num_qubits\n\n # Handle DiagonalCoulombHamiltonian\n elif isinstance(operator, DiagonalCoulombHamiltonian):\n return operator.one_body.shape[0]\n\n # Handle PolynomialTensor\n elif isinstance(operator, PolynomialTensor):\n return operator.n_qubits\n\n # Raise for other classes.\n else:\n raise TypeError('Operator of invalid type.')", "def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return num_qubits <= 2", "def subsystem_count(self):\n return len(self)", "def __len__(self):\n return len(self._opts) + len(self._groups)", "def command_ssize(self):\n self.print_out(\"Stack size: %s\" % (str(len(self.vm.stack))))", "def stats(self):\n nqbits = self.operator.num_qubits", "def cmd_num(self):\r\n return self._arm.cmd_num", "def get_num_of_choices(self) -> int:\n return len(self._choices)", "def num_qubits(self) -> int:\n return super().num_qubits", "def num_qubits(self) -> int:\n raise NotImplementedError()", "def count_qubits(operator):\n # Handle FermionOperator.\n valueable_type = (FermionOperator, QubitOperator, QubitExcitationOperator,\n ofops.FermionOperator, ofops.QubitOperator,\n pjops.QubitOperator)\n if isinstance(operator, valueable_type):\n num_qubits = 0\n for term in operator.terms:\n # a tuple compose of single (qubit_index,operator) subterms\n if term == ():\n qubit_index = (0,)\n else:\n qubit_index, _ = zip(*term)\n num_qubits = max(max(qubit_index) + 1,\n num_qubits) # index start with 0\n return num_qubits\n\n if isinstance(operator, PolynomialTensor):\n return operator.n_qubits\n\n raise TypeError(\"Unsupported type of operator {}\".format(operator))", "def count(self):\n self._read_keypad()\n return len(self._current_events)", "def cmd_size(args):", "def qsize(self) -> int:\n pass", "def ctrlqueue_show(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(7), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to get *Num 
Actions of CrlQueue* check if *Queue* is NOT empty\", e)", "def qsize(self): \n return self.__db.llen(self.key)", "def ctrlqueue_queue_size(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(10), ctypes.c_int32(0))", "def timerCount(cmds):\n return int(sum(np.asarray(cmds) == 0x400001)) # numpy version\n #return cmds.count(0x400001) # python list version", "def __len__(self):\n\t\treturn len(self._idle) + len(self._running)", "def get_num_slots(self):\n # Your code here\n return len(self.data)", "def __len__(self):\n\t\treturn self.qsize()", "def CountButtons(self):\r\n\r\n n = 0\r\n \r\n if self.HasCaption() or self.HasCaptionLeft():\r\n if isinstance(wx.GetTopLevelParent(self.window), AuiFloatingFrame):\r\n return 1\r\n \r\n if self.HasCloseButton():\r\n n += 1\r\n if self.HasMaximizeButton():\r\n n += 1\r\n if self.HasMinimizeButton():\r\n n += 1\r\n if self.HasPinButton():\r\n n += 1\r\n\r\n return n", "def qsize(self) -> int:\n return self._queue.qsize()", "def num_running(self):\n return sum(cmd.is_running for id, cmd in self.commands)", "def qsize(self):\r\n return len(self._queue)", "def getListBoxItemCount( self, cCtrlName ):\n oControl = self.getControl( cCtrlName )\n return oControl.getItemCount()", "def get_Q_size(self):\n return len(self.qTable)", "def __len__(self):\n return len(self.qc_mol.atoms) + len(self.br_mol.atoms) + len(self.pc_mol.atoms)", "def len(self) -> int:\n\n return int(self.q)", "def count(self):\n return self.connection.llen(self.key)", "def get_num_items(self):\r\n return self.num_items", "def _num_qubits(self) -> int:\n try:\n return self.__experiment_metadata[\"num_qubits\"]\n except (TypeError, KeyError):\n # Ignore experiment metadata is not set or key is not found\n return None", "def count(self):\n return self.connection._llen(self.key)", "def count(self):\n return self.size()", "def qsize(self) -> int:\n return len(self._queue)", "def count(self):\r\n\r\n return len(self.widgets_list)", "def get_number_of_actions(self):\n return self.__environment.action_space.n", "def size(self):\n\t\treturn len(self.lables)", "def do_count(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n objs = [key for key in map(lambda x: x.split(\".\")[0],\n storage.all().keys())]\n print(objs.count(arg_list[0]))", "def __len__(self):\n return len(self._subSlots)", "def count(self, conn, key):\n return conn.llen(key)", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def size(self):\n return len(self.selected)", "def XPCountChildWidgets(inWidget):\n pass", "def flushcmdcnt(self) :\n\t\ttry :\n\t\t\treturn self._flushcmdcnt\n\t\texcept Exception as e:\n\t\t\traise e", "def dimension(self):\n return len(self.qubit_values)", "def count(self):\n return len([i for i in self.iteritems()])", "def __len__(self):\n\n try:\n return len(self.counts)\n except SpectrumError:\n return len(self.cps)", "def count(self):\n with self.pdq:\n (count,)=self.pdq.cursor().execute('select count(*) from pdq').next()\n return count", "def qsize(self):\n return self.q_size.current_value", "def count(self):\n return len(self)", "def countCmdLineFlags(options, flag):\n counter = 0\n # make sure only flag was supplied\n for key, value in options.__dict__.items():\n if key == flag:\n next\n # If anything but flag was called, increment\n elif value:\n counter += 
1\n\n return counter", "def __len__(self):\n return len(self.trained_rqrmi)", "def count(self):\n return len(self.deck)", "def count(self):\n return self.ming_cursor.count()", "def size(self):\n\t\treturn self._count", "def _add_cmd(self, cmd):\n if cmd.gate == Allocate:\n self._active_qubits += 1\n elif cmd.gate == Deallocate:\n self._active_qubits -= 1\n elif cmd.gate == Measure:\n for qureg in cmd.qubits:\n for qubit in qureg:\n self.main_engine.set_measurement_result(qubit, 0)\n elif self._is_rotation(cmd):\n self._num_rotations += 1\n self._rotations.append(self._decompose_rotation(cmd))", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def nmodes(self):\n if self.mode_selection is not None:\n return len(self.mode_selection)\n else:\n return len(self.mol.normal_modes.modes.freqs)", "def getNumberOfKeys(self) -> int:\n ...", "def num_ops(self):\n num_ops = len(self.desc.normal.genotype[0][0])\n return num_ops", "def size(self):\n return self._N", "def __len__(self):\n return len(self._drawables | self._updateables | self._collidables | self._projectiles)", "def count(self):\n return len(self.read_ints())", "def get_num_applies(self):\n ops = 0\n for _, remainder, _ in self:\n ops += len(remainder)\n return ops", "def has_more_commands(self):\n return self.counter < len(self.lines)", "def has_more_commands(self):\n return self.counter < len(self.lines)", "def size(self):\n return len(self.id2term)", "def __len__(self):\n return self.count", "def __len__(self) -> int:\n return len(self.actions)", "def count(self):\n return self.vcount", "def accelerator_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accelerator_count\")", "def test_get_help(self):\n subcommands = list(self.testcommand.subparser.choices.keys())\n help_message = self.testcommand.get_help()\n self.assertEqual(len(subcommands), help_message.count(\"usage\"))", "def get_num_actions():\n return 6", "def len(self):\n return self.n", "def __len__(self):\n return self._n", "def __len__(self):\n return len(self.options)", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def get_cmdnum(self):\r\n return self._arm.get_cmdnum()", "def bits(self):\n return self._q.bit_length()", "def count(self):\n return(len(self.cards))", "def count_interactive(*args):\n #print args \n return sum(filter(None, args))", "def __len__ ( self ):\n # also used in boolean context\n return len ( self.scripts )", "def test_get_help(self):\r\n subcommands = list(self.testcommand.subparser.choices.keys())\r\n help_message = self.testcommand.get_help()\r\n self.assertEqual(len(subcommands), help_message.count(\"usage\"))", "def items_num(self):\n\t\treturn len(self.items)", "def items_num(self):\n\t\treturn len(self.items)", "def nquads(self):\n return len(self.tree)", "def qsize(self):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n try:\n size = self.__db.llen(self._key)\n except redis.ConnectionError as e:\n raise redis.ConnectionError(repr(e))\n return size", "def num_operators(self) -> int:\n pass", "def num_keys(self):\n return len(self.counter.keys())", "def get_num_conformers(inchi_key: str) -> int:\n params_file = upsearch(WORKFLOW_PARAMS_FILENAME)\n\n unopt_pdbs = params_file.parent / \"unopt_pdbs\" / \"{}*.pdb\".format(inchi_key)\n\n num_conformers = len(glob(str(unopt_pdbs)))\n\n return num_conformers" ]
[ "0.71016645", "0.7081369", "0.659699", "0.63224685", "0.6274492", "0.6267259", "0.6065778", "0.5953152", "0.59503293", "0.59498817", "0.5925458", "0.58802515", "0.5851433", "0.58106935", "0.5802685", "0.5787972", "0.57744926", "0.57360053", "0.5724881", "0.5709863", "0.56957686", "0.56817234", "0.5676002", "0.5670858", "0.5658443", "0.56537336", "0.5653579", "0.56528264", "0.5639464", "0.56366473", "0.55883855", "0.55877763", "0.5586368", "0.5560849", "0.55389243", "0.55257344", "0.55127645", "0.5512633", "0.5495534", "0.5492577", "0.5481483", "0.5472607", "0.54707986", "0.5465449", "0.5439353", "0.5433208", "0.5430535", "0.54237384", "0.54206496", "0.541726", "0.54171056", "0.5415332", "0.54135185", "0.54097074", "0.54031634", "0.539868", "0.53927296", "0.5391072", "0.5381602", "0.53710186", "0.5364952", "0.5363575", "0.53615725", "0.5346803", "0.53409606", "0.5335978", "0.5329272", "0.5327882", "0.53272516", "0.53150946", "0.5313021", "0.5307848", "0.5303915", "0.5303666", "0.5301323", "0.5301323", "0.5298519", "0.5287097", "0.52865386", "0.5286493", "0.52837026", "0.5282331", "0.5281605", "0.528133", "0.5279773", "0.52742285", "0.52737814", "0.52725255", "0.52721184", "0.5268588", "0.5266693", "0.5264978", "0.52649087", "0.5264788", "0.5264788", "0.52616566", "0.52607477", "0.5259943", "0.5257889", "0.5255304" ]
0.91298246
0
Return whether a command has negatively controlled qubits.
def has_negative_control(cmd): return get_control_count(cmd) > 0 and '0' in cmd.control_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return num_qubits <= 2", "def is_use_qps(self) -> bool:\n if self.qps > 0 and self.second > 0:\n return True\n else:\n return False", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "def is_qword(self):\n return ida_bytes.is_qword(self.flags)", "def has_commands(self) -> bool:\n return len(self.commands) > 0", "def is_non_exclusive(self, variable):\n non_exclusive = False\n for sub_effect in self._sub_effects:\n if sub_effect.get_variable() == variable:\n if not sub_effect.is_exclusive():\n non_exclusive = True\n elif len(sub_effect.get_value()) > 0 and not sub_effect.is_negated():\n return False\n return non_exclusive", "def __bool__(self):\n return not(self.outcome != 0 or self.filled)", "def qtilde(self) -> bool:\n return self._qtilde", "def __bool__(self):\n return any(self.smask)", "def noqueue(self) -> bool:\n return not self.orders", "def is_unset(self) -> bool:\n return (None in [self.amount, self.action, self.per]) is True", "def __bool__(self):\n return not self.undefine", "def is_queued(self):\r\n return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)", "def is_terminal(self):\n return self.beta.isEmpty()", "def is_market(self):\n return(not self.is_pending)", "def still_has_questions(self):\n return self.question_number < len(self.question_list) #returns boolean value", "def is_empty(self) -> bool:\n return self.command is None and not self.required", "def canAnswer(self, layer):\n return wt.hangman in layer[wt.keywords]", "def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False", "def skill_has_nonpct_condition(es):\n if not skill_has_condition(es):\n return False\n # Is checking the threshold here right? 
Maybe it should just be checking one_time.\n # Or maybe it's redundant.\n return es.condition.hp_threshold or es.condition.one_time", "def no_flags_set(self):\n # TODO: unit test me\n return not any(\n (\n self.flag_bookmarked,\n self.flag_candidate,\n self.flag_final_causative,\n self.flag_for_validation,\n self.flag_molecular != \"empty\",\n self.flag_visual != \"empty\",\n self.flag_validation != \"empty\",\n self.flag_phenotype_match != \"empty\",\n self.flag_summary != \"empty\",\n )\n )", "def is_neg_unate(self, vs=None):\n vs = self._expect_vars(vs)\n basis = self.support - set(vs)\n maxcov = [PC_ONE] * (1 << len(basis))\n # Test whether table entries are monotonically decreasing\n for cf in self.iter_cofactors(vs):\n for i, item in enumerate(cf.pcdata):\n if maxcov[i] == PC_ZERO and item == PC_ONE:\n return False\n maxcov[i] = item\n return True", "def still_in_hand(self):\n return len(self.hand.cards)!=0", "def is_queued(self):\n qstat = self._grep_qstat('queued')\n if qstat:\n return True\n return False", "def hasCustomEffects(self):\n return not self.getHandle().effects.isEmpty()", "def is_pending(self):\n return self.is_disarming() or self.is_arming()", "def __bool__(self):\n\t\treturn any(c != 0 for c in self)", "def is_Q(self):\n return isinstance(self,Q)", "def still_has_questions(self):\n return self.question_number < len(self.question_list)", "def is_no_command_supported(command):\n command_type = command.get('command-type')\n if command_type:\n if command_type in ['display-table','display-rest', 'show']:\n return False\n no_supported = command.get('no-supported', True)\n if no_supported == False:\n return False\n return True", "def is_not_power_onoff(self):\n return not self.is_power_onoff()", "def any(self) -> bool:\n return len(self.queue) > 0", "def assumed_state(self):\n return self._command_state is False", "def should_poll(self):\n return self._command_state is not None", "def empty(self) -> bool:\n return not bool(self.q)", "def isEmpty(self):\n return all(isinstance(command, Macro) and command.isEmpty() for command in self.commands)", "def is_out_of_stock(self) -> bool:\n return self.on_hand == 0", "def not_pushed_down(self):\n return (self.genus >= 2 and self.n != 0) or (self.genus == 1 and self.n > 1) or (self.genus == 0 and self.n > 3)", "def has_undo(self):\n return self._current_undo_command != -1", "def is_quantitative(self):\n return self._type == 'quantitative'", "def isCtrlHeld():\n return False if pm.about(batch=True) else (pm.getModifiers() & 4) > 0", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def has_quanta(self, quanta_subset):\n\n return all(x in self.q.items()\n for x in quanta_subset.items())", "def isShiftHeld():\n return False if pm.about(batch=True) else (pm.getModifiers() & 1) > 0", "def is_cmd(self, name):\n \n return name in self.cmds", "def is_not_bias(self):\n return not self.is_bias()", "def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False", "def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed", "def empty(self):\n return not self.mystack1 and not self.mystack2", "def is_negated(x) -> bool:\n return not (x & 1 == 0)", "def isUnset(self):\n return self.sides[0].isUnset() and self.sides[1].isUnset()", "def __bool__(self):\n\n return not self.is_empty()", "def with_necks(self):\n 
return hasattr(self, 'necks')", "def is_exclusive(self):\n return self.exclusive", "def has(self, instruction: Union[str, Instruction], qubits: Union[int, Iterable[int]]) -> bool:\n instruction = _get_instruction_string(instruction)\n return instruction in self._map and _to_tuple(qubits) in self._map[instruction]", "def validate_command(command):\n return command in list(VALID_COMMANDS.keys())", "def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)", "def is_non_reducing(self):\n return bool(set(self.kind) & set(\"ABC\"))", "def is_telescope_off_allowed(self):\n handler = self.get_command_object(\"TelescopeOff\")\n return handler.check_allowed()", "def noyable(self):\n return False", "def have_circ_pump(self):\n return bool(self.circ_pump)", "def is_terminal(self, state):\n return len(self.get_possible_actions(state)) == 0", "def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd", "def is_call_not_answered(self) -> bool:", "def is_inequality(self):\n return False", "def isAltHeld():\n return False if pm.about(batch=True) else (pm.getModifiers() & 8) > 0", "def has_redo(self):\n return self._current_undo_command + 1 != len(self._commands)", "def contains(self, i):\n return self.__qp[i] != -1", "def is_summon(self):\n return False", "def is_violated(self,\n env\n ):\n c_value = self.get_value(env)\n flag = np.any(np.greater(c_value, 0.))\n return bool(flag)", "def is_not_final(cls, state):\n return state in cls._not_final_states", "def isValidForSimulation(self):\n for position, myQuad in self.myDesign.quads.iteritems():\n if myQuad.components != {}:\n return 1\n return 0", "def is_atom_quasiconvex(self) -> bool:\n return True", "def is_empty(self):\n return len(self.commands) == 0", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None", "def canRedo(self):\n return self._index < len(self._commands)", "def terminal_test(gameState):\n return len(gameState.get_legal_moves()) == 0", "def has_queued_passes(self, classification):\n return len(self.pass_queues[classification]) > 0", "def attribute(self):\n\n return not bool(self.arguments)", "def discrete_actions(self) -> bool:\r\n return self.actions.discrete", "def isCurrentBarEmpty(self):\r\n return self.bar[self.getCurrentPlayer()].isEmpty()", "def __bool__(self):\n\t\treturn not self.empty()", "def isZero(self):\n\t\treturn (self.p.isZero() & (self.q.isZero() == False))", "def isinvertible(self):\n if np.all(np.abs(self.maroots) > 1):\n return True\n else:\n return False", "def is_not_tilted(self, channel=None):\n return not self.get_state(channel)", "def are_progress_bars_disabled() -> bool:\n global _hf_hub_progress_bars_disabled\n return _hf_hub_progress_bars_disabled", "def can_undo(self) -> bool:\n\n return self.position > 0", "def is_atom_quasiconcave(self):\n return False", "def empty(self) -> 'bool':\n # print(self.que_one)\n # print(self.que_two)\n if not self.que_one and not self.que_two:\n return True\n return False", "def _has_compute_uncompute_tag(cmd):\n for tag in cmd.tags:\n if tag in [UncomputeTag(), ComputeTag()]:\n return True\n return False", "def isPossible(self):\n \n return bool(len(self._possibilities))", "def isSetHasOnlySubstanceUnits(self):\n return _libsbml.Species_isSetHasOnlySubstanceUnits(self)", "def is_logic(self):\n return self.value in ('and_logic', 'or_logic')", "def has_command_with_name(self, command_name):\n return command_name in self.commands", "def 
is_preterminal(self):\n return len(self) == 0", "def is_any_flag_playable(self, direction):\n return any(flag.is_playable(direction) for flag in self.board.flags)", "def is_atom_quasiconvex(self):\n return True", "def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS", "def _is_pop_command(self):\n return self._match_memory_pattern(\"pop\")", "def _isPlayerBolt(self):\n for y in self._bolts:\n if y.getVelocity() > 0:\n return True\n return False" ]
[ "0.6443017", "0.62384444", "0.6159438", "0.59778637", "0.59440106", "0.59436524", "0.5875699", "0.5865012", "0.5853049", "0.5851665", "0.58293706", "0.582807", "0.5757742", "0.5732011", "0.5711578", "0.56456465", "0.56133777", "0.5585858", "0.55658156", "0.55565083", "0.5542356", "0.5531333", "0.55238205", "0.55220777", "0.5516755", "0.5510306", "0.54946977", "0.5479426", "0.54598325", "0.5458763", "0.54504424", "0.5446217", "0.5440253", "0.5432273", "0.5431498", "0.5430168", "0.54195124", "0.54179204", "0.5413701", "0.5405462", "0.5395587", "0.53932786", "0.5390466", "0.53890944", "0.5378482", "0.53754693", "0.53647745", "0.5361005", "0.5359648", "0.5346086", "0.5339108", "0.5336755", "0.5335233", "0.5332985", "0.5332067", "0.5320986", "0.5318783", "0.53143585", "0.5308213", "0.5307532", "0.5307483", "0.53023624", "0.530031", "0.5299198", "0.5292272", "0.5289114", "0.5286629", "0.5284302", "0.52791804", "0.52781785", "0.5277386", "0.52734673", "0.52698714", "0.5268939", "0.5267955", "0.52670735", "0.52641076", "0.5263604", "0.52624965", "0.5261318", "0.5257359", "0.5254165", "0.52519333", "0.5244276", "0.5234583", "0.5234032", "0.5231954", "0.5226043", "0.52238834", "0.52231926", "0.52227587", "0.5216376", "0.5215806", "0.52115566", "0.5208643", "0.5199878", "0.519976", "0.5198315", "0.51969385", "0.5192928" ]
0.69478786
0
Recursively yield DirEntry objects for given directory.
def scantree(path):
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            yield from scantree(entry.path)
        else:
            yield entry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _recursive_scan(directory=None, file_extension='.dvl'):\n directory = directory or app.config['DEVICE_LOG_DRIVE']\n\n for entry in os.scandir(directory):\n if entry.is_dir(follow_symlinks=False):\n yield from _recursive_scan(entry)\n elif os.path.splitext(entry.name)[1] == file_extension:\n yield entry", "def recursive_iterdir(directory, include_hidden=False):\n dir_path = pathlib.Path(directory)\n content = dir_path.iterdir()\n if not include_hidden:\n content = (item for item in content if not is_hidden(item))\n for item in content:\n if item.is_dir():\n yield from recursive_iterdir(item)\n yield item", "def DirEnumerator(args, path):\n for d in os.scandir(path):\n try:\n if d.name == '.' or d.name == '..':\n pass\n elif d.is_symlink() and args.skiplinks:\n pass\n elif d.is_file():\n yield d.path\n elif d.is_dir() and args.recurse:\n for f in DirEnumerator(args, d.path):\n yield f\n except Exception as e:\n print(\"EXCEPTION %s accessing %s/%s\" % (e, path, d.name))", "def _collect_entries(rootdir: str, basedir: str):\n\n files = []\n dirs = []\n\n for entry in os.listdir(os.path.join(rootdir, basedir)):\n rel_path = os.path.join(basedir, entry)\n full_path = os.path.join(rootdir, rel_path)\n isdir = os.path.isdir(full_path)\n if isdir and (rel_path in ('./.git', './.pytest_cache') or entry == '__pycache__'):\n continue\n\n st = os.stat(full_path, follow_symlinks=False)\n\n (dirs if isdir else files).append((rel_path, dict(isdir=isdir, path=rel_path, size=(0 if isdir else st.st_size),\n mode=st.st_mode, omode=f'{st.st_mode:04o}',\n mtime=int(st.st_mtime))))\n\n for rel_path, entry in sorted(dirs):\n yield entry\n yield from _collect_entries(rootdir, rel_path)\n\n for _, entry in sorted(files):\n yield entry", "def walk_dir(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n self.ppath_prefix_idx += 1\n merged_path = os.path.join(prefix, dir)\n for root, dirs, files in self.fswalk_base(merged_path):\n yield merged_path, dirs, files\n else:\n yield self.fswalk_base(dir)", "def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)", "def scandir(path='.'):\r\n for name in os.listdir(path):\r\n yield GenericDirEntry(path, name)", "def walk(self): # DirObj.walk\n for name, subdir in self.subdirs.iteritems():\n for e in subdir.walk():\n yield e\n for name, fileEntry in self.files.iteritems():\n yield fileEntry\n yield self", "def _iter_tree_next(root_full, dir_rel, memo):\n\tdir_full = os.path.join(root_full, dir_rel)\n\tdir_real = os.path.realpath(dir_full)\n\n\t# Remember each encountered ancestor directory and its canonical\n\t# (real) path. 
If a canonical path is encountered more than once,\n\t# recursion has occurred.\n\tif dir_real not in memo:\n\t\tmemo[dir_real] = dir_rel\n\telse:\n\t\traise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)\n\n\tfor node in os.listdir(dir_full):\n\t\tnode_rel = os.path.join(dir_rel, node)\n\t\tnode_full = os.path.join(root_full, node_rel)\n\t\tnode_stat = os.stat(node_full)\n\n\t\tif stat.S_ISDIR(node_stat.st_mode):\n\t\t\t# Child node is a directory, recurse into it and yield its\n\t\t\t# decendant files.\n\t\t\tfor file_rel in _iter_tree_next(root_full, node_rel, memo):\n\t\t\t\tyield file_rel\n\n\t\telif stat.S_ISREG(node_stat.st_mode):\n\t\t\t# Child node is a file, yield it.\n\t\t\tyield node_rel\n\n\t# NOTE: Make sure to remove the canonical (real) path of the directory\n\t# from the ancestors memo once we are done with it. This allows the\n\t# same directory to appear multiple times. If this is not done, the\n\t# second occurance of the directory will be incorrectly interpreted as\n\t# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.\n\tdel memo[dir_real]", "def files_and_folders(self, depth):\n for directory in self.rarc._directories[self.directory_index:][:self.directory_count]:\n yield depth, directory\n if isinstance(directory, Folder):\n if directory.data_offset < len(self.rarc._nodes):\n node = self.rarc._nodes[directory.data_offset]\n if directory.name == \".\" or directory.name == \"..\":\n continue\n yield from node.files_and_folders(depth + 1)", "def scantree(path):\n # type: (str) -> os.DirEntry\n for entry in scandir(path):\n if entry.is_dir(follow_symlinks=True):\n # due to python2 compat, cannot use yield from here\n for t in scantree(entry.path):\n yield t\n else:\n yield entry", "def get_all_files(directory):\r\n for dirpath, _dirnames, filenames in os.walk(directory):\r\n for filename in filenames:\r\n yield (filename, dirpath)", "def getImmediateSubdirectories(dir):", "def walk(d):\n for parent, key, leaf in _walk({}, None, d):\n yield (d, parent, key, leaf)", "def get_object(directory):\r\n path = join(self.base_path, directory)\r\n for f in listdir(path):\r\n if isfile(join(path, f)):\r\n yield \"file\", f\r\n else:\r\n yield \"subdir\", join(directory, f)", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def dir_entries(dir_name, extension=None, subdir=True):\n if extension is None:\n extension = \"*\"\n if extension.startswith(\".\") is False and extension != \"*\":\n extension = \".\" + extension\n\n file_list = []\n for dfile in os.listdir(dir_name):\n dirfile = os.path.join(dir_name, dfile)\n if os.path.isfile(dirfile) is True:\n if extension == \"*\":\n file_list.append(dirfile)\n else:\n fname, fext = os.path.splitext(dirfile)\n if fext.lower() == extension.lower():\n file_list.append(dirfile)\n # recursively access file names in subdirectories\n elif os.path.isdir(dirfile) is True and subdir is True:\n file_list.extend(\n sppasDirUtils.dir_entries(dirfile, extension, subdir))\n\n return 
file_list", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n for path_spec in self._directory.entries:\n yield NTFSFileEntry(\n self._resolver_context, self._file_system, path_spec)", "def iter_tree(root):\n\tfor file_rel in _iter_tree_next(os.path.abspath(root), '', {}):\n\t\tyield file_rel", "def __dir_arg_parse(directory: Directory, directory_path: str) -> Entry:\n dir_split = directory_path.split(\"/\")\n for target in dir_split:\n if target == \"..\":\n if directory.get_parent():\n directory = directory.get_parent()\n elif directory.get_name() != target and target != \".\":\n directory = directory.get_entry(target)\n return directory", "def directories(self):\n for t in self.dirsIter():\n yield t\n for child in self.children:\n for t in child.directories():\n yield t", "def dirwalk(self, topdown=False): # DirObj.dirwalk\n if topdown:\n yield self\n\n for name, d in self.subdirs.iteritems():\n for dirEntry in d.dirwalk():\n yield dirEntry\n\n if not topdown:\n yield self", "def _walk_dir_meta(self):\n for key, child in sorted(self._children.items()):\n if isinstance(child, PackageEntry):\n continue\n meta = child.meta\n if meta:\n yield key + '/', meta\n for child_key, child_meta in child._walk_dir_meta():\n yield key + '/' + child_key, child_meta", "def list_all_files(in_dir):\n\n for dirname, dirs, files in os.walk(in_dir):\n for filename in files:\n yield op.join(dirname, filename)", "def scandir(path='.'):\r\n dir_p = opendir(path.encode(file_system_encoding))\r\n if not dir_p:\r\n raise posix_error(path)\r\n try:\r\n result = Dirent_p()\r\n while True:\r\n entry = Dirent()\r\n if readdir_r(dir_p, entry, result):\r\n raise posix_error(path)\r\n if not result:\r\n break\r\n name = entry.d_name.decode(file_system_encoding)\r\n if name not in ('.', '..'):\r\n yield PosixDirEntry(path, name, entry.d_type)\r\n finally:\r\n if closedir(dir_p):\r\n raise posix_error(path)", "def _get_dir_entries(dir_name, subdir, *args):\n file_list = []\n for file in os.listdir(dir_name):\n dirfile = os.path.join(dir_name, file)\n if os.path.isfile(dirfile):\n if not args:\n file_list.append(dirfile)\n else:\n if os.path.splitext(dirfile)[1][1:] in args:\n file_list.append(dirfile)\n # recursively access file names in subdirectories\n elif os.path.isdir(dirfile) and subdir:\n file_list.extend(_get_dir_entries(dirfile, subdir, *args))\n return file_list", "def _iter_files_in_dir(directory):\n for filename in os.listdir(directory):\n filepath = os.path.join(directory, filename)\n if os.path.isfile(filepath):\n yield filepath", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def directory_walker(start_dir):\n\n for root, dirs, files in os.walk(os.path.expanduser(start_dir)):\n for f in files:\n filename = os.path.join(root, f)\n # Only process if its a type of image\n file_type = mimetypes.guess_type(filename.lower())[0]\n if file_type is not None and file_type.startswith('image/'):\n yield filename", "def parse_dir(self, directory):\n for dir in os.listdir(directory):\n if dir in ['.git', '.github', '.vscode', 'docs']:\n continue\n next_dir = os.path.join(directory, dir)\n if os.path.isdir(next_dir):\n if dir.startswith('template_'):\n 
self.parse_template(next_dir)\n else:\n normpath = os.path.relpath(next_dir)\n normpath = os.path.normpath(normpath)\n path = normpath.split(os.sep)\n self.add_folder(path)\n # add_directory(next_dir)\n self.parse_dir(next_dir)", "def _subdirectories(self):\n for o in os.listdir(self.directory):\n if os.path.isdir(os.path.join(self.directory, o)):\n yield os.path.join(self.directory, o)", "def walkdir(self, folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))", "def path_it(d):\n for p in _path_walk([], d):\n yield p", "def scan_dir(self, dir):\n import pathlib\n import magic\n\n for filename in find_all_files(dir):\n self.filelist.append({\n \"filename\": filename,\n \"mime\": magic.from_file(filename, mime=True),\n \"size_bytes\": os.path.getsize(filename),\n \"ext\": pathlib.Path(filename).suffix\n })", "def get_directories_recursive(self, path) :\n\n if path.is_dir() :\n yield path\n for child in path.iterdir():\n yield from self.get_directories_recursive(child)\n elif path.is_file() :\n yield path", "def get_directory(self, directory: str) -> List[Dict]:\n raise NotImplementedError", "def excursion(directory):\n old_dir = os.getcwd()\n try:\n os.chdir(directory)\n yield\n finally:\n os.chdir(old_dir)", "def in_dir(directory):\n current_dir = os.getcwd()\n os.chdir(directory)\n\n # Add code that lets you handle errors\n try:\n yield\n # Ensure the directory is reset,\n # whether there was an error or not\n finally:\n os.chdir(current_dir)", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n for path_spec in self._directory.entries:\n yield HFSFileEntry(self._resolver_context, self._file_system, path_spec)", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n for path_spec in self._directory.entries:\n yield APMFileEntry(self._resolver_context, self._file_system, path_spec)", "def _get_all_files(dir_path):\n for root, _, filenames in os.walk(dir_path):\n for name in filenames:\n target = os.path.join(root, name)\n yield target", "def scan_dir(self, directory=\".\"):\n for root, dirs, files in os.walk(directory, topdown=False):\n for name in files:\n for filetype in self.allowed_file_types:\n if name.split(\".\")[-1] == filetype:\n self.song_list.append(os.path.join(root, name))", "def get_entries(self):\n for irde in self.Entries:\n if irde != None:\n if irde.Name & 0x80000000:\n # Points to a Name object\n name = obj.Object(\"_IMAGE_RESOURCE_DIR_STRING_U\", (irde.Name & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n else:\n name = int(irde.Name)\n if irde.DataOffset & 0x80000000:\n # We're another DIRECTORY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", (irde.DataOffset & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n retobj.sectoffset = self.sectoffset\n else:\n # We're a DATA_ENTRY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DATA_ENTRY\", irde.DataOffset + self.sectoffset, vm = self.obj_vm, parent = irde)\n yield (name, bool(irde.DataOffset & 0x80000000), retobj)", "def dirwalk(a_dir, a_wildcards= '*'):\n\n #iterate over files in the current dir\n for the_file in fnmatch.filter(sorted(os.listdir(a_dir)), a_wildcards):\n fullpath = os.path.join(a_dir, the_file)\n if not os.path.isdir(fullpath):\n yield fullpath\n \n sub_dirs = os.walk(a_dir).next()[1]\n #iterate over sub_dirs\n for sub_dir in sub_dirs:\n 
fullpath = os.path.join(a_dir, sub_dir)\n for p_elem in dirwalk(fullpath, a_wildcards):\n yield p_elem", "def find_files(d):\n for root, dirs, files in os.walk(d):\n for f in files:\n yield path.abspath(path.join(root, f))", "def dir_context(directory):\n curdir=os.getcwd()\n os.chdir(directory)\n try:\n yield directory\n finally:\n os.chdir(curdir)", "def iter_dir(tree, path):\n for f in os.listdir(path):\n if os.path.isfile(path + '/' + f + '/__init__.py'):\n tree[f] = None\n elif os.path.isdir(path + '/' + f):\n tree[f] = {}\n SnakeWM.iter_dir(tree[f], path + '/' + f)", "def working_directory(dir):\n cwd = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(cwd)", "def working_directory(dir):\n cwd = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(cwd)", "async def leaf_it(d):\n async for _parent, _key, leaf in _walk({}, None, d):\n yield leaf", "def _maybe_iterdir(path: epath.Path) -> Iterator[epath.Path]:\n # Use try/except rather than `.exists()` to avoid an extra RPC call\n # per namespace\n try:\n for f in path.iterdir():\n yield f\n except (\n OSError,\n FileNotFoundError,\n PermissionError,\n tf.errors.NotFoundError,\n tf.errors.PermissionDeniedError,\n ) as e:\n pass", "def _iter_valid_files(directory, white_list_formats, follow_links):\n def _recursive_list(subpath):\n return sorted(os.walk(subpath, followlinks=follow_links),\n key=lambda x: x[0])\n\n for root, _, files in _recursive_list(directory):\n for fname in sorted(files):\n for extension in white_list_formats:\n if fname.lower().endswith('.' + extension):\n yield root, fname", "def walkdir(folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))", "def walk(self):\n for _root, _dirs, files in os.walk(self.root):\n for filename in files:\n if self.is_key(filename):\n yield filename", "def walk(self):\n if os.path.exists(self.folder):\n for root_path, _, f_files in os.walk(self.folder):\n yield root_path, f_files\n if not self.recursive:\n break\n else:\n print(f\"[!e] Passed folder doesn't exist. 
Path: {self.folder}\",\n file=sys.stdout)\n exit(0)", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. This directory matches one of the prune patterns.\n continue\n # We have a keeper! 
Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def iglob_recursive(directory, file_pattern):\n for root, dir_names, file_names in os.walk(directory, followlinks=True):\n files = fnmatch.filter(file_names, file_pattern)\n for filename in files:\n yield os.path.join(root, filename)", "def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):\n walk_func = get_dir_walker(recursive, topdown, followlinks)\n for root, dir_names, file_names in walk_func(dir_pathname):\n yield (root, dir_names, file_names)", "def getFilePaths(directory):\r\n\tfor folder, subs, files in os.walk(directory):\r\n\t\tfor filename in files:\r\n\t\t\tyield os.path.join(folder, filename)", "def files(directory):\n p = directory.fullpath\n with os.scandir(p) as it:\n for entry in it:\n if not entry.is_dir(follow_symlinks=False):\n if entry.is_symlink():\n d = os.readlink(os.path.join(p, entry.name))\n f = File(directory_id=directory.id,\n size=0, mtime=0,\n name=entry.name,\n link=True, destination=d)\n else:\n st = entry.stat(follow_symlinks=False)\n f = File(directory_id=directory.id,\n size=st.st_size,\n mtime=int(st.st_mtime),\n name=entry.name)\n Session.add(f)\n Session.commit()", "def _GetSubFileEntries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory:\n tar_file = self._file_system.GetTARFile()\n if tar_file:\n for path_spec in self._directory.entries:\n location = getattr(path_spec, 'location', None)\n if location is None:\n continue\n\n kwargs = {}\n try:\n kwargs['tar_info'] = tar_file.getmember(location[1:])\n except KeyError:\n kwargs['is_virtual'] = True\n\n yield TARFileEntry(\n self._resolver_context, self._file_system, path_spec, **kwargs)", "def filesInDirectory(dirName):\n for name in os.listdir(dirName):\n mode = os.stat(os.path.join(dirName, name)).st_mode\n if stat.S_ISREG(mode):\n yield name", "def GetDirFilesRecursive(directory):\n dirfiles = set()\n for dirpath, _, files in os.walk(directory):\n for name in files:\n dirfiles.add(os.path.normpath(os.path.join(dirpath, name)))\n return dirfiles", "def iterfiles(self, include_dirs: bool = False) -> Iterator[P]:\n dirs = deque([self.filetree])\n while dirs:\n for p in dirs.popleft().iterdir():\n if p.is_dir():\n dirs.append(p)\n if include_dirs:\n yield p\n else:\n yield p", "def scan_tree(path):\n list_of_file_paths = []\n for file_obj in scandir(path):\n if file_obj.is_dir(follow_symlinks=False):\n # yield from scan_tree(file_obj.path)\n list_of_file_paths.extend(scan_tree(file_obj.path))\n else:\n # yield file_path\n if 'DS_Store' not in file_obj.path:\n list_of_file_paths.append(file_obj.path)\n return list_of_file_paths", "def list_directory(project_tree, directory):\n _, subdirs, subfiles = next(project_tree.walk(directory.path))\n return DirectoryListing(directory,\n [Path(join(directory.path, subdir)) for subdir in subdirs\n if not subdir.startswith('.')],\n [Path(join(directory.path, subfile)) for subfile in subfiles])", "def ls_dir(d):\n return [d for d in [os.path.join(d, f) for f in os.listdir(d)] if os.path.isdir(d)]", "def walk_data(dist, path='/'):\r\n for rel_fn in filter(None, dist.resource_listdir(path)):\r\n full_fn = os.path.join(path, rel_fn)\r\n if dist.resource_isdir(full_fn):\r\n for fn, stream in DistributionHelper.walk_data(dist, full_fn):\r\n yield fn, stream\r\n else:\r\n yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)", "def globTree(rootDir, 
inclFiles=[\"*\"], exclFiles=[],\n inclDirs=[\"*\"], exclDirs=[], stripCount=0):\n return list(iterGlobTree(rootDir, inclFiles=inclFiles, exclFiles=exclFiles,\n inclDirs=inclDirs, exclDirs=exclDirs, stripCount=stripCount))", "def list_directory2(self, mdir, limit=None, marker=None):\n log.debug('ListDirectory %r', mdir)\n\n query = {}\n if limit:\n query[\"limit\"] = limit\n if marker:\n query[\"marker\"] = marker\n\n res, content = self._request(mdir, \"GET\", query=query)\n if res[\"status\"] != \"200\":\n raise errors.MantaAPIError(res, content)\n lines = content.splitlines(False)\n dirents = []\n for line in lines:\n if not line.strip():\n continue\n try:\n dirents.append(json.loads(line))\n except ValueError:\n raise errors.MantaError('invalid directory entry: %r' % line)\n return res, dirents", "def _find_files(directory: str, pattern: str) -> Iterator[str]:\n for root, dirs, files in os.walk(directory, topdown=True):\n dirs[:] = [d for d in dirs if _is_file_valid(d)]\n for basename in sorted(files):\n if _is_file_valid(basename) and fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def find(directory, slash='/', pattern=r'.+\\.out'):\n for directory, subdirectories, files in os.walk(directory):\n for file in files:\n if re.findall(pattern, str(file)):\n yield str(directory + slash + file)", "def list_image_files(dir, filter=None):\n for entry in os.listdir(dir):\n path = os.path.join(dir, entry)\n if os.path.isdir(path):\n for p in list_image_files(path, filter):\n yield p\n elif any((entry.lower().endswith(ext) for ext in image_exts)):\n if filter and not filter(path):\n continue\n yield path", "def walk(self):\n for dirpath, subdirs, files in os.walk(self.wiki_dir):\n # Skip hidden files\n files = filter(lambda d: not d.startswith('.'), files)\n\n # skip this directory if it is hidden too\n if os.path.basename(dirpath).startswith('.'):\n continue\n\n for filename in filter(self._valid_extension, files):\n full_filename = os.path.join(dirpath, filename)\n rel_name = os.path.relpath(full_filename, start=self.wiki_dir)\n name = os.path.basename(rel_name)\n dirs = rel_name.split('/')[:-1]\n\n # Make sure there are no index.* files\n if os.path.splitext(os.path.basename(full_filename))[0] == 'index':\n raise InvalidFileName(full_filename)\n\n crumbs = []\n\n # Add the root dir\n crumbs.append(Crumb('index', '/'))\n\n # Add all other parent directories\n for d in dirs:\n href = os.path.join(crumbs[-1].href, d) + '/'\n temp = Crumb(d, href)\n crumbs.append(temp)\n\n # Then add the page itself\n crumbs.append(Crumb(name, None))\n yield name, crumbs", "def pushd(directory):\n prevdir = os.getcwd()\n os.chdir(directory)\n try:\n yield prevdir\n finally:\n os.chdir(prevdir)", "def scanDirectories(directory, includes = [\"*\"], excludes = []):\n\treturn scanAll(directory, includes, excludes)[2]", "def foreach_metapack_subdir(c):\n\n for d in _build_order(c):\n d = d.resolve()\n print(\"⏩ \", d)\n\n curdir = os.getcwd()\n\n os.chdir(d)\n\n yield d\n\n os.chdir(curdir)", "def get_all_paths(dmt, directory_path=''):\n # Base case.\n if not dmt.children:\n return set()\n \n filesystem_items = set()\n for item in dmt.children.keys():\n filesystem_items.add(directory_path+item)\n # Also get the paths of subdirectory contents.\n if item[-1] == '/':\n subdir_name = item\n subdir_path = directory_path + subdir_name\n \n filesystem_items.add(subdir_path)\n filesystem_items.update(get_all_paths(dmt.children[subdir_name], subdir_path))\n \n return 
filesystem_items", "def walk(self):\n for name, child in sorted(self._children.items()):\n if isinstance(child, PackageEntry):\n yield name, child\n else:\n yield from child._walk(f'{name}/')", "def format_dir_list_recurse(curdir, search=\"\"):\n total = []\n for item in curdir.contents:\n if isinstance(item, ListDir):\n total.extend(format_dir_list_recurse(item, search=search))\n if item.used:\n total.append(item)\n elif isinstance(item, ListItem):\n if search in item.itempath:\n item.used = True\n curdir.used = True\n total.append(item)\n return total", "def iter_dir_tree(top, nohidden=True, pattern=\".*\"):\n for root, dirs, files in os.walk(top):\n if nohidden:\n remove_hidden_files(dirs)\n remove_hidden_files(files)\n for f in files:\n if re.match(pattern, f):\n yield os.path.join(root, f)", "def recursive_unpack(dir_path):\n exten = ['7z', 'zip', 'rar']\n one_more = False\n for r, d, files in os.walk(dir_path):\n packed = []\n for ext in exten:\n code_files = fnmatch.filter(files, '*.' + ext)\n if len(code_files) > 0:\n tmp_paths = [os.path.join(os.path.abspath(r), f) for f in code_files]\n packed.extend(tmp_paths)\n if not one_more and len(packed) > 0:\n one_more = True\n if len(packed) > 0:\n print(\"unpack list:\", packed)\n for p in packed:\n extract(p, os.path.dirname(p))\n os.remove(p)\n if one_more:\n recursive_unpack(dir_path)", "def fs_get_dir_entries(self, path):\n\t\treturn Job(SDK.PrlSrv_FsGetDirEntries(self.handle, path)[0])", "def populate_dirent_stream(self, stream):\n for dirent in stream:\n LOG.info(\"%s\", dirent.get_full_path())\n\n # If this directory was deleted, we cannot populate it as the\n # dirent stream it points to is not guaranteed. Once the directory\n # is deleted, the dirent stream it points to may be overwritten.\n if dirent.is_directory() and \\\n not dirent.is_deleted():\n\n chain_map = self.get_cluster_chain(dirent.first_cluster)\n\n if self.debug_log_enabled:\n LOG.debug(\"Reading directory: %s\", dirent.get_full_path())\n LOG.debug(\"Directory First Cluster: %08x\",\n dirent.first_cluster)\n LOG.debug(\"Chain Map: %s\", chain_map)\n\n for cluster in chain_map:\n LOG.debug(\"Reading Cluster: %08x\", cluster)\n\n dirent_stream = self.read_directory_stream(\n self.cluster_to_physical_offset(cluster))\n\n dirent.add_dirent_stream_to_this_directory(dirent_stream)\n # TODO: populate_children()\n self.populate_dirent_stream(dirent_stream)", "def get_files_in_dir(dir, ext):\n import os\n\n for root, dirs, files in os.walk(dir):\n for file in files:\n if file.split('.')[1].lower() == ext.lower() or not ext:\n file_full_path = os.path.join(root, file)\n yield file_full_path", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def get_contents_of_directory(directory, bucket=None):\n if bucket is None:\n bucket, directory = get_bucket_and_path_from_uri(directory)\n bucket = get_bucket(bucket)\n\n return [x.key for x in bucket.list(prefix=directory)]", "def printDirContents(directory: Path, level=0, show_hidden=False):\n if show_hidden:\n children = directory.glob(\"./*\")\n else:\n children = directory.glob(\"./[!.]*\")\n dirs = []\n files = []\n for node in children:\n if node.is_dir():\n dirs.append(node)\n if node.is_file():\n files.append(node)\n for d in sorted(dirs):\n printSeperator(level)\n printItem(d.name)\n printDirContents(d, level + 1)\n for f in sorted(files):\n printSeperator(level)\n printItem(f.name)\n\n 
printSeperator(level, end='\\n')", "def listdir_full_path(directory):\n for f in os.listdir(directory):\n if not os.path.isdir(f):\n yield os.path.abspath(os.path.join(directory, f))", "def walkFolder(self, folder, topdown=True):\n if isinstance(folder, basestring):\n folderObject = self.getFolder(folder)\n else:\n folderObject = folder\n dirs = folderObject.childFolders\n containedObjects = folderObject.containedObjects\n if dirs is None:\n dirs = []\n if containedObjects is None:\n containedObjects = []\n if topdown:\n yield folderObject, dirs, containedObjects\n\n for nextDir in dirs:\n for x in self.walkFolder(nextDir.selfUrl):\n yield x\n if not topdown:\n yield folderObject, dirs, containedObjects", "def get_files(dir: str) -> List[str]:\n ret = []\n for root, dirs, files in os.walk(dir):\n for name in dirs:\n ret.extend(get_files(os.path.join(root, name)))\n for name in files:\n ret.append(os.path.join(root, name))\n return ret", "def parse_dir(args, dirname, names):\n for name in names:\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n parse_file_from_directory(path, args)", "def list_dir_recursively(dir: str) -> list:\n all_files = []\n for root, dirs, files in os.walk(dir):\n for name in files:\n file_path = os.path.join(root, name)\n file_path = os.path.relpath(file_path, dir)\n all_files.append(file_path)\n return all_files", "def iter_files(root_dir: str, sep: str = '/') -> Generator[str, None, None]:\n def f(parent_path, parent_name):\n for f_name in os.listdir(parent_path):\n f_child_path = parent_path + os.sep + f_name\n f_child_name = parent_name + sep + f_name\n if os.path.isdir(f_child_path):\n for s in f(f_child_path, f_child_name):\n yield s\n else:\n yield f_child_name\n\n for name in os.listdir(root_dir):\n child_path = root_dir + os.sep + name\n if os.path.isdir(child_path):\n for x in f(child_path, name):\n yield x\n else:\n yield name", "def dirsIter(self):\n url = urlparse(self.baseurl)\n basepath = url2pathname(url.path)\n if self.tld is not None:\n yield self.tld, self.getTLDPathsTuple(basepath)\n for dir in self.dirs:\n yield dir, (basepath, dir)", "def walk(dir, callback):\n\n dir = abspath(dir)\n for file in listdir(dir):\n nfile = join(dir, file)\n if isdir(nfile):\n walk(nfile, callback)\n else:\n callback(nfile)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def _stat_results_from_dir(self, path):\n lines = self._host_dir(path)\n # `cache` is the \"high-level\" `StatCache` object whereas `cache._cache`\n # is the \"low-level\" `LRUCache` object.\n cache = self._lstat_cache\n # Auto-grow cache if the cache up to now can't hold as many entries as\n # there are in the directory `path`.\n if cache._enabled and len(lines) >= cache._cache.size:\n new_size = int(math.ceil(1.1 * len(lines)))\n cache.resize(new_size)\n # Yield stat results from lines.\n for line in lines:\n if self._parser.ignores_line(line):\n continue\n # Although for a `listdir` call we're only interested in the names,\n # use the `time_shift` parameter to store the correct timestamp\n # values in the cache.\n stat_result = self._parser.parse_line(line, self._host.time_shift())\n # Skip entries \".\" and \"..\".\n if stat_result._st_name in [self._host.curdir, self._host.pardir]:\n continue\n loop_path = self._path.join(path, stat_result._st_name)\n # No-op if cache is disabled.\n cache[loop_path] = stat_result\n yield stat_result", "def walk_recursive(root, pattern='*.py'):\r\n for root, dirnames, filenames in os.walk(root):\r\n for 
filename in fnmatch.filter(filenames, pattern):\r\n yield os.path.join(root, filename)", "def chdir(dir):\n orig_cwd = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(orig_cwd)" ]
[ "0.7087995", "0.703418", "0.6827286", "0.6822487", "0.67192256", "0.6643409", "0.66230345", "0.6553806", "0.63341504", "0.6309465", "0.6306312", "0.62912905", "0.62335205", "0.6217654", "0.61574435", "0.61347914", "0.612545", "0.6041788", "0.60254", "0.6016495", "0.6011201", "0.5978534", "0.5977925", "0.59623575", "0.5957728", "0.59296376", "0.5928391", "0.5925919", "0.591732", "0.59148985", "0.59118545", "0.59077835", "0.58933693", "0.58897763", "0.5879925", "0.5877494", "0.5849477", "0.5829628", "0.58150387", "0.5783106", "0.57785547", "0.57692146", "0.5759448", "0.57389843", "0.57349825", "0.5704677", "0.56725806", "0.5667653", "0.5667653", "0.56636554", "0.5653239", "0.5646455", "0.56362855", "0.5624027", "0.5601591", "0.55900025", "0.55800617", "0.5576914", "0.5574269", "0.5552722", "0.5550342", "0.5546577", "0.5539681", "0.5535787", "0.55311996", "0.55217826", "0.5519102", "0.5490093", "0.54857904", "0.54509264", "0.5442224", "0.54227287", "0.5417055", "0.54165936", "0.5413555", "0.5410844", "0.5409498", "0.5401366", "0.53980345", "0.53841317", "0.53720397", "0.53607464", "0.53599757", "0.53543115", "0.5334923", "0.53305143", "0.5322348", "0.53202486", "0.53181887", "0.5308403", "0.530745", "0.5304432", "0.5301075", "0.52960384", "0.52956885", "0.52943814", "0.528971", "0.528857", "0.52850586", "0.52813154" ]
0.59717333
23
Creates a starboard. A starboard is a channel which has messages with some stars. To configure this starboard (such as max age and threshold, which are 7 days and 5 stars by default), use starconfig's subcommands. See the help for details.
async def starboard(self, ctx):
    if self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?", (ctx.guild.id,)).fetchone():
        return await ctx.say("star.already")
    async with ctx.typing():
        await ctx.channel.edit(
            topic=TOPIC.format(mention=self.bot.user.mention, threshold=5, age=7),  # yeah can't be localized
            nsfw=False,
            reason="Starboard preparation"
        )
        await ctx.channel.set_permissions(ctx.guild.me,
            read_messages=True,
            send_messages=True,
            add_reactions=True,
            manage_messages=True,
            embed_links=True,
            attach_files=True,
            read_message_history=True,
            manage_roles=True,
            manage_channels=True
        )
        await ctx.channel.set_permissions(ctx.guild.default_role,
            read_messages=True,
            send_messages=False,
            add_reactions=True,
            read_message_history=True
        )
        tutorial = await ctx.say("star.done", STAR_EMOJI)
        try:
            await tutorial.pin()
        except discord.HTTPException:
            pass
        self.bot.db.execute("INSERT INTO starboards(guild_id, channel_id,threshold,age,enabled) VALUES (?, ?,5,7,1)",
            (ctx.guild.id, ctx.channel.id))
        starboard_id = self.bot.db.execute("SELECT starboard_id FROM starboards WHERE channel_id = ?",
            (ctx.channel.id,)).fetchone()["starboard_id"]
        self.bot.db.execute("UPDATE guilds SET starboard_id = ? WHERE guild_id = ?", (starboard_id, ctx.guild.id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def add_starboard(self, ctx):\n channel = await ctx.get_text_channel(embed=CustomEmbeds.add(author=\"Channel\",\n description=\"Send a channel to add it to the starboard!\"))\n emotes = await ctx.get_emotes(embed=CustomEmbeds.add(author=\"Emotes\",\n description=\"React with emotes and then click ✅ to add them to the starboard.\"))\n threshold = await ctx.get_int(embed=CustomEmbeds.add(author=\"Add a Threshold\",\n description=\"Send message with the minimum number of reactions for it to be added to the starboard.\"))\n\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n if guild_starboards is None:\n starboard_len = 0\n else:\n starboard_len = len(guild_starboards.get(\"starboards\"))\n\n starboard = Starboard(index=starboard_len,\n channel=channel,\n emotes=emotes,\n threshold=threshold)\n\n await self.db_add_starboard(ctx.guild, starboard.serialize())\n await ctx.send(embed=CustomEmbeds.confirm(author=\"Starboard Added\", description=f\"ID: {starboard_len}\\n\"\n f\"Channel: {channel.mention}\\n\"\n f\"Emotes: {' '.join(emotes)}\\n\"\n f\"Threshold: {threshold}\"))", "def create_star(rk_settings, screen, stars, star_number, row_number):\r\n\tstar = Star(rk_settings, screen)\r\n\tstar_width = star.rect.width\r\n\tstar.x = star_width + 2 * star_width * star_number\r\n\tstar.rect.x = star.x\r\n\tstar.rect.y = star.rect.height + 2 * star.rect.height * row_number\r\n\tstars.add(star)", "async def stars(self, ctx: commands.Context, stars: int):\n self.stars = stars\n await self._update_db()\n\n await ctx.send(\n f\"Done.Now this server needs `{stars}` :star: to appear on the starboard channel.\"\n )", "def create_star():\n if config.S_LIST == []:\n sitem = scene.Star(randint(2, common.COLS-2), randint(2, common.R1_R))\n config.S_LIST.append(sitem)\n elif randint(0, 5) == 1:\n sitem = scene.Star(randint(2, common.COLS-2), randint(2, common.R1_R))\n config.S_LIST.append(sitem)", "def create_fleet(rk_settings, screen, rock, stars):\r\n\t# Create a star and find the number of stars in a row.\r\n\tstar = Star(rk_settings, screen)\r\n\tnumber_stars_x = get_number_stars_x(rk_settings, star.rect.width)\r\n\tnumber_rows = get_number_rows(rk_settings, rock.rect.height, \r\n\t\t\t\t\t\t\t\t\tstar.rect.height)\r\n\t\t\t\t\t\t\t\t\t\r\n\t# Create the first row of stars.\r\n\tfor row_number in range(number_rows):\r\n\t\tfor star_number in range(number_stars_x):\r\n\t\t\tcreate_star(rk_settings, screen, stars, star_number,\r\n\t\t\t\t\t\trow_number)", "def board_stars(self):\r\n return BoardStars(self)", "def board_star(self, board_star_id):\r\n return BoardStar(self, board_star_id)", "def _create_stars(self, stars_number, row_number):\n star = Star(self)\n stars_width, stars_height = star.rect.size\n star.x = stars_width + 2 * stars_width * stars_number\n star.rect.x = star.x\n star.rect.y = star.rect.height + 2 * star.rect.height * row_number\n self.stars.add(star)", "async def starboard(self, ctx):\n pass", "async def list_starboard(self, ctx):\n entries = []\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n\n if guild_starboards is None or guild_starboards.get(\"starboards\") is None:\n entries.append((\"No Starboards\", \"This guild has no starboards setup\"))\n return await StarboardPages(ctx, entries=entries).paginate()\n starboards = guild_starboards.get(\"starboards\")\n\n entries.append((\"Guild Starboard Status\", f\"Activated: `{guild_starboards.get('activated')}`\"))\n\n for starboard in starboards:\n 
entries.append((f\"Starboard #{starboard.get('_id')}\", f\"Channel: <#{starboard.get('channel')}>\\n\"\n f\"Emotes: {' '.join(starboard.get('emotes'))}\\n\"\n f\"Threshold: `{starboard.get('threshold')}`\\n\"\n f\"Created: `{starboard.get('created').strftime('%b %d %Y %H:%M:%S')}`\\n\"\n f\"Activated: `{starboard.get('activated')}`\"))\n\n pages = StarboardPages(ctx, entries=entries)\n await pages.paginate()", "def _create_galaxy(self):\n # Make a star.\n star = Star(self)\n stars_width, stars_height = star.rect.size\n # Fill galaxy across the screen\n available_space_x = self.settings.screen_width - (2 * stars_width)\n number_stars_x = available_space_x // (2 * stars_width)\n # Determine the number of rows of stars that fit on the screen.\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * stars_height) - ship_height)\n number_rows = available_space_y // (2 * stars_height)\n # Create the full galaxy of stars.\n for row_number in range(number_rows):\n # Create the first row of stars.\n for stars_number in range(number_stars_x):\n self._create_stars(stars_number, row_number)", "async def set_starboard_channel(self, ctx: commands.Context, channel: discord.TextChannel):\n self.check_if_exist(ctx.guild)\n\n self.starboard_guilds = self.starboard_info.find(\"guilds\")\n try:\n self.starboard_guilds[str(ctx.guild.id)][\"channel\"] = channel.id\n except:\n self.starboard_guilds[str(ctx.guild.id)] = {}\n self.starboard_guilds[str(ctx.guild.id)][\"channel\"] = channel.id\n self.starboard_info.update(\"guilds\", self.starboard_guilds)\n\n await ctx.reply(\"Starred messages will be sent to {0}\".format(channel.mention))", "async def starred(self, ctx: Message):\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tawait self.send(\n\t\t f\"Starred Message: {starred}ㅤ|ㅤMessage Creator: @{starredauthor}\")", "def create_new_board():\n\n board = Board()\n board.print_board()", "async def starboard_current(self, ctx):\n starboard_settings = self.bot.cache.starboard_settings.get(str(ctx.guild.id))\n if not starboard_settings:\n raise exceptions.Warning(\"Nothing has been configured on this server yet!\")\n\n (\n is_enabled,\n board_channel_id,\n required_reaction_count,\n emoji_name,\n emoji_id,\n emoji_type,\n log_channel_id,\n ) = starboard_settings\n\n if emoji_type == \"custom\":\n emoji = self.bot.get_emoji(emoji_id)\n else:\n emoji = emoji_name\n\n blacklisted_channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id FROM starboard_blacklist WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n as_list=True,\n )\n\n content = discord.Embed(title=\":star: Current starboard settings\", color=int(\"ffac33\", 16))\n content.add_field(\n name=\"State\", value=\":white_check_mark: Enabled\" if is_enabled else \":x: Disabled\"\n )\n content.add_field(name=\"Emoji\", value=emoji)\n content.add_field(name=\"Reactions required\", value=required_reaction_count)\n content.add_field(\n name=\"Board channel\",\n value=f\"<#{board_channel_id}>\" if board_channel_id is not None else None,\n )\n content.add_field(\n name=\"Log channel\",\n value=f\"<#{log_channel_id}>\" if log_channel_id is not None else None,\n )\n content.add_field(\n name=\"Blacklisted channels\",\n value=\" \".join(f\"<#{cid}>\" for cid in blacklisted_channels)\n if blacklisted_channels\n else None,\n )\n\n await ctx.send(embed=content)", "def addstar(starname):\n try:\n Star.create(name=starname)\n except IntegrityError:\n print(('Star {0} already in database. 
Record not created, but can be updated.'.format(starname)))", "def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})", "async def post_starred(bot, config, message, reaction_emoji, user):\r\n\t\treacts = message.reactions\r\n\t\tmod_starred = False\r\n\t\tstarlist = None\r\n\t\tstarcount = None\r\n\t\tstarcount_reached = False\r\n\t\t# if the post is older than a week, don't bother putting it on the board\r\n\t\tif (datetime.datetime.now() - message.created_at).total_seconds() > 604800:\r\n\t\t\treturn\r\n\t\t# check if the poster of the starred message is blacklisted from the starboard\r\n\t\tif message.author.id in config[\"starboard\"][\"blacklisted_users\"]:\r\n\t\t\treturn\r\n\t\t# count the number of stars a post has\r\n\t\tfor react in reacts:\r\n\t\t\tif react.emoji == config[\"starboard\"][\"emoji\"]:\r\n\t\t\t\tstarlist = [x async for x in react.users()]\r\n\t\t\t\tstarcount = len(starlist)\r\n\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\treturn\r\n\t\t# check if the star count was reached\r\n\t\ttry:\r\n\t\t\t# if there's a star requirement for a specific channel, and the starred message is in that channel,\r\n\t\t\t# check if the star count surpasses the requirement for that channel\r\n\t\t\tif starcount >= config[\"starboard\"][\"star_amounts\"][message.channel.name]:\r\n\t\t\t\tstarcount_reached = True\r\n\t\t# if there isn't a channel-specific star count this message must follow,\r\n\t\texcept KeyError:\r\n\t\t\t# just check to see if it meets the global requirement\r\n\t\t\tif starcount >= config[\"starboard\"][\"star_amounts\"][\"global\"]:\r\n\t\t\t\tstarcount_reached = True\r\n\t\t# check if a mod starred the post\r\n\t\tfor reactor in starlist:\r\n\t\t\tif Starboard.modcheck(bot, config, reactor) and config[\"starboard\"][\"role_override\"] == \"true\":\r\n\t\t\t\tstarcount_reached = True\r\n\t\t\t\tbreak\r\n\t\t# anti-self-star code\r\n\t\tif message.author.id == user.id:\r\n\t\t\tawait message.remove_reaction(reaction_emoji, message.author)\r\n\t\t\t# count the number of self-star alerts out of the last 50 messages\r\n\t\t\tcounter = 0\r\n\t\t\tasync for message in message.channel.history(limit=50):\r\n\t\t\t\tif \"IS A THOT AND SELF-STARRED THEIR MEME\" in message.content:\r\n\t\t\t\t\tcounter += 1\r\n\t\t\t# if there's been less than three, send a self-star alert\r\n\t\t\t# this is to prevent spam from CERTAIN THOTS THAT LOVE SPAMMING IT\r\n\t\t\tif counter < 3:\r\n\t\t\t\tselfstar_alert = '🚨 🚨 ' + user.mention + ' IS A THOT AND SELF-STARRED THEIR MEME 🚨 🚨'\r\n\t\t\t\tawait message.channel.send(selfstar_alert)\r\n\t\t\treturn\r\n\t\tif starcount_reached and message.author.id != bot.user.id:\r\n\t\t\tawait Starboard.post_to_starboard(bot, message, starcount)", "def generate_star_file(stack_label, previous_classes_bool=False, recent_class=\"classes_0.star\"):\n star_file = \"{}.star\".format(stack_label)\n if previous_classes_bool:\n print(\"It looks like previous jobs have been run in this directory. 
The most recent output class is: {}\".format(recent_class))\n new_star_file = os.path.splitext(recent_class)[0]+\"_appended.star\"\n print(\"Instead of classes_0.star, the new particles will be appended to the end of that par file and saved as {}\".format(new_star_file))\n _ = append_new_particles(old_particles=recent_class, new_particles=star_file, output_filename=new_star_file)\n else:\n print(\"No previous classes were found. A new par file will be generated at classes_0.star\")\n new_star_file = \"classes_0.star\"\n shutil.copy(star_file, new_star_file)\n return new_star_file", "async def setstar(self, ctx: Message):\n\t\tglobal msg #making the variables global so we can access them from any command\n\t\tglobal msgauthor\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tstarred = msg\n\t\tstarredauthor = msgauthor\n\t\tawait self.send(f\"Starred message was set! You may access it with d!starred\")", "def create_board(self):\n # # empty 7x7 board\n # board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass", "def SetStars(self):\r\n\t\tstartype = [self._iconstars[\r\n\t\t\tself.CalcStar(starnum,\\\r\n\t\t\t\tself._configtmp[\"imagerating\"],\r\n\t\t\t\tself._configtmp[\"userrating\"])]\\\r\n\t\t\tfor starnum in range(1,6)]\r\n\t\tself.bitmapButton1Star.SetBitmapLabel(startype[0])\r\n\t\tself.bitmapButton2Star.SetBitmapLabel(startype[1])\r\n\t\tself.bitmapButton3Star.SetBitmapLabel(startype[2])\r\n\t\tself.bitmapButton4Star.SetBitmapLabel(startype[3])\r\n\t\tself.bitmapButton5Star.SetBitmapLabel(startype[4])", "def __init__(self, img, header, starobj, halosize=40, padsize=40, mask=None, hscmask=None):\n Celestial.__init__(self, img, mask, header=header)\n if hscmask is not None:\n self.hscmask = hscmask\n self.name = 'star'\n self.scale_bar_length = 3\n # Trim the image to star size\n # starobj should at least contain x, y, (or ra, dec) and \n # Position of a star, in numpy convention\n x_int = int(starobj['x'])\n y_int = int(starobj['y'])\n dx = -1.0 * (starobj['x'] - x_int)\n dy = -1.0 * (starobj['y'] - y_int)\n halosize = int(halosize)\n # Make padded image to deal with stars near the edges\n padsize = int(padsize)\n ny, nx = self.image.shape\n im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))\n im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.image\n # Star itself, but no shift here.\n halo = im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1, \n x_int + padsize - halosize: x_int + padsize + halosize + 1]\n self._image = halo\n self.shape = halo.shape\n self.cen_xy = [x_int, y_int]\n self.dx = dx\n self.dy = dy \n # FLux\n self.flux = starobj['flux']\n self.fluxann = starobj['flux_ann']\n\n if hasattr(self, 'mask'):\n im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))\n im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.mask\n # Mask itself, but no shift here.\n halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1, \n x_int + padsize - halosize: x_int + padsize + halosize + 1])\n 
self._mask = halo\n \n if hasattr(self, 'hscmask'):\n im_padded = np.zeros((ny + 2 * padsize, nx + 2 * padsize))\n im_padded[padsize: ny + padsize, padsize: nx + padsize] = self.hscmask\n # Mask itself, but no shift here.\n halo = (im_padded[y_int + padsize - halosize: y_int + padsize + halosize + 1, \n x_int + padsize - halosize: x_int + padsize + halosize + 1])\n self.hscmask = halo", "def __draw_board(self):\n\n COLOR = (0, 0, 0, 200)\n LINE_WIDTH = 2\n STAR_POINT_SIZE = 4\n FONT_SIZE = 18\n\n (boardSize, drawExtraStarPoints, starPointOffset) = self.settings\n boardSize -= 1\n stepX = self.innerWidth / boardSize\n stepY = self.innerHeight / boardSize\n labelBoardSpacing = self.borderSize / 2\n draw = ImageDraw.Draw(self.baseImage)\n font = ImageFont.truetype(\"assets/font_fifteentwenty.otf\", FONT_SIZE)\n\n # Draw lines and labels\n for i in range(0, boardSize + 1):\n x = self.borderSize + stepX * i\n label = chr(ord('A') + i)\n labelWidth, labelHeight = draw.textsize(label, font)\n\n draw.line([(x, self.borderSize), (x, self.innerHeight + self.borderSize)], COLOR, LINE_WIDTH)\n draw.text((x - labelWidth / 2, self.borderSize - labelHeight - labelBoardSpacing + labelHeight / 2), label, COLOR, font)\n draw.text((x - labelWidth / 2, self.borderSize + self.innerHeight + labelBoardSpacing - labelHeight / 2), label, COLOR, font)\n\n for i in range(0, boardSize + 1):\n y = self.borderSize + stepY * i\n label = str(boardSize - i + 1)\n labelWidth, labelHeight = draw.textsize(label, font)\n\n draw.line([(self.borderSize, y), (self.innerWidth + self.borderSize, y)], COLOR, LINE_WIDTH)\n draw.text((self.borderSize - labelWidth - labelBoardSpacing + labelWidth / 2, y - labelHeight / 2), label, COLOR, font)\n draw.text((self.borderSize + self.innerWidth + labelBoardSpacing - labelWidth / 2, y - labelHeight / 2), label, COLOR, font)\n\n # Calculate star point positions\n centerX = boardSize / 2 * stepX + self.borderSize\n centerY = boardSize / 2 * stepY + self.borderSize\n leftX = starPointOffset * stepX + self.borderSize\n rightX = (boardSize - starPointOffset) * stepX + self.borderSize\n topY = starPointOffset * stepY + self.borderSize\n bottomY = (boardSize - starPointOffset) * stepY + self.borderSize\n\n # Draw star points\n draw.ellipse([(centerX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n\n if drawExtraStarPoints:\n draw.ellipse([(centerX - STAR_POINT_SIZE, topY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, topY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(leftX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (leftX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(centerX - STAR_POINT_SIZE, bottomY - STAR_POINT_SIZE), (centerX + STAR_POINT_SIZE, bottomY + STAR_POINT_SIZE)], COLOR)\n draw.ellipse([(rightX - STAR_POINT_SIZE, centerY - STAR_POINT_SIZE), (rightX + STAR_POINT_SIZE, centerY + STAR_POINT_SIZE)], COLOR)", "async def star_random(self, ctx):\n board = self.bot.db.execute(\"SELECT * FROM 
starboards WHERE guild_id = ?\", (ctx.guild.id,)).fetchone()\n item = self.bot.db.execute(\n \"SELECT item_id FROM starboard_items WHERE visible = 1 \" \\\n \"ORDER BY random() LIMIT 1\"\n ).fetchone()\n if not item:\n return\n try:\n board_msg = await self.bot.get_channel(board[\"channel_id\"]).fetch_message(item[\"item_id\"])\n except discord.NotFound:\n return await self.destroy_item(board[\"channel_id\"], item[\"item_id\"])\n else:\n await ctx.send(board_msg.content, embed=board_msg.embeds[0])", "def star_sprite(self):\n for _ in range(5):\n star = arcade.Sprite(\"Sprites/star.png\", .8)\n star.center_x = random.randrange(0, settings.WIDTH)\n star.center_y = random.randrange(0, settings.HEIGHT)\n self.star_sprites.append(star)", "def star_graph():\n pylon_graph = graph.graph()\n idx = pylon_graph.add_unique_node(ORIGIN, \"base\")\n star_list = pylon_graph.add_star_to_node(idx, 6)\n pylon_graph.connect_nodes(star_list)\n pylon_graph.save_graph(\"star\")\n return pylon_graph", "async def set_star_thresh(self, ctx: commands.Context, thresh: int):\n self.check_if_exist(ctx.guild)\n\n self.starboard_guilds = self.starboard_info.find(\"guilds\")\n\n self.starboard_guilds[str(ctx.guild.id)][\"thresh\"] = thresh\n\n self.starboard_info.update(\"guilds\", self.starboard_guilds)\n\n await ctx.reply(\"The amount of stars needed to get a message to the starboard is now {0}\".format(thresh))", "async def starboard_emoji(self, ctx, emoji):\n if emoji[0] == \"<\":\n # is custom emoji\n emoji_obj = await util.get_emoji(ctx, emoji)\n if emoji_obj is None:\n raise exceptions.Warning(\"I don't know this emoji!\")\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO starboard_settings (guild_id, emoji_name, emoji_id, emoji_type)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n emoji_name = VALUES(emoji_name),\n emoji_id = VALUES(emoji_id),\n emoji_type = VALUES(emoji_type)\n \"\"\",\n ctx.guild.id,\n None,\n emoji_obj.id,\n \"custom\",\n )\n await util.send_success(\n ctx, f\"Starboard emoji is now {emoji} (emoji id `{emoji_obj.id}`)\"\n )\n else:\n # unicode emoji\n emoji_name = emoji_literals.UNICODE_TO_NAME.get(emoji)\n if emoji_name is None:\n raise exceptions.Warning(\"I don't know this emoji!\")\n\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO starboard_settings (guild_id, emoji_name, emoji_id, emoji_type)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE\n emoji_name = VALUES(emoji_name),\n emoji_id = VALUES(emoji_id),\n emoji_type = VALUES(emoji_type)\n \"\"\",\n ctx.guild.id,\n emoji_name,\n None,\n \"unicode\",\n )\n await util.send_success(ctx, f\"Starboard emoji is now {emoji}\")\n await self.bot.cache.cache_starboard_settings()", "def drawstars(slist=[], best=None, outfile='/tmp/stars.jpg'):\n img = Image.new('RGB', (xmax,ymax), backcol) #blank 8-bit color image\n draw = ImageDraw.Draw(img)\n\n x,y,radius = 400, 300, hole_radius*Cscale\n draw.rectangle( (400+Xmin*Cscale, 300-Ymin*Cscale, 400+Xmax*Cscale, 300-Ymax*Cscale), outline=(0,128,0), fill=None)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,128,0), fill=None)\n\n for i in range(len(slist)):\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,0,0), fill=(0,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(0,0,0) )\n\n i = best #Redraw the 'best' star in red\n try:\n x,y,radius = 
400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(192,0,0), fill=(192,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(192,0,0) )\n except TypeError,IndexError:\n pass #There is no 'best' star\n\n img.save(outfile, quality=90)", "def OnBitmapButton5StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 5\r\n\t\tself.SetStars()", "async def star_show(self, ctx, item: Star):\n board = self.bot.db.execute(\"SELECT * FROM starboards WHERE guild_id = ?\", (ctx.guild.id,)).fetchone()\n try:\n board_msg = await self.bot.get_channel(board[\"channel_id\"]).fetch_message(item[\"item_id\"])\n except discord.NotFound:\n return await self.destroy_item(board[\"channel_id\"], item[\"item_id\"])\n else:\n await ctx.send(board_msg.content, embed=board_msg.embeds[0])", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def createStar(npoints):\n # START CODE HERE\n\n pass\n # END CODE HERE # (remove the pass statement)", "def set_stars():\n prod_id = int(request.vars.prod_id)\n logger.info(\"changing stars on prod_id {%s}\" %prod_id)\n rating = int(request.vars.rating)\n logger.info(\"auth.user from api: %s\"%auth.user.email )\n db.stars.update_or_insert(\n (db.stars.prod_id == prod_id) & (db.stars.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n rating = rating\n )\n new_avg = calc_avg_rating(prod_id)\n return response.json(dict(new_avg=new_avg))", "async def starboard_blacklist(self, ctx, channel: discord.TextChannel):\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO starboard_blacklist (guild_id, channel_id)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n channel_id = channel_id\n \"\"\",\n ctx.guild.id,\n channel.id,\n )\n await util.send_success(ctx, f\"Stars are no longer counted in {channel.mention}\")\n await self.bot.cache.cache_starboard_settings()", "async def enable(self, ctx):\n self.bot.db.execute(\"UPDATE starboards SET enabled = 1 WHERE channel_id = ?\", (ctx.channel.id,))\n await ctx.say(\"star.enabled\")", "async def fake_data(self, ctx, entries: int = 100):\n class Channel:\n id = 00000000\n for i in range(entries):\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n if guild_starboards is None:\n starboard_len = 0\n else:\n starboard_len = len(guild_starboards.get(\"starboards\"))\n\n starboard = Starboard(index=starboard_len,\n channel=Channel,\n emotes=[],\n threshold=0,\n activated=False)\n\n await self.db_add_starboard(ctx.guild, starboard.serialize())\n await ctx.send(\"Done\")", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def a_star(board_name, draw_real_time):\n # Initializing the board through reading the file\n init = read_from_file(board_name) # Returns a list containing 
the full board, start and goal square\n board = init[0]\n start_sq = init[1]\n goal_sq = init[2]\n open_nodes = []\n closed = []\n start_sq.heuristic(goal_sq)\n start_sq.f = start_sq.g + start_sq.h\n open_nodes.append(start_sq)\n neighbors = [[-1, 0], [0, -1], [1, 0], [0, 1]]\n while open_nodes:\n node = open_nodes.pop()\n closed.append(node)\n if draw_real_time:\n draw_closed(node.x, node.y)\n print(node)\n if node == goal_sq: # We have arrived at the solution\n handle_solution(node, start_sq)\n break\n for n in neighbors:\n # Make sure the neighbor is a valid square on the board\n if len(board) > (node.y + n[0]) >= 0 and len(board[node.y]) > (node.x + n[1]) >= 0:\n child = board[node.y + n[0]][node.x + n[1]]\n if child.value != '#': # Checking if the node is an obstacle, and thus not accessible\n node.add_child(child)\n if child not in closed and child not in open_nodes: # We have not yet generated this node\n attach_and_eval(child, node, goal_sq)\n open_nodes.append(child)\n if draw_real_time:\n draw_open(child.x, child.y)\n elif node.g + child.get_arc_cost() < child.g: # Found a cheaper path to this node, thus a better parent\n attach_and_eval(child, node, goal_sq) # Recalculate the costs for the node\n if child in closed: # If the node was already visited, make sure the children are also updated\n propagate_path_improvements(child)\n # Sort the open_nodes list in descending order based on the f-function, so that pop gets the least costly node\n open_nodes.sort(key=lambda s: s.f, reverse=True)", "def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')", "async def delete_starboard(self, ctx, index: int = None):\n if index is None and await ctx.confirm(prompt=\"Delete all starboards?\"):\n self.starboards_collection.update_one({\"_id\": ctx.guild.id}, {\"$set\": {\"starboards\": []}})\n\n self.starboards_collection.update_one({\"_id\": ctx.guild.id}, {\"$pull\": {\"starboards\": {\"_id\": index}}})\n starboards = (await self.starboards_collection.find_one({\"_id\": ctx.guild.id})).get(\"starboards\")\n for starboard in starboards:\n if starboard.get(\"_id\") > index:\n starboard[\"_id\"] -= 1\n\n await self.starboards_collection.update_one({\"_id\": ctx.guild.id}, {\"$set\": {\"starboards\": starboards}})\n await ctx.send(embed=CustomEmbeds.remove(author=\"Removed Starboard\"))", "def __init__(self,ai_settings,screen):\n super(Star,self).__init__()\n self.screen = screen\n\n #Create a bullet star at(0,0) and then set correct position\n self.rect = pygame.Rect(0,0,ai_settings.star_width,ai_settings.star_height)\n self.rect.centerx=randint(0,ai_settings.screen_width)\n self.rect.top= randint(0,ai_settings.screen_height/4)\n\n self.y = float(self.rect.y)\n\n self.color = ai_settings.star_color\n self.speed_factor = ai_settings.star_speed_factor", "async def starboard_amount(self, ctx, amount: int):\n await queries.update_setting(ctx, \"starboard_settings\", \"reaction_count\", amount)\n emoji_name, emoji_id, emoji_type = await self.bot.db.execute(\n \"\"\"\n SELECT emoji_name, emoji_id, emoji_type\n FROM starboard_settings WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n one_row=True,\n )\n if emoji_type == \"custom\":\n emoji = self.bot.get_emoji(emoji_id)\n else:\n emoji = emoji_name\n\n await util.send_success(\n ctx, f\"Messages now need **{amount}** {emoji} reactions to get into the starboard.\"\n )\n await self.bot.cache.cache_starboard_settings()", "def add_star_team(client_id, team_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get 
the indexed name of the team.\n\t\tteam_indexed_name = session.query(Team.indexed_name)\\\n\t\t\t\t.filter(Team.id == team_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the team.\n\t\tstarred_team = StarredTeam(user_id=client_id,\n\t\t\t\tteam_id=team_id,\n\t\t\t\tindexed_name=team_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_team)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this team.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the team.\n\tsession.execute(Teams.update()\n\t\t\t.where(Team.id == team_id)\n\t\t\t.values({Team.num_stars: Team.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(MatchOpponent.match_id, Match)\\\n\t\t\t.join(Match, MatchOpponent.match_id == Match.id)\\\n\t\t\t.filter(MatchOpponent.team_id == team_id, MatchOpponent.is_streamed == True)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\t\n\tsession.commit()", "def main():\n\n # Create an instance of our Star Targeter tool\n starTargeter = StarTargetTool()\n\n # Connect to Maxim and perform basic setup\n starTargeter.connect()\n\n\n # Calibrate the autoguider orientation and scale if requested\n if PERFORM_CALIBRATION:\n starTargeter.calibrateAutoGuider(EXPOSURE_LENGTH_SECONDS) \n\n # When autoguiding, Maxim normally creats a tiny subframe around\n # the target star. Since our star may be hundreds of pixels off\n # from the desired fiber position, first call this routine to\n # take some full-frame exposures and perform coarse adjustments\n # to get the star near the target\n starCenteredSuccessfully = starTargeter.findStarAndMoveToTarget(EXPOSURE_LENGTH_SECONDS, FIBER_X, FIBER_Y, ROUGH_TOLERANCE_PIXELS, maxIterations=5)\n\n if starCenteredSuccessfully:\n # Use Maxim's built-in star tracking routine to\n # accurately position the star and keep it\n # on target\n starTargeter.subframeAndKeepStarOnTarget(EXPOSURE_LENGTH_SECONDS, FIBER_X, FIBER_Y, ROUGH_TOLERANCE_PIXELS)\n else:\n print \"STAR CENTERING FAILED\"", "def simulate_star(fwhm, mag, integ, bgd=None, roff=0, coff=0):\n\n img_size = 8\n img_size2 = img_size * img_size\n\n if not isinstance(bgd, (int, float)):\n raise TypeError(\"simulate_star:: bgd expected to be (int, float)\")\n\n star = np.zeros((img_size, img_size))\n\n # Mag to counts conversion\n gain = 5. # e-/ADU\n counts = integ * transform.mag_to_count_rate(mag) / gain\n\n # Gaussian model\n halfsize = np.int(img_size / 2)\n row, col = np.mgrid[-halfsize:halfsize, -halfsize:halfsize] + 0.5\n sigma = fwhm / (2. * np.sqrt(2. 
* np.log(2.)))\n g = np.exp(-((row - roff)**2 / sigma**2 + (col - coff)**2 / sigma**2) / 2.)\n\n # Zero 6x6 corners\n g = cntr.zero_6x6_corners(g, centered=True)\n\n # Normalize to counts\n i1 = np.int(halfsize + 0.5 - 3)\n i2 = np.int(halfsize + 0.5 + 3)\n g = counts * g / g[i1:i2][i1:i2].sum()\n\n # Simulate star\n star = np.random.normal(g)\n\n # Add background\n if np.shape(bgd) == ():\n bgd = np.ones((img_size, img_size)) * bgd\n\n star = star + bgd\n\n return np.rint(star)", "def add_star_streamer(client_id, streamer_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the indexed name of the streaming user.\n\t\tstreamer_indexed_name = session.query(User.indexed_name)\\\n\t\t\t\t.filter(User.id == streamer_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the streaming user.\n\t\tstarred_streamer = StarredStreamer(user_id=client_id,\n\t\t\t\tstreamer_id=streamer_id,\n\t\t\t\tindexed_name=streamer_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_streamer)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this streaming user.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the streaming user.\n\tsession.execute(Users.update()\n\t\t\t.where(User.id == streamer_id)\n\t\t\t.values({User.num_stars: User.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(StreamedMatch.match_id, Match)\\\n\t\t\t.join(Match, StreamedMatch.match_id == Match.id)\\\n\t\t\t.filter(StreamedMatch.streamer_id == streamer_id)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\n\tsession.commit()", "def __init__(self, stars_x, stars_y, stars_f):\n self.xpos = stars_x\n self.ypos = stars_y\n self.flux = stars_f\n\n return", "def create(self):\n\n for i in range(8):\n # Create white pawns\n self.board[1][i] = Piece(\"pawn\", 1, i, 0)\n # Create black pawns\n self.board[6][i] = Piece(\"pawn\", 6, i, 1)\n\n # Create white rooks\n self.board[0][0] = Piece(\"rook\", 0, 0, 0)\n self.board[0][7] = Piece(\"rook\", 0, 7, 0)\n\n # Create black rooks\n self.board[7][0] = Piece(\"rook\", 7, 0, 1)\n self.board[7][7] = Piece(\"rook\", 7, 7, 1)\n\n # Create white knights\n self.board[0][1] = Piece(\"knight\", 0, 1, 0)\n self.board[0][6] = Piece(\"knight\", 0, 6, 0)\n\n # Create black knights\n self.board[7][1] = Piece(\"knight\", 7, 1, 1)\n self.board[7][6] = Piece(\"knight\", 7, 6, 1)\n\n # Create white bishop\n self.board[0][2] = Piece(\"bishop\", 0, 2, 0)\n self.board[0][5] = Piece(\"bishop\", 0, 5, 0)\n\n # Create black bishop\n self.board[7][2] = Piece(\"bishop\", 7, 2, 1)\n self.board[7][5] = Piece(\"bishop\", 7, 5, 1)\n\n # Create white queen and king\n self.board[0][3] = Piece(\"queen\", 0, 3, 0)\n self.board[0][4] = Piece(\"king\", 0, 4, 0)\n\n # Create black queen and king\n self.board[7][3] = Piece(\"queen\", 7, 3, 1)\n self.board[7][4] = Piece(\"king\", 7, 4, 1)", "def simulated_cluster(n_stars=CLUSTER_DEFAULTS['stars'],\n dimensions=CLUSTER_DEFAULTS['dimensions']):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n image = gaussian_filter(image, sigma=1)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def create_chessboard(size=8):\n r1 = (WHITE + BLACK) * int((size / 2)) + \"\\n\"\n r2 = (BLACK + WHITE) * int((size / 2)) + \"\\n\"\n print((r1 + r2) * int((size / 2)))", "def init_new_board(self) -> None:\r\n\r\n TkState.enable(self.settings_menu.winfo_children())\r\n TkState.enable(self.edit_menu.winfo_children())\r\n TkState.enable([self.play_button, self.step_button])\r\n TkState.disable([self.reset_button])\r\n\r\n self.gen_number.config(text = 0)\r\n self.speed_scale.set(self.INITIAL_TIME_PER_GEN)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n\r\n self.animator.board = self.anim_board\r\n self.painter.board = self.anim_board\r\n self.painter.adjust_to_canvas()", "def add_board(action, user):\n \n userprofile = user.get_profile()\n \n board = Board()\n board.title = action['what']['title']\n board.id = action['what']['id']\n userprofile.boards.append(board)\n userprofile.save()\n \n return board", "def _set_board(self) -> None:\n try:\n self.board.set_pin_mode_sonar(self.trig, self.echo, self._callback)\n except Exception:\n print(f'Problem setting up {self.sonar_name}')\n print(f'Set up {self.sonar_name} successfully')", "def OnBitmapButton2StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 2\r\n\t\tself.SetStars()", "def OnBitmapButton1StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 1\r\n\t\tself.SetStars()", "def makeStandardBoard():\r\n board = Board() # Create an empty chessboard.\r\n # First, set the pawns.\r\n for file in range(8):\r\n board.squares[file, 6].set_piece(Pawn('white'))\r\n board.squares[file, 1].set_piece(Pawn('black'))\r\n # Set the Rooks.\r\n for file in (0, 7):\r\n board.squares[file, 7].set_piece(Rook('white'))\r\n board.squares[file, 0].set_piece(Rook('black'))\r\n # Set the Knights.\r\n for file in (1, 6):\r\n board.squares[file, 7].set_piece(Knight('white'))\r\n board.squares[file, 0].set_piece(Knight('black'))\r\n # Set the Bishops.\r\n for file in (2, 5):\r\n board.squares[file, 7].set_piece(Bishop('white'))\r\n board.squares[file, 0].set_piece(Bishop('black'))\r\n # Set the Queens.\r\n board.squares[3, 7].set_piece(Queen('white'))\r\n board.squares[3, 0].set_piece(Queen('black'))\r\n # Finally, set the Kings.\r\n board.white_king = King('white')\r\n board.squares[4, 7].set_piece(board.white_king)\r\n board.black_king = King('black')\r\n board.squares[4, 0].set_piece(board.black_king)\r\n\r\n board.update_pieces()\r\n\r\n return board", "def create_rating(user, movie, score):\n\n # pass in user object, movie object, score integer\n # To test this function in the interactive mode, \n # create the user and movie objects and then pass \n # in those objects as the arguments\n rating = Rating(user=user, movie=movie, score=score)\n\n db.session.add(rating)\n db.session.commit()\n\n return rating", "def create(self, title):\n return self.app.post('/new-board', data = dict(\n title = title\n ), follow_redirects = True)", "def starbox(width, height):\n print(\"*\" * width) # print top edge of the 
box\n # print sides of the box\n for _ in range(height - 2):\n print(\"*\" + \" \" * (width - 2) + \"*\")\n print(\"*\" * width) # print bottom edge of the box", "def simbad_brightstars(image_file=\"../nro_maps/12CO_20161002_FOREST-BEARS_spheroidal_xyb_grid7.5_0.099kms.fits\",\n brighter_than='G0', extra_criteria=\"(ra < 84.4 | dec < -6.66)\", otypes=\"Star\",\n replace_ra='hourangle', replace_dec='deg', add_sptype_letter_column=True,\n output=None, output_format='fits'):\n try:\n wcs = WCS(image_file).celestial #Drop non-celestial axes (like velocity and stokes). \n except:\n raise(\"image_file must be a fits image or cube with wcs in header.\")\n\n footprint = wcs.calc_footprint()\n\n \n ### ra_min/max, dec_min/max need to be in degrees.\n ### In the fits headers I have they are, but this may not always be true.\n ###\n ra_min, ra_max = footprint[:,0].min(), footprint[:,0].max()\n dec_min, dec_max = footprint[:,1].min(), footprint[:,1].max()\n\n s = Simbad()\n s.add_votable_fields('sptype')\n\n if extra_criteria:\n stars = s.query_criteria(\"ra > {} & ra < {} & dec > {} & dec < {} & sptypes < {} & {}\".format(\n ra_min, ra_max, dec_min, dec_max, brighter_than, extra_criteria), otypes=\"Star\")\n else:\n stars = s.query_criteria(\"ra > {} & ra < {} & dec > {} & dec < {} & sptypes < {}\".format(\n ra_min, ra_max, dec_min, dec_max, brighter_than), otypes=\"Star\")\n\n stars_coord = coord.SkyCoord(stars['RA'], stars['DEC'], unit=(u.hourangle, u.deg))\n\n if replace_ra:\n stars.replace_column('RA', Column(stars_coord.ra, name='RA', unit=replace_ra))\n if replace_dec:\n stars.replace_column('DEC', Column(stars_coord.dec, name='DEC', unit=replace_dec))\n\n if add_sptype_letter_column:\n stars.add_column(Column([sptype[0] for sptype in stars['SP_TYPE'].astype('str')], name='SP_LETTER', unit='str'))\n\n if output:\n stars.write(output, format=output_format)##\n else:\n return stars", "def star_rating(table, record_id, splitstars=False):\n import uuid\n id = uuid.uuid4()\n row=db(db.plugin_wiki_rating.tablename==table)(db.plugin_wiki_rating.record_id==record_id).select().first()\n rating = row.rating if row else 0\n callback = URL('plugin_wiki', 'star_rate', args = [table,record_id])\n incr = 0.5 if splitstars else 1\n return TAG[''](DIV(_id='star'+str(id),_class='rating'),\n SCRIPT(\"jQuery(document).ready(function(){jQuery('%(uid)s').rating('%(callback)s',{increment:%(incr)s, maxvalue:5, curvalue:%(rating)s});});\" % dict(uid='#star'+str(id), callback=callback,incr=incr, rating=rating)))", "def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})", "def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})", "def run_game():\r\n pygame.init()\r\n ai_settings = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_settings.screen_width, ai_settings.screen_height))\r\n 
pygame.display.set_caption(\"Stars\")\r\n\r\n # Make a group of stars.\r\n stars = Group()\r\n\r\n # Create a star system\r\n gf.create_star_system(ai_settings, screen, stars)\r\n \r\n # Main game loop.\r\n while True:\r\n \r\n # Let's player quit the game.\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n gf.update_screen(ai_settings, screen, stars)", "def generate_board():\n b = open(_BOARD_FILE, \"r\").readlines()\n for line in b:\n raw = line.strip().split(\" \")\n _board_graph[str_to_pos(raw[0])] = Space(\n (raw[1] == \"R\"),\n TYPE_MAP[raw[1]],\n {str_to_pos(str_pos) for str_pos in raw[2:]})", "def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def simulated_cluster(n_stars=10000, dimensions=(512, 512)):\n\n nx, ny = dimensions\n\n # Create empty image\n image = np.zeros((ny, nx))\n\n # Generate random positions\n r = np.random.random(n_stars) * nx\n theta = np.random.uniform(0., 2. 
* np.pi, n_stars)\n\n # Generate random fluxes\n fluxes = np.random.random(n_stars) ** 2\n\n # Compute position\n x = nx / 2 + r * np.cos(theta)\n y = ny / 2 + r * np.sin(theta)\n\n # Add stars to image\n # ==> First for loop and if statement <==\n for idx in range(n_stars):\n if x[idx] >= 0 and x[idx] < nx and y[idx] >= 0 and y[idx] < ny:\n image[y[idx], x[idx]] += fluxes[idx]\n\n # Convolve with a gaussian\n kernel = Gaussian2DKernel(stddev=1)\n image = convolve_fft(image, kernel)\n\n # Add noise\n image += np.random.normal(1., 0.001, image.shape)\n\n return image", "def setup(self):\n self.star_list = arcade.SpriteList()\n\n for i in range(50):\n # Create snowflake instance\n singlestar = Singlestar()\n # Add snowflake to snowflake list\n self.star_list.append(singlestar)\n\n # Don't show the mouse pointer\n self.set_mouse_visible(False)\n\n # Set the background color\n arcade.set_background_color(arcade.color.BLACK)", "def _construct_star_mask(self):\n # Produce a separate star mask for EACH image in the stack\n starMasks = self._produce_individual_star_masks()\n\n # Accumulate these pixels into the final star mask\n starMask = starMasks.sum(axis=0)\n\n # Cleanup temporary variables\n del starMasks\n\n # Compute final star mask based on which pixels were masked more than\n # 10% of the time.\n numImg = self.numberOfImages\n starMask = (starMask > np.ceil(0.1*numImg)).astype(float)\n\n # Check that at least one star was detected (more than 15 pixels masked)\n if np.sum(starMask) > 15:\n # Now smooth the star mask with a gaussian to dialate it\n starMask1 = ndimage.gaussian_filter(starMask, (4, 4))\n\n # Grab any pixels (and indices) above 0.05 value post-smoothing\n starMask = (starMask1 > 0.05)\n numInStarPix = np.sum(starMask)\n\n # Notify user how many \"in-star pixels\" were masked\n print('\\n\\nMasked a total of {0} pixels'.format(numInStarPix))\n else:\n print('\\n\\nNo pixels masked as \"in-star\" pixels')\n starMask = False\n\n return starMask", "async def starboard_activate(self, ctx, s_id: int = None):\n if s_id is None:\n await self.starboards_collection.update_one({\"_id\": ctx.guild.id}, {\"$set\": {\"activated\": True}},\n upsert=True)\n else:\n await self.starboards_collection.update_one({\"_id\": ctx.guild.id, \"starboards._id\": s_id},\n {\"$set\": {\"starboards.$.activated\": True}}, upsert=True)\n\n await ctx.send(embed=CustomEmbeds.confirm(author=\"Activated!\"))", "def draw_star(self, row, col):\n start_pixel_x = (row + 1) * CELL_PIXELS - 2\n start_pixel_y = (col + 1) * CELL_PIXELS - 2\n end_pixel_x = (row + 1) * CELL_PIXELS + 2\n end_pixel_y = (col + 1) * CELL_PIXELS + 2\n\n self.create_oval(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y, fill='black')", "def _build_star_cutout_mosaic(starCutouts):\n # Make sure starCutouts can be handled properly\n try:\n starCutouts = np.array(starCutouts)\n except:\n raise TypeError('`starCutouts` must be an array-like object')\n\n if starCutouts.ndim != 3:\n raise ValueError('`starCutouts` must be a (numbor of stars X cutout size x cutout size) array')\n\n\n # Get the number and shape of the remaining star cutouts\n numberOfStars, ny, nx = starCutouts.shape\n\n # Cull the list to the brightest square number of stars\n if numberOfStars >= 25:\n keepStarCount = 25\n elif numberOfStars >= 16:\n keepStarCount = 16\n elif numberOfStars >= 9:\n keepStarCount = 9\n elif numberOfStars >= 4:\n keepStarCount = 4\n else:\n raise RuntimeError('Fewer than 9 stars found: cannot build star cutout mosaic')\n\n # Chop out the sections 
around each star, and build a mosaic of cutouts\n numZoneSide = np.int(np.round(np.sqrt(keepStarCount)))\n cutoutMosaic = np.zeros((numZoneSide*ny, numZoneSide*nx))\n\n # Loop through each star to be placed in the mosaic\n for iStar, starCutout in enumerate(starCutouts[0:keepStarCount]):\n # Compute the zone for this star\n yZone, xZone = np.unravel_index(iStar, (numZoneSide, numZoneSide))\n\n # Establish the pasting boundaries\n btPaste = np.int(np.round(ny*yZone))\n tpPaste = np.int(np.round(ny*(yZone + 1)))\n lfPaste = np.int(np.round(nx*xZone))\n rtPaste = np.int(np.round(nx*(xZone + 1)))\n\n # Paste the cutout into the star mosaic\n cutoutMosaic[btPaste:tpPaste, lfPaste:rtPaste] = starCutout\n\n return cutoutMosaic", "def new_star_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def addStar(image, center, flux, fwhm):\n sigma = fwhm/FwhmPerSigma\n func = afwMath.GaussianFunction2D(sigma, sigma, 0)\n starImage = afwImage.ImageF(image.getBBox(afwImage.PARENT))\n # The flux in the region of the image will not be exactly the desired flux because the Gaussian\n # does not extend to infinity, so keep track of the actual flux and correct for it\n actFlux = 0\n # No function exists that has a fractional x and y offset, so set the image the slow way\n for i in range(image.getWidth()):\n x = center[0] - i\n for j in range(image.getHeight()):\n y = center[1] - j\n pixVal = flux * func(x, y)\n actFlux += pixVal\n starImage[i, j] += pixVal\n starImage *= flux / actFlux\n \n image += starImage", "def create_mini_tickers_channel(self) -> str:", "def OnBitmapButton3StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 3\r\n\t\tself.SetStars()", "def set_board(board):", "def new_game(self, width, height, minecount):\n self.start_screen.grid_forget()\n width, height, minecount = int(width), int(height), int(minecount)\n # always at least one non-mine square\n minecount = min(width*height-1, minecount)\n\n self.mineboard = Mineboard(\n width, height, minecount)\n self.change_display(width, height)\n self.scoreboard.grid(row=1, column=0, sticky='n')\n self.win_lose_lbl.grid_remove()\n self.start_time = time.time() # initial time for the game\n self.timer_update() # initial time display", "def star():\n print('*', end='')", "async def starboard_log(self, ctx, channel: ChannelSetting):\n if channel is None:\n await queries.update_setting(ctx, \"starboard_settings\", 
\"log_channel_id\", None)\n await util.send_success(ctx, \"Starboard log is now disabled\")\n else:\n await queries.update_setting(ctx, \"starboard_settings\", \"log_channel_id\", channel.id)\n await util.send_success(ctx, f\"Starboard log channel is now {channel.mention}\")\n await self.bot.cache.cache_starboard_settings()", "def GrayCodePattern_create(width, height):\n pass", "def draw_map(stars):\n say(\" STAR MAP\")\n say(\" ************\")\n for y in range(15, -16, -1):\n line = list(\" | \")\n if y == 0:\n line = list(\n \"+----+----+----+----+----*SOL-+----+----+----+----+ \")\n elif y % 3 == 0:\n line[25] = \"+\"\n y_hi = y * 10 / 3\n y_lo = (y + 1) * 10 / 3\n for star_index in range(1, len(stars)):\n if y_lo > stars[star_index].y >= y_hi:\n x = round(25 + stars[star_index].x / 2)\n name = stars[star_index].name\n line[x:x + len(name) + 1] = \"*\" + name\n break\n\n say(\"%s\" % \"\".join(line))\n say(\"\\nTHE MAP IS 100 LIGHT-YEARS BY 100 LIGHT-YEARS,\")\n say(\"SO THE CROSS-LINES MARK 10 LIGHT-YEAR DISTANCES\")", "def setup():\n wcs = galsim.TanWCS(\n galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024)),\n #galsim.AffineTransform(0.26, 0., 0., 0.26, galsim.PositionD(1024,1024)),\n galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)\n )\n\n # Make the image (copied from test_single_image in test_simple.py)\n image = galsim.Image(2048, 2048, wcs=wcs)\n\n # Where to put the stars.\n x_list = [ 123.12, 345.98, 567.25, 1094.94, 924.15, 1532.74, 1743.11, 888.39, 1033.29, 1409.31 ]\n y_list = [ 345.43, 567.45, 1094.32, 924.29, 1532.92, 1743.83, 888.83, 1033.19, 1409.20, 123.11 ]\n\n # Draw a Gaussian PSF at each location on the image.\n sigma = 1.3\n g1 = 0.23\n g2 = -0.17\n du = 0.09 # in arcsec\n dv = -0.07\n flux = 123.45\n psf = galsim.Gaussian(sigma=sigma).shear(g1=g1, g2=g2).shift(du,dv) * flux\n for x, y in zip(x_list, y_list):\n bounds = galsim.BoundsI(int(x-31), int(x+32), int(y-31), int(y+32))\n offset = galsim.PositionD(x-int(x)-0.5, y-int(y)-0.5)\n psf.drawImage(image=image[bounds], method='no_pixel', offset=offset)\n image.addNoise(galsim.GaussianNoise(rng=galsim.BaseDeviate(1234), sigma=1e-6))\n\n # Write out the image to a file\n image_file = os.path.join('output','test_stats_image.fits')\n image.write(image_file)\n\n # Write out the catalog to a file\n dtype = [ ('x','f8'), ('y','f8') ]\n data = np.empty(len(x_list), dtype=dtype)\n data['x'] = x_list\n data['y'] = y_list\n cat_file = os.path.join('output','test_stats_cat.fits')\n fitsio.write(cat_file, data, clobber=True)", "def create_board():\n form = BoardForm(request.form)\n if current_user.is_admin and request.method == 'POST' and form.validate():\n if Board.query.filter(\n func.lower(Board.name) == func.lower(form.name.data)\n ).scalar() is None:\n DB.session.add(Board(form.name.data, form.desc.data))\n DB.session.commit()\n flash('Board ({}) successfully created!'.format(form.name.data))\n else:\n flash('Duplicate board detected.')\n return render_template(\n 'index.html',\n boards=Board.query.filter_by(deleted=False),\n form=form)", "def OnBitmapButton4StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 4\r\n\t\tself.SetStars()", "def _bg_update(self):\n self.screen.fill(colour.BLACK)\n for star in self._stars:\n if star[2] + star[1] > self.s_height:\n star[1] = 0\n else:\n star[1] += star[2]\n self.screen.set_at((star[0], star[1]), colour.WHITE)", "def __init__(self, density=0.25, width=10, height=10, side= 20, autoreveal=False):\n\t\ttkinter.Frame.__init__(self, 
None)\n\t\tself.master.title(\"Minesweeper\")\n\t\tself.grid()\n\t\tself.width, self.height, self.side = width, height, side\n\t\tself.density = density\n\t\tself.auto = autoreveal\n\t\tself.game = None\n\t\tself.bind_all(\"q\", lambda a: self.quit())\n\t\t# create button\n\t\ttkinter.Button(self, text=\"NEW\", relief=\"groove\", command=self.new_game).grid(row=0, column=0)\n\t\ttkinter.Button(self, text=\"HINT\", relief=\"groove\", command=self.hint).grid(row=0, column=2)\n\t\tself.label = tkinter.Label(self, text=\"\")\n\t\tself.label.grid(row=0, column=1)\n\t\t# create mine field\n\t\tself.canvas = tkinter.Canvas(self, width=width*side, height=height*side, bg=\"white\")\n\t\tself.canvas.grid(row=1, column=0, columnspan=3)\n\t\tself.canvas.bind(\"<Button>\", self.reveal_cell)\n\t\tself.new_game()", "async def blink(self, star: MapObject) -> NoReturn:\n x, y = star.current_coordinates()\n symbol = star.frame.content\n while True:\n self._canvas.addstr(y, x, symbol, curses.A_DIM)\n await sleep(random.randint(1, 10))\n\n self._canvas.addstr(y, x, symbol)\n await sleep(random.randint(1, 10))\n\n self._canvas.addstr(y, x, symbol, curses.A_BOLD)\n await sleep(random.randint(1, 10))\n\n self._canvas.addstr(y, x, symbol)\n await sleep(random.randint(1, 10))", "def draw_star(x=0,y=0,radius=10):\n cx = x\n cy = y+radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()\n cy = y-radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()", "def mark_star(self, star_id):\n\n ra, dec = self.db.get_star(star_id)[2:4]\n kwargs = dict(layer = self.MARKERS_LAYER,\n edgecolor = '#24ff29',\n s = self.MARK_RADIUS)\n self.aplpy_plot.show_markers(ra, dec, **kwargs)\n self.navig.home()\n\n self.selected_star_id = star_id\n self.goto_button.set_sensitive(True)", "def __init__(self, mine_count=BOARD_DIM[\"MINE_COUNT\"], width=BOARD_DIM[\"BOARD_WIDTH\"],\n height=BOARD_DIM[\"BOARD_HEIGHT\"]):\n if height is None:\n height = width\n if mine_count > height * width:\n raise TooManyMineException\n self.height = height\n self.width = width\n self.mine_count = mine_count\n self.chessboard = [[Point(x, y) for x in range(width)] for y in range(height)]\n self.mines = [-1 for z in range(mine_count)]\n self.initialise()", "def read_stars(self):\n if self.hip_stars: return\n all_stars = list(hipparcos.stars())\n self.hip_stars = [None]*(max(s[0] for s in all_stars)+1)\n for s in all_stars: self.hip_stars[s[0]] = s", "def add_star_match(client_id, match_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the time of the match.\n\t\tmatch = session.query(Match)\\\n\t\t\t\t.filter(Match.id == match_id)\\\n\t\t\t\t.one()\n\t\t# Add the client's star for the match.\n\t\tstarred_match = 
StarredMatch(user_id=client_id,\n\t\t\t\tmatch_id=match_id,\n\t\t\t\ttime=match.time,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_match)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this match.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the match.\n\tmatch.num_stars += 1\n\n\t# If needed, add a CalendarEntry for the streamed match.\n\tif match.is_streamed:\n\t\t_increment_num_user_stars(client_id, match, now)\n\n\tsession.commit()", "def __init__(self, width=7, height=6):\n self.width = width\n self.height = height\n self.board = self.createBoard()", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def isMWSSTAR_colors(gflux=None, rflux=None, zflux=None,\n w1flux=None, w2flux=None, primary=None, south=True):\n # ----- Old stars, g-r > 0\n if primary is None:\n primary = np.ones_like(gflux, dtype='?')\n mwsstar = primary.copy()\n\n # - colors g-r > 0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n # Assume no difference in north vs south color-cuts.\n if south:\n mwsstar &= (grcolor > 0.0)\n else:\n mwsstar &= (grcolor > 0.0)\n\n return mwsstar" ]
[ "0.74559045", "0.6628303", "0.64687115", "0.6309718", "0.6219598", "0.59351027", "0.59275556", "0.586937", "0.5743689", "0.5702885", "0.56899554", "0.56740135", "0.558157", "0.5522696", "0.53798324", "0.53583425", "0.52350324", "0.5224017", "0.52115166", "0.52085984", "0.5154138", "0.51465183", "0.5129245", "0.51274365", "0.51249164", "0.51196283", "0.5101491", "0.5095697", "0.50562227", "0.5039162", "0.49732807", "0.49606687", "0.49465594", "0.4879135", "0.48653153", "0.48240057", "0.48105615", "0.47507977", "0.47449374", "0.47366786", "0.47161636", "0.47041565", "0.4694911", "0.46923035", "0.46880695", "0.46860793", "0.4676981", "0.4676498", "0.46715325", "0.46605912", "0.46498713", "0.46457484", "0.46415347", "0.46386448", "0.46309063", "0.4621826", "0.46114188", "0.46109596", "0.45982698", "0.4584027", "0.45811576", "0.4575519", "0.45739472", "0.45708075", "0.45708075", "0.4564643", "0.4560699", "0.45580566", "0.45393038", "0.4528046", "0.45111278", "0.45077395", "0.4495128", "0.4490774", "0.4478573", "0.447592", "0.44676727", "0.44476008", "0.4444353", "0.4434001", "0.44331884", "0.44276366", "0.4427065", "0.44175738", "0.44165513", "0.44124988", "0.43975702", "0.43944475", "0.43928257", "0.43885478", "0.43863213", "0.43862844", "0.4373429", "0.43654287", "0.43538785", "0.43525994", "0.4350697", "0.43475643", "0.4329702", "0.43157864" ]
0.7119598
1
Enables a disabled starboard.
async def enable(self, ctx): self.bot.db.execute("UPDATE starboards SET enabled = 1 WHERE channel_id = ?", (ctx.channel.id,)) await ctx.say("star.enabled")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable(self):\r\n self.update(enabled=True)", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "async def starboard_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"starboard_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Starboard is now **enabled**\")\n else:\n await util.send_success(ctx, \"Starboard is now **disabled**\")\n await self.bot.cache.cache_starboard_settings()", "def setEnabled(self, enabled):\n def do(toUpdateList):\n self.enabled = enabled\n self.actions.addAction(do)", "def enable(self):\n self._enabled = True", "def setEnabled(self, enable: bool) -> None:\n self.enabled = ...", "def enable(self):\n self.switch.enable()\n self._enabled = True", "def set_enabled(self, enabled):\n self.widget.setEnabled(enabled)", "def set_enabled(self, enabled):\n self.widget.setEnabled(enabled)", "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "def enable_button(self, index):\n if index != 0:\n self.roll_dem_bones.setEnabled(True)", "def _led_enable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.LOW)", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def enable():\n ret = _LIB.led_matrix_click_enable()\n if ret < 0:\n raise Exception(\"led matrix click enable failed\")", "def setupenabled(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.vspins[self.numcols - 1].setReadOnly(False)\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.pcspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(False)\n self.vspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.pcspins[self.numcols - 1].setReadOnly(False)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setEnabled(True)\n self.nsspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(False)\n self.vspins[n].setEnabled(False)\n self.nsspins[self.numcols - 1].setReadOnly(False)\n else:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(True)\n self.nsspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(True)\n self.nsspins[n].setReadOnly(True)\n self.vspins[self.numcols - 1].setReadOnly(False)\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.pcspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(False)\n self.vspins[n].setEnabled(True)\n self.nsspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(True)\n self.nsspins[n].setReadOnly(True)\n self.pcspins[self.numcols - 1].setReadOnly(False)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setEnabled(True)\n self.nsspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(True)\n self.vspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(True)\n self.vspins[n].setReadOnly(True)\n self.nsspins[self.numcols - 1].setReadOnly(False)\n\n for n in range(self.numcols, len(self.vspins)):\n self.vspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.pcspins[n].setEnabled(False)", "def enable(self) -> None:", "async def star_dm(self, ctx, enable: bool = None):\n 
if enable is None:\n result = self.bot.db.execute(\"SELECT starboard_dm FROM users WHERE user_id = ?\", (ctx.author.id,)).fetchone()\n enabled = result[\"starboard_dm\"] if result else 0\n status_str = ctx._(f\"star.dm{['Disabled', 'Enabled'][enabled]}\")\n return await ctx.say(\"star.dmCurrent\", status_str)\n self.bot.db.execute(\"UPDATE users SET starboard_dm = ? WHERE user_id = ?\",(\n int(enable),\n ctx.author.id\n ))\n status_str = ctx._(f\"star.dm{['Disabled', 'Enabled'][enable]}\")\n return await ctx.say(\"star.dmCurrent\", status_str)", "def enable(self):", "def set_enabled(self, enabled=True):\n self._enabled = enabled", "def enable_moves(self):\r\n if self.board is not None:\r\n self.board.enable_moves()", "def enable(self):\n self.colour_combo.config(state=tk.NORMAL)\n self.game_name_entry.config(state=tk.NORMAL)\n self.num_tickets_entry.config(state=tk.NORMAL)", "def _disable(self):\n self.enabled = False", "def enable(self):\n pass", "def disable(self):\r\n self.update(enabled=False)", "def set_disabled(self, disabled):\n if disabled:\n self.__button_new_game.configure(state=DISABLED, text=\"Playing...\")\n else:\n self.__button_new_game.configure(state=ACTIVE, text=\"New Game\")", "def reenable(*args):\n self.controls.disabled = False\n self.disabled = False", "def disabled(self, disabled):\n self._disabled = disabled", "def setEnabled(self, enable):\n self.advancedWidget1D.setEnabled(enable)\n self.advancedWidget2D.setEnabled(enable)\n self.radialRange1D.setEnabled(enable)\n self.radialRange2D.setEnabled(enable)\n self.azimuthalRange2D.setEnabled(enable)\n self.ui.integrate1D.setEnabled(enable)\n self.ui.integrate2D.setEnabled(enable)\n self.ui.advanced1D.setEnabled(enable)\n self.ui.advanced2D.setEnabled(enable)", "def enabled(self, enabled: bool):\n\n self._enabled = enabled", "def enabled(self, enabled: bool):\n\n self._enabled = enabled", "def enable_i2s(self, enable):\n control = self.get_control()\n if enable:\n control = control | CONTROL_ENABLE\n else:\n control = control & (~CONTROL_ENABLE)\n\n self.set_control(control)", "def enabled(self, enabled):\n \n self._enabled = enabled", "def disable(self):", "def disabled(self, disabled):\n\n self._disabled = disabled", "def enabled(self, enabled):\n\n self._enabled = enabled", "def enabled(self, enabled):\n\n self._enabled = enabled", "def _enable_disable_gui(self, state):\r\n self.mainWidget.standbyPushButton.setDisabled(state)\r\n self.mainWidget.eventComboBox.setDisabled(state)\r\n self.mainWidget.roomComboBox.setDisabled(state)\r\n self.mainWidget.dateComboBox.setDisabled(state)\r\n self.mainWidget.talkComboBox.setDisabled(state)\r\n self.mainWidget.audioFeedbackCheckbox.setDisabled(state)", "def enable(self):\n for val in data:\n val.enable()\n self.enabled = True", "def enable(self, *args, **kwargs):\n pass", "def enable(self, modname):\n try: self.cfg.blacklist and self.cfg.blacklist.remove(modname)\n except ValueError: pass \n if self.cfg.loadlist and modname not in self.cfg.loadlist: self.cfg.loadlist.append(modname)\n self.cfg.save()", "def setEnabled(self, boo):\n if boo:\n self.mousePressEvent = self.mousePressEventEnabled\n self.mouseMoveEvent = self.mouseMoveEventEnabled\n self.mouseReleaseEvent = self.mouseReleaseEventEnabled\n else:\n self.mousePressEvent = self.notEnabledDummy\n self.mouseMoveEvent = self.notEnabledDummy\n self.mouseReleaseEvent = self.notEnabledDummy", "def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True", "def enable(self):\n 
self.SetInteractive(1)", "def set_step_enabled(self, enabled):\r\n self.pushButton.setEnabled(enabled)", "def enable(self):\n raise NotImplementedError", "def disable(self):\n self.enabled = False", "def setDisabledColor(*args):", "def setDisabledColor(*args):", "def enable(self, message):\n self.Enable()", "def disable(self) -> None:", "def enable(self, state=True):\n self.reset_pin = state", "async def set_enabled(self, enabled: bool) -> None:\n return await self.api.set_enabled(enabled)", "def set_entire_display_on(enable):\n if enable:\n send_command(0xA5)\n else:\n send_command(0xA4)", "def disable(self): \n self.feed_button.config(state=\"disabled\")\n self.eat_button.config(state=\"disabled\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"disabled\") \n self.add_population_button.config(state=\"disabled\")\n self.add_body_size_button.config(state=\"disabled\")", "def enable_btns(self):\n self.saveBtn.setEnabled(True)\n self.openVideoBtn.setEnabled(True)\n self.openAnnotationBtn.setEnabled(True)\n self.resetBtn.setEnabled(True)\n self.speedCombo.setEnabled(True)\n self.newFileBtn.setEnabled(True)\n self.HelpBtn.setEnabled(True)", "def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlShare_SetEnabled', self.handle, bEnabled)", "def OnBitmapButton4StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 4\r\n\t\tself.SetStars()", "def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()", "def set_disabled(self, val):\n self._disabled = val", "def set_disabled(self, val):\n self._disabled = val", "def enable_noise_eater_mode(self, noise_eater_on):\n if noise_eater_on and self.locked():\n self.write('FL_IntReg=1') #Noise eating on\n elif noise_eater_on and not self.locked():\n raise ac_excepts.EnableError(\"Can't turn on noise eater mode if\\\n not already locked.\",\n self.enable_noise_eater_mode)\n elif not noise_eater_on:\n self.write('FL_IntReg=0') #Noise eating off\n self.enable_lock() #Not sure if need to relock now", "def SetDisabledBitmap(self, bmp):\r\n \r\n self.disabled_bitmap = bmp", "def enable(self): \n self.feed_button.config(state=\"normal\")\n self.eat_button.config(state=\"normal\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"normal\") \n self.add_population_button.config(state=\"normal\")\n self.add_body_size_button.config(state=\"normal\")", "def disable_moves(self):\r\n self.board.disable_moves()", "def Enabled(self) -> bool:", "def enable_shortcut_key(self, enable=True):\r\n self.enable_shortcut = enable", "def enable(self, enable):\n\n self._enable = enable", "def changeEnabled(self, val):\n logging.debug(\"Changing enabled to \" + str(val))\n self.filesList.setEnabled(val)\n self.tabArea.setEnabled(val)\n self.actionRemove.setEnabled(val)\n self.actionSave.setEnabled(val)", "def turnButtonsOn(self, enabled):\n if enabled:\n #connecting iface signals\n self.iface.currentLayerChanged.connect(self.acquire)\n for button in self.buttons:\n #disconnecting the clicked signal\n button.clicked.disconnect(self.reclassify)\n #changing button behavior\n button.setCheckable(True)\n else:\n #disconnecting iface signals\n self.disconnectLayerSignals()\n try:self.iface.currentLayerChanged.disconnect(self.acquire)\n except:pass\n for button in self.buttons:\n #connecting the clicked signal\n button.clicked.connect(self.reclassify)\n #changing button behavior\n button.setCheckable(False)", "def setEnabled( self, cCtrlName, bEnabled=True ):\n 
self.setControlModelProperty( cCtrlName, \"Enabled\", bEnabled )", "def disableEditing(self, disable):\n self.disabled = disable", "def set_enabled(self, newval):\n rest_val = \"1\" if newval > 0 else \"0\"\n return self._setAttr(\"enabled\", rest_val)", "def enable(self, delay=False) -> None:\n self.enabled = True\n if self.delayed == False:\n self.delayed = delay", "async def enable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(True)", "def disable(self):\n self._enabled = False", "async def toggle(self, ctx: BBContext):\n\n self.code_enabled = not self.code_enabled\n e = 'enabled.' if self.code_enabled else 'disabled.'\n await ctx.send(f\"Bunker code auto reaction has been : **{e}**\")\n self.bot.logger.info('Bunker code listener %s by %s', e, str(ctx.author))", "def _set_action_enabled(self, action, index):\n action.setEnabled(index.flags() & QtCore.Qt.ItemIsEnabled)", "def disable(self):\n self.colour_combo.config(state=tk.DISABLED)\n self.game_name_entry.config(state=tk.DISABLED)\n self.num_tickets_entry.config(state=tk.DISABLED)", "def shell_enabled_changed(self, enabled):\n self.set_enabled(enabled)", "def setAutomaticMode(self, enabling: bool) -> None:\n ...", "def enable(self, sid):\n return", "def setEnabled(self, *args):\n return _libsbml.SBMLExtension_setEnabled(self, *args)", "def enable(self, index, value=True, missingok=False):\n self._action(index, StateVariable.enable, missingok=missingok,\n value=value)", "def pause(self):\n self.entry['state']=DGG.DISABLED\n self.ignoreAll()", "async def starboard(self, ctx):\n if self.bot.db.execute(\"SELECT * FROM starboards WHERE guild_id = ?\",(ctx.guild.id,)).fetchone():\n return await ctx.say(\"star.already\")\n async with ctx.typing():\n await ctx.channel.edit(\n topic=TOPIC.format(mention=self.bot.user.mention, threshold=5, age=7), # yeah can't be localized\n nsfw=False,\n reason=\"Starboard preparation\"\n )\n await ctx.channel.set_permissions(ctx.guild.me,\n read_messages=True,\n send_messages=True,\n add_reactions=True,\n manage_messages=True,\n embed_links=True,\n attach_files=True,\n read_message_history=True,\n manage_roles=True,\n manage_channels=True\n )\n await ctx.channel.set_permissions(ctx.guild.default_role,\n read_messages=True,\n send_messages=False,\n add_reactions=True,\n read_message_history=True\n )\n tutorial = await ctx.say(\"star.done\", STAR_EMOJI)\n try:\n await tutorial.pin()\n except discord.HTTPException:\n pass\n self.bot.db.execute(\"INSERT INTO starboards(guild_id, channel_id,threshold,age,enabled) VALUES (?, ?,5,7,1)\", (ctx.guild.id, ctx.channel.id))\n starboard_id = self.bot.db.execute(\"SELECT starboard_id FROM starboards WHERE channel_id = ?\", (ctx.channel.id,)).fetchone()[\"starboard_id\"]\n self.bot.db.execute(\"UPDATE guilds SET starboard_id = ? WHERE guild_id = ?\", (starboard_id, ctx.guild.id))", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def OnBitmapButton1StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 1\r\n\t\tself.SetStars()", "def disable(self):\n pass", "def disable():\n ret = _LIB.led_matrix_click_disable()\n if ret < 0:\n raise Exception(\"led matrix click disable failed\")" ]
[ "0.69257647", "0.673253", "0.673253", "0.6605978", "0.65678227", "0.6538243", "0.64849865", "0.6334245", "0.6330517", "0.6330517", "0.63191867", "0.62614286", "0.6255469", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6149117", "0.6147287", "0.613502", "0.6131316", "0.6121034", "0.6112404", "0.60983145", "0.60871124", "0.60732603", "0.60688007", "0.60666364", "0.60392183", "0.60379344", "0.6030841", "0.6020148", "0.59251314", "0.5918309", "0.5918309", "0.59059405", "0.5905091", "0.5904437", "0.5891993", "0.58498573", "0.58498573", "0.5848912", "0.58315116", "0.5828287", "0.5818515", "0.581728", "0.58083326", "0.58067036", "0.58039135", "0.57999486", "0.5793701", "0.57934886", "0.57934886", "0.57918966", "0.57916534", "0.57899666", "0.57899576", "0.5784048", "0.57652223", "0.5763404", "0.57614106", "0.5745788", "0.57438725", "0.5732834", "0.5732834", "0.5722741", "0.57226276", "0.57215667", "0.5720948", "0.57091254", "0.5648091", "0.5638388", "0.56359035", "0.5628128", "0.56254125", "0.5620489", "0.5618601", "0.56137323", "0.5607326", "0.56047386", "0.55873525", "0.5579305", "0.55749214", "0.5574205", "0.557108", "0.55636555", "0.5545358", "0.5540139", "0.5539789", "0.5539601", "0.5538757", "0.5524938", "0.55231345", "0.5519705" ]
0.7405919
0
Sets "max age" for the starboard messages. If a message is older than the specified days, the message is ignored. Note that existing messages are not affected. Defaults to 7 (one week).
async def maxage(self, ctx, age: int): if age > 0: self.bot.db.execute("UPDATE starboards SET age = ? WHERE channel_id = ?", (age,ctx.channel.id)) await ctx.say("star.age", age) await self.set_topic(ctx.channel.id) else: await ctx.say("star.unsigned", age)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_age(self, max_age):\n self._max_age = max_age", "def max_age(self, max_age):\n\n self._max_age = max_age", "def max_age(self, max_age):\n if (max_age is not None and max_age < -1): # noqa: E501\n raise ValueError(\"Invalid value for `max_age`, must be a value greater than or equal to `-1`\") # noqa: E501\n\n self._max_age = max_age", "def set_maxdays(name, maxdays):\n pre_info = info(name)\n if maxdays == pre_info[\"max\"]:\n return True\n cmd = \"passwd -x {} {}\".format(maxdays, name)\n __salt__[\"cmd.run\"](cmd, python_shell=False)\n post_info = info(name)\n if post_info[\"max\"] != pre_info[\"max\"]:\n return post_info[\"max\"] == maxdays", "def max_age_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_age_in_days\")", "def max_age(self):\n return self._max_age", "def max_age(self):\n return self._max_age", "def max_age(self):\n return 120 if self.realtime else 1800", "def max_jobs_age(self):\n return int(self.__get_option('max_jobs_age'))", "def max_age(self):\n\n return self._max_age", "def max_retention_days(self) -> int:\n return pulumi.get(self, \"max_retention_days\")", "def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value", "def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value", "def max_age(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_age\")", "def message_retention_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"message_retention_in_days\")", "def max_age_rule(self) -> Optional[pulumi.Input['ApplicationMaxAgeRuleArgs']]:\n return pulumi.get(self, \"max_age_rule\")", "def show_max_age_label(self):\n self.draw_max_age = True", "def _send_maximum(self):\n content = {'maximum': self.maximum.isoformat()}\n self.send_action('set_maximum', content)", "def max_age(self) -> typing.Optional[jsii.Number]:\n return self._values.get('max_age')", "def _max_days(self):\n # type: (...) 
-> Union[int, Tuple[int]]\n\n return self.value.max_days", "def max_frame_age(self) -> float:\n return self._max_frame_age", "def userMaximum(self, new_max: float) -> None:\n self._user_maximum = new_max\n self.reset_limits()", "def set_max(self, max):\n self.set_val((self.val[0], max))", "def _putMailInPast(self, mail, days):\n doc = mail.getEditableContent()\n fid = self.archiver.date_field_id\n doc.edit({fid: doc.getDataModel()[fid] - days}, mail)", "def max_players(self, max_players):\n\n self._max_players = max_players", "def org_eclipse_jetty_servlet_max_age(self, org_eclipse_jetty_servlet_max_age: ConfigNodePropertyInteger):\n\n self._org_eclipse_jetty_servlet_max_age = org_eclipse_jetty_servlet_max_age", "def set_max_edges(self, edges):\n self.max_edges = edges", "async def max(self, ctx, limit: int):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"max\"] = limit\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Warn limit is now: \\n{}\".format(limit))", "def adjust_age(self):\n try:\n from tools import valid_units\n except ImportError as e:\n print(\"Necessary import failed: {}\".format(e))\n if not valid_units(self.age_units):\n print(\"Given unit is not supported: {}\".format(self.age_units))\n raise ValueError()\n if self.age_units == \"day\":\n if self.age < 7:\n return\n elif self.age < 30:\n self.age = self.age//7\n self.age_units = \"week\"\n elif self.age < 365:\n self.age = self.age//30\n self.age_units = \"month\"\n else:\n self.age = self.age//365\n self.age_units = \"year\"\n elif self.age_units == \"week\":\n if self.age < 4:\n return\n elif self.age < 52:\n self.age = self.age//4\n self.age_units = \"month\"\n else:\n self.age = self.age//52\n self.age_units = \"year\"\n elif self.age_units == \"month\":\n if self.age < 12:\n return\n else:\n self.age = self.age//12\n self.age_units = \"year\"", "def maximal_completion_delay_in_days(self, maximal_completion_delay_in_days):\n\n self._maximal_completion_delay_in_days = maximal_completion_delay_in_days", "def set_max_sentence_length(self):\n new_max = int(self.set_max_sentence.get())\n cur_min = self.min_sentence_length\n\n if new_max > cur_min:\n self.max_sentence_length = new_max\n else:\n old_max = self.max_sentence_length\n old_max_var = tk.StringVar(self.master)\n old_max_var.set(str(old_max))\n self.set_max_sentence.config(textvariable=old_max_var)", "def set_max(calories, max_calories):\n return calories if calories > max_calories else max_calories", "def maximum_event_age_in_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"maximum_event_age_in_seconds\")", "def max_time(self, max_time: str):\n\n self._max_time = max_time", "def max_retries(self, max_retries: ConfigNodePropertyInteger):\n\n self._max_retries = max_retries", "def max_visits(self, max_visits):\n\n self._max_visits = max_visits", "def _set_maximum(self):\n self._level_gen.maximum_length = self._maximum_length_spinbox.value()\n self._refresh_view()", "def set_age(self, age):\n self.__age = age", "def increase_age(self,s):\n self.age += 1\n self.days = 0\n if self.age >= 35 and self.sexual_activity == 1:\n self.sexual_activity = 0\n s.high_sexual_activity.remove(self.identifier)\n \n #exclude age == 65; they will be replaced next timestep\n if self.age % 10 == 5 and self.age < 65:\n age_group = int(np.floor((self.age+5)/10))-2\n s.age_group[age_group-1].remove(self.identifier)\n s.age_group[age_group].add(self.identifier)", "def set_age(self, newage):\n self.age = newage", 
"def max(self, max):\n\n self._max = max", "def max(self, max):\n\n self._max = max", "def set_max(self, val):\n self._max = val", "def age(self, age):\n\n self._age = age", "def age(self, age):\n\n self._age = age", "def set_max_seconds(self, max_wallclock_seconds):\n max_seconds_factor = self.defaults.delta_factor_time_limit\n max_seconds = max_wallclock_seconds * max_seconds_factor\n self.ctx.inputs.parameters['INPUT_XSPECTRA']['time_limit'] = max_seconds", "def cache_max_age(hours):\n seconds = hours * 60 * 60\n return 'max-age=' + str(seconds)", "def max_walltime(self, max_walltime):\n\n self._max_walltime = max_walltime", "def set_max_participants(self, max_part):\n self.max_participants = max_part", "def set_age(self, age):\n self.age = float(age)", "def setmaxsize(self, maxsize):\n self.maxsize = maxsize", "def maximum_instances(self, maximum_instances):\n if (self.local_vars_configuration.client_side_validation and\n maximum_instances is not None and maximum_instances > 2147483647): # noqa: E501\n raise ValueError(\"Invalid value for `maximum_instances`, must be a value less than or equal to `2147483647`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n maximum_instances is not None and maximum_instances < -2147483648): # noqa: E501\n raise ValueError(\"Invalid value for `maximum_instances`, must be a value greater than or equal to `-2147483648`\") # noqa: E501\n\n self._maximum_instances = maximum_instances", "def max_value(self, max_value):\n\n self._max_value = max_value", "def max_value(self, max_value):\n\n self._max_value = max_value", "def max_value(self, max_value):\n\n self._max_value = max_value", "def SetMaxLevels(self, max_levels):\n return _hypre.HypreBoomerAMG_SetMaxLevels(self, max_levels)", "def set_is_max(self, is_max):\n self.__is_max = is_max", "def setAge(self, age):\n self._age = age", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def set_f_max(cls, params):\n\n for key in params:\n if key != 'f_max':\n raise KeyError('Invalid parameter name: ' + key)\n cls.default_f_max = params", "def max_unavailable(self, max_unavailable):\n\n self._max_unavailable = max_unavailable", "def set_maxVal(self, val):\n self.maxVal = val", "def reset_logs(self, *, age_days: int = None) -> int:\n\n raise NotImplementedError(\n \"Reset for access logs is not supported on this backend\"\n )", "def _get_age(self):\n for employee in self:\n if employee.sudo().birthday:\n employee.age = relativedelta(\n fields.Date.from_string(fields.Date.today()),\n fields.Date.from_string(employee.sudo().birthday)).years\n else:\n employee.age = 0", "def set_maxSize(self, maxSize):\n if self.__log:\n self.__logger.info(f\"Setting max size to {maxSize}\")\n self.__maxSize = maxSize # Set max size\n self.__handle_cache_size() # Adapt to new changes", "def media_images_count_max(self, media_images_count_max):\n\n self._media_images_count_max = media_images_count_max", "def setMaxEpochs(self, epochs):\n return self._set(maxEpochs=epochs)", "def setMaxEpochs(self, epochs):\n return self._set(maxEpochs=epochs)", "def setMaxEpochs(self, epochs):\n return self._set(maxEpochs=epochs)", "def setMaxEpochs(self, epochs):\n return self._set(maxEpochs=epochs)", "def max_days(username):\n path = users_folder_file_path + username\n with open(path + '/preferences.txt', 'r+') as json_file:\n data = json.load(json_file)\n\n data['training_level_increase'] = \\\n 
int(request.form['training_level_increase'])\n\n json_file.seek(0) # rewind\n json.dump(data, json_file)\n json_file.truncate()\n\n if data['runner_type'] == 0:\n return render_template('max_days.html', username=username)\n elif data['runner_type'] == 1:\n return render_template('max_days_int.html', username=username)", "def setMaxNbEvts(self, nbevts):\n self._checkArgs( { 'nbevts' : types.IntType } )\n self.maxNumberOfEvents = nbevts", "def change_max(self, level, value):\n if value < 0:\n raise AttributeError('max value should be greater than zero')\n if level in self.progress_maxes:\n self.progress_maxes[level] = value", "def _set_max_suppress_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name=\"max-suppress-time\", rest_name=\"max-suppress-time\", parent=self, choice=(u'ch-dampening-source', u'ca-dampening-specify-values'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='damp-max-suppress-value', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"max_suppress_time must be of a type compatible with damp-max-suppress-value\"\"\",\n 'defined-type': \"brocade-bgp:damp-max-suppress-value\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name=\"max-suppress-time\", rest_name=\"max-suppress-time\", parent=self, choice=(u'ch-dampening-source', u'ca-dampening-specify-values'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='damp-max-suppress-value', is_config=True)\"\"\",\n })\n\n self.__max_suppress_time = t\n if hasattr(self, '_set'):\n self._set()", "def setMaxTicks(self, maxTicks):\n self.maxTicks = maxTicks\n self.calculate()", "def max_travel(self, max_travel):\n\n self._max_travel = max_travel", "def set_total_deaths(self, message, db_session):\n user = self.ts.get_user(message)\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n total_deaths_num = msg_list[1]\n if total_deaths_num.isdigit() and int(total_deaths_num) >= 0:\n self._set_total_deaths(total_deaths_num, db_session)\n self._add_to_whisper_queue(user, 'Total deaths: {}'.format(total_deaths_num))\n else:\n self._add_to_whisper_queue(user,\n 'Sorry {}, !set_total_deaths should be followed by a non-negative integer'.format(\n user))\n else:\n self._add_to_whisper_queue(user,\n 'Sorry {}, !set_total_deaths should be followed by a non-negative integer'.format(\n user))", "def set_max_reps(self, max_reps):\n self.max_reps = int(max_reps)", "def validate_backup_retention_period(days):\n\n days = positive_integer(days)\n if int(days) > 35:\n raise ValueError(\n \"DBInstance BackupRetentionPeriod cannot be larger \" \"than 35 days.\"\n )\n return days", "def _update_length(self, field, tag_id, value):\n # pylint: disable=unused-argument\n if tag_id not in {8, 9, 10}:\n 
self._message_length += len(field) + 1\n if self._message_length >= self._max_length:\n raise FIXLengthTooLongError(\n f'message too long: {self._message_length}')", "def set_max_time(self, time):\n self.widget().setMaximumTime(time)", "def media_images_height_max(self, media_images_height_max):\n\n self._media_images_height_max = media_images_height_max", "def set_maximum(self, max_value):\n\n self._progress.setMaximum(max_value)", "def _checked_maximum(self):\n self._block_signals()\n if self._maximum_length_checkbox.isChecked():\n # Maximum length enabled\n self._level_gen.maximum_length = MAXIMUM_LENGTH\n else:\n # Maximum length disabled\n self._level_gen.maximum_length = None\n self._refresh_view()\n self._unblock_signals()", "def max_affected_version(self, max_affected_version):\n\n self._max_affected_version = max_affected_version", "def maxPacketLifeTime(self) -> Optional[int]:\n return self.__parameters.maxPacketLifeTime", "def maximum_record_age_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"maximum_record_age_in_seconds\")", "def set_max_retries(cls, max_retries):\n LOGGER.debug(\"Updating max retries to {}\".format(max_retries))\n # See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html\n cls.maxRetries = max_retries", "def _set_age(cls, data):\n birth = data.get(\"birth\")\n if birth:\n today = datetime.now().date()\n data[\"age\"] = relativedelta(today, birth).years\n return data", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def maximum():\n if len(a_variable.get()) > MAX_CHARACTERS:\n messagebox.showwarning(title=\"Max Characters Exceeded!\",\n message=\"Please enter no more than 25\\n\"\n \"characters, thanks.\")\n clear_box() # Clears the entry field", "def set_deaths(self, message, db_session):\n user = self.ts.get_user(message)\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n deaths_num = msg_list[1]\n if deaths_num.isdigit() and int(deaths_num) >= 0:\n self._set_deaths(deaths_num, db_session)\n self._add_to_whisper_queue(user, 'Current deaths: {}'.format(deaths_num))\n else:\n self._add_to_whisper_queue(user,\n 'Sorry {}, !set_deaths should be followed by a non-negative integer'.format(\n user))\n else:\n self._add_to_whisper_queue(user,\n 'Sorry {}, !set_deaths should be followed by a non-negative integer'.format(\n user))", "def session_cookie_max_age(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"session_cookie_max_age\")", "def remaining_retention_days(self) -> int:\n return pulumi.get(self, \"remaining_retention_days\")", "def set_max_clients(self, clients: int = 50_000) -> None:\n self.set_db_conf(\"maxclients\", str(clients))", "def remove_old_events(self, maxage=60 * 60 * 24 * 3):\n now = time.time()\n while 1:\n try:\n lasttime = float(self.queue.peek())\n except IndexError:\n break\n\n if (now - lasttime) >= maxage:\n self.pop_event()\n else:\n break", "def maximal_completion_delay_in_days(self):\n return self._maximal_completion_delay_in_days" ]
[ "0.6970839", "0.6911315", "0.61830574", "0.6088173", "0.60299325", "0.58305186", "0.58305186", "0.57887924", "0.56967527", "0.5692167", "0.5583454", "0.5548023", "0.5548023", "0.55217004", "0.54962564", "0.54864573", "0.5448077", "0.5405468", "0.5401819", "0.53459024", "0.5305077", "0.52902114", "0.5246656", "0.52185863", "0.5204188", "0.5197784", "0.5183826", "0.5176835", "0.5155243", "0.5128057", "0.5093936", "0.50737154", "0.5052863", "0.50350857", "0.5020222", "0.50152075", "0.49927744", "0.4981937", "0.497913", "0.4975607", "0.49491423", "0.49491423", "0.49465707", "0.49345243", "0.49345243", "0.49249917", "0.49194705", "0.49179906", "0.49116686", "0.49067026", "0.48796946", "0.4851046", "0.48458755", "0.48458755", "0.48458755", "0.48456448", "0.4843804", "0.4843045", "0.48412064", "0.48380187", "0.4836032", "0.48126668", "0.48067564", "0.47642037", "0.4763191", "0.47602087", "0.47560835", "0.47560835", "0.47560835", "0.47560835", "0.47293827", "0.47183588", "0.4718315", "0.47088054", "0.46979672", "0.46942046", "0.46902743", "0.46871763", "0.46819413", "0.46687633", "0.46623293", "0.4642322", "0.46336415", "0.46241274", "0.4616845", "0.46104836", "0.46056852", "0.46044523", "0.4601031", "0.4600705", "0.4600705", "0.4600705", "0.4600705", "0.4598263", "0.45916533", "0.45914343", "0.45884544", "0.45874316", "0.45787886", "0.457762" ]
0.6915784
1
Sets "threshold" for the starboard messages. The specified number of stars are required to put the message on the starboard. Note that existing messages are not affected. Defaults to 5.
async def threshold(self, ctx, threshold: int): if threshold > 0: self.bot.db.execute("UPDATE starboards SET threshold = ? WHERE channel_id = ?", (threshold, ctx.channel.id)) await ctx.say("star.threshold", threshold) await self.set_topic(ctx.channel.id) else: await ctx.say("star.unsigned", threshold)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def set_star_thresh(self, ctx: commands.Context, thresh: int):\n self.check_if_exist(ctx.guild)\n\n self.starboard_guilds = self.starboard_info.find(\"guilds\")\n\n self.starboard_guilds[str(ctx.guild.id)][\"thresh\"] = thresh\n\n self.starboard_info.update(\"guilds\", self.starboard_guilds)\n\n await ctx.reply(\"The amount of stars needed to get a message to the starboard is now {0}\".format(thresh))", "async def _msgvote_threshold(self, ctx, threshold: int):\n\n if threshold < 0:\n await self.bot.say(\"Invalid threshold. Must be a positive \"\n \"integer, or 0 to disable.\")\n elif threshold == 0:\n self.settings[\"threshold\"] = threshold\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Message deletion disabled.\")\n else:\n self.settings[\"threshold\"] = threshold\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Messages will be deleted if [downvotes - \"\n \"upvotes] reaches {}.\".format(threshold))", "def threshold(self,thresholdValue):\n # TO DO\n pass", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def setThreshold(self, value):\n return self._set(threshold=value)", "def OnBitmapButton5StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 5\r\n\t\tself.SetStars()", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "async def handle_less(message: types.Message):\n await handle_change_threshold(message, 1 / 1.5)", "def block5_threshold(self):\n return self._safe_value(VAR_BLOCK5THRESHOLD, float)", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)", "def setThreshold1(self, trsh):\n\t\tself.edgeThreshold1 = trsh\n\t\tself.edgeThreshold2 = trsh * 2.5", "def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)", "def set_threshold(self, cat, t):\n self.con.execute(\"update ct set threshold=%f where category='%s'\" \n % (t, cat))", "def bound_rating(self, rating):\n return 1.0 * max(0, min(int(rating + 0.5), 5))\n # return 1.0 * max(0, min(rating, 5))", "def moving_threshold(self, value):\n self._write(MX_MOVING_THRESHOLD, value)", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def SetThreshold (self,VolumeNode, min, max):\n DisplayNode = VolumeNode.GetScalarVolumeDisplayNode()\n DisplayNode.SetApplyThreshold(True)\n DisplayNode.SetThreshold(min,max)", "def thresh(self, thresh=25, total_ratings=False):\n before = self.item_count()\n\n if total_ratings: self.filter(self.n_per_item() >= thresh)\n else: self.filter(np.all(self.lam() >= thresh, axis=0))\n\n after = self.item_count()\n thresh_type = 'on each item total' if total_ratings else 'by each group' \n with msg(f'Applying threshold of {thresh} ratings {thresh_type} : {after} of {before}', done=False, enabled=self.output):pass", "def set_threshold_from_energy(energy, dryRun=False):\n if energy < 3.5:\n print(\"WARNING: optimal energy threshold should normally be set to half of the beam energy, but some noise will appear below energy threshold of 3.5 keV!\")\n 
caput(\"BL13J-EA-EXCBR-01:CONFIG:ACQUIRE:EnergyThreshold\",energy)", "def _thresholdAlarm(self, project_usage, proj_name, old_used_space_perc):\n\n message = \"\"\n threshold_soft = 95\n threshold_hard = 100\n if (proj_name in self.conf.mirrored_projects):\n threshold_soft = 95*2\n threshold_hard = 100*2\n if ((project_usage['used_space_perc'] > threshold_soft) and \n (project_usage['used_space_perc'] > old_used_space_perc)):\n message = \"project \" + proj_name + \" is reaching its quota limit \" \\\n + \"(used space > 95%): \" + str(project_usage['used_space'])\\\n + \" \" + self.conf.storage_space_unity\n if (project_usage['used_space_perc'] >= threshold_hard):\n message = \"project \" + proj_name + \" reached its quota limit \" \\\n + \"(used space > 100%): \" + str(project_usage['used_space'])\\\n + \" \" + self.conf.storage_space_unity\n if (len(message) > 0):\n mailsnd = MailSender()\n mailsnd.send(message, self.conf.notification_sender, \n self.conf.notification_receiver)\n logger.debug(\"sent alert for quota over limit related to project: \"\n + proj_name)", "def thresholdfactor(self):\n return self.__thresholdfactor", "def setMoveThreshold(self, thresholdLoc, thresholdRot):\r\n self.moveThresholdLoc = thresholdLoc\r\n self.moveThresholdRot = thresholdRot", "def set_photon_counting_thres(self, mini, maxi):\n self.lib.SetPhotonCountingThreshold(ct.c_long(mini), ct.c_long(maxi))", "def setAmbiguityThreshold(self, value):\n return self._set(ambiguityThreshold=value)", "def set_warning_song(self, song_number):\n self._warning_song_num = int(math.fabs(song_number)) % 5\n\n # Song is in c major scale and is the 5th (G) to the 3rd (E).\n cmd = \"140 \" + str(self._warning_song_num) + \" 2 67 16 64 16\"\n\n self._serial_conn.send_command(cmd)", "def matrix_filtering_threshold(self, matrix_filtering_threshold):\n\n self._matrix_filtering_threshold = matrix_filtering_threshold", "async def starboard_amount(self, ctx, amount: int):\n await queries.update_setting(ctx, \"starboard_settings\", \"reaction_count\", amount)\n emoji_name, emoji_id, emoji_type = await self.bot.db.execute(\n \"\"\"\n SELECT emoji_name, emoji_id, emoji_type\n FROM starboard_settings WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n one_row=True,\n )\n if emoji_type == \"custom\":\n emoji = self.bot.get_emoji(emoji_id)\n else:\n emoji = emoji_name\n\n await util.send_success(\n ctx, f\"Messages now need **{amount}** {emoji} reactions to get into the starboard.\"\n )\n await self.bot.cache.cache_starboard_settings()", "def setMinimumWidth( self, value ):\n self._minimumWidth = value", "def test_unsuccessful_rating_with_rate_value_more_than_five(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': 6},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_max_value_error_message)", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def adjust_thresholding(self, pos_frame, which='animal'):\n\n cv2.namedWindow('Adjust Thresholding')\n if which == 'animal':\n cv2.createTrackbar('H_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding',\n 
self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_animal\"][2],\n 255,\n self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_animal\"][2],\n 255,\n self.nothing)\n elif which == 'material':\n cv2.createTrackbar('H_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][0],\n 255,\n self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][1],\n 255,\n self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"min_material\"][2],\n 255,\n self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding',\n self._args[\"tracking\"][\"thresholding\"][\"max_material\"][2],\n 255,\n self.nothing)\n else:\n utils.print_color_message(\"[INFO] Select 'animal' or 'material' to preview the default thresholding values\",\n \"darkgreen\")\n cv2.createTrackbar('H_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('H_High', 'Adjust Thresholding', 255, 255, self.nothing)\n cv2.createTrackbar('S_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('S_High', 'Adjust Thresholding', 255, 255, self.nothing)\n cv2.createTrackbar('V_Low', 'Adjust Thresholding', 0, 255, self.nothing)\n cv2.createTrackbar('V_High', 'Adjust Thresholding', 255, 255, self.nothing)\n\n test_frame = self._color_capture.get_frame(pos_frame)\n test_frame_cropped = test_frame[self.up_left_y:self.low_right_y, self.up_left_x:self.low_right_x]\n test_frame_cropped_hsv = cv2.cvtColor(test_frame_cropped, cv2.COLOR_BGR2HSV)\n test_frame_blurred = cv2.blur(test_frame_cropped_hsv, (5, 5))\n\n while True:\n h_l = cv2.getTrackbarPos('H_Low', 'Adjust Thresholding')\n h_h = cv2.getTrackbarPos('H_High', 'Adjust Thresholding')\n s_l = cv2.getTrackbarPos('S_Low', 'Adjust Thresholding')\n s_h = cv2.getTrackbarPos('S_High', 'Adjust Thresholding')\n v_l = cv2.getTrackbarPos('V_Low', 'Adjust Thresholding')\n v_h = cv2.getTrackbarPos('V_High', 'Adjust Thresholding')\n test_mask_mouse = cv2.inRange(test_frame_blurred, (h_l, s_l, v_l), (h_h, s_h, v_h))\n overlay = cv2.bitwise_and(test_frame_cropped_hsv, test_frame_cropped_hsv, mask=test_mask_mouse)\n cv2.imshow('Adjust Thresholding', overlay)\n key = cv2.waitKey(10) & 0xFF\n if key == ord(\"q\"):\n break\n cv2.destroyAllWindows()\n for i in range(1, 5):\n cv2.waitKey(1)", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def setPresencePenalty(self, value):\n return self._set(presencePenalty=value)", "def sparsify_weights(self, threshold = 1e-6):\n weights = self.list_cnn[-1].get_weights()\n sparsified_weights = []\n for w in weights:\n bool_mask = (abs(w) > threshold).astype(int)\n sparsified_weights.append(w * bool_mask)\n self.list_cnn[-1].set_weights(sparsified_weights)", "def 
setLowerThreshold(self, lower_threshold):\r\n\t\tself.LowerThreshold = lower_threshold", "def block6_threshold(self):\n return self._safe_value(VAR_BLOCK6THRESHOLD, float)", "def set_thresh(self, t0=0.5, t1=None):\n self.t0 = t0\n self.t1 = t1", "def aboveThresholdAlarm(self, data):\n\n if(self.calculateAverage(data) > self.threshold and self.aboveThreshold):\n message = \"Average above acceptable amount for \" + self.subjectName + \".\"\n if(self.log):\n logging.info(message)\n\n self.sendToAllSubscribers(message, \"Alert: Average performance above threshold.\")", "def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)", "def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)", "def setThresholdLabel(self, p):\n return self._set(thresholdLabel=p)", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"threshold\")", "def sn_size(self, val):\n if isinstance(val, int) and val >= 1:\n if val != self._faux._sn_size:\n self._faux._sn_size = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")", "def updateThreshold(self, t):\n\n budget = self.budget\n self.threshold = self.init_threshold * self.diameter * ((budget-t) / self.budget)**self.decay_factor", "def OnBitmapButton4StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 4\r\n\t\tself.SetStars()", "def set_message_rate(self, msg_type, rate):\n pass", "def user_rating(self, rating: int):\n if type(rating) == int:\n self._user_rating = rating\n if type(rating) != int:\n raise ValueError\n if rating < 0 or rating > 5:\n print('Rating is out of 5. 
Please enter an integer from 0 to 5.')\n self._user_rating = None", "def give_notice(self,imbalance_threshold, notice=None):\n\t\tassert imbalance_threshold>=1\n\t\tif notice==None: notice=0\n\t\tassert isinstance(notice,int)\n\t\tassert notice>=0\n\n\t\ttrain_unlabelled_feat_counts, train_labelled_feat_counts=self.get_feat_counts()\n\n\t\t\"\"\"Remove features not present in the unlabelled training data immediately\"\"\"\n\t\tbool_feats_to_remove=(train_unlabelled_feat_counts==0)&self.bool_feat_included&(self.feat_time_left==-1)\n\t\tself.remove_feats(bool_feats_to_remove)\n\n\t\t\"\"\"Give features present but rare in the unlabelled training data notice of removal in the near future\"\"\"\n\t\tbool_feats_to_give_notice=train_labelled_feat_counts/len(train_labelled_feat_counts)>\\\n\t\t\t\t\t\t\ttrain_unlabelled_feat_counts/len(train_unlabelled_feat_counts)*imbalance_threshold\n\t\tbool_feats_to_give_notice=bool_feats_to_give_notice&self.bool_feat_included&(self.feat_time_left==-1)\n\t\t\n\t\tif notice==0:\n\t\t\tself.remove_feats(bool_feats_to_give_notice)\n\t\telse:\n\t\t\tself.feat_time_left[bool_feats_to_give_notice]=notice\n\n\t\tif self.verbosity>6:\n\t\t\tprint '-'*10,'give_notice','-'*10\n\t\t\tprint \"NOTICE:\",notice,\"given to\",len(bool_feats_to_give_notice),\"new feats.\",\\\n\t\t\t\t\"Tot\",(self.feat_time_left>0).sum(),\"feats on notice,\",self.bool_feat_excluded.sum(),\\\n\t\t\t\t\"feats removed, and\", self.bool_feat_included.sum(),\"feats included.\"", "def limit():\n bwc = BandwidthConfigurator()\n bwc.limit()", "def belowThresholdAlarm(self, data):\n\n if(self.belowThreshold and self.calculateAverage(data) < self.threshold):\n message = \"Average below acceptable amount for \" + self.subjectName + \".\"\n if(self.log):\n logging.info(message)\n\n self.sendToAllSubscribers(message, \"Alert: Average performance below threshold.\")", "def OnBitmapButton3StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 3\r\n\t\tself.SetStars()", "def ask_for_threshold():\n threshold_question = [\n {\n 'type': 'input',\n 'message': 'Enter the threshold value that you want to consider (similarities below that value will not be considered):',\n 'name': 'threshold',\n 'validate': NumberValidator\n }\n ]\n threshold_answer = prompt(threshold_question, style=style)\n return threshold_answer.get(\"threshold\")", "def reward_threshold(self) -> Optional[float]:", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def warning_count(self, warning_count):\n\n self._warning_count = warning_count", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"threshold\")", "def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass", "async def handle_mo4r(message: types.Message):\n await handle_change_threshold(message, 1.5)", "async def cclimit(self, ctx, limit_amount: int = None):\n if limit_amount is None:\n return await ctx.send_help()\n if limit_amount < 0:\n return await ctx.send(\"You need to use a number larger than 0.\")\n await self.config.limit.set(limit_amount)\n await ctx.send(f\"Chatchart is now limited to {limit_amount} messages.\")", "def message_box_size_limit(self, message_box_size_limit: ConfigNodePropertyInteger):\n\n self._message_box_size_limit = message_box_size_limit", "def 
set_legs(self, number_of_legs):\n self.legs = number_of_legs", "def threshold(self):\n self.frame = cv.adaptiveThreshold(self.frame, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, self.thresh, 2)", "def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val", "def StarDetector_create(maxSize=None, responseThreshold=None, lineThresholdProjected=None, lineThresholdBinarized=None, suppressNonmaxSize=None): # real signature unknown; restored from __doc__\n pass", "def set_min_uncertainty(signal, threshold=0.05):\n # Increase Hirex-Sr uncertainties to be a rel error of 5% minimum (JUST FOR TESTING)\n corrected_unc=signal.std_y/signal.y<=0.05\n signal.std_y[corrected_unc]=0.05*signal.y[corrected_unc]\n\n # correction for normalized uncertainties\n if signal.s/signal.m<=0.05:\n signal.s=0.05*signal.m\n\n signal.std_y_norm=scipy.sqrt((signal.std_y / signal.m)**2.0 + ((signal.y / signal.m)*(signal.s / signal.m))**2.0)", "def set_camera_thresholds(self,thresholds):\n self.send_packet('\\x93'+struct.pack('<'+'B'*8,*thresholds))", "def on_limit(self, track):\n log.debug(\"Received limit notice: %d\", track)", "def set_min_confidence(self, new_min):\n self.__min_confidence = new_min", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def apply_threshold(heatmap, threshold):\n heatmap[heatmap <= threshold] = 0\n\n return heatmap", "def set_sigma_threshold(self, sigma_threshold):\n\n if sigma_threshold < 0:\n raise ValueError(\"The sigma threshold cannot be smaller than 0.\")\n\n core.xc_func_set_sigma_threshold(self.xc_func, ctypes.c_double(sigma_threshold))", "async def _user_update_threshold(self, user_config: dict):\n return 30.0 * user_config['backoff_factor']", "def above_threshold(threshold, current_value):\n if current_value > threshold:\n return \"#00B760\"\n if current_value < threshold:\n return \"#E3170A\"\n return \"#030027\"", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def min_pixels(self, value) -> 'Size':\n raise_not_number(value)\n self.minimum = '{}px'.format(value)\n return self", "def limitsExsess(topic, value):\n\n if isNotifyTime(topic):\n if \"temperature\" in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram(\"Temperature out of bounds: \"+value+\"degC\")\n return True\n if \"CO\" in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram(\"Carbon Monoxide level above threshold: \"+value)\n return True\n if \"All_Gas\" in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram(\"Poison gas level above threshold: \"+value)\n return True\n if \"alarm\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"ALARM in Living room is On!\")\n return True\n if \"MotionHUE\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"HUE Motion sensor detected movement!\")\n return True\n return False", "def back_pressure_data_size_threshold(self, back_pressure_data_size_threshold):\n\n self._back_pressure_data_size_threshold = back_pressure_data_size_threshold", "def SetStrengthThresh(self, strength):\n return _hypre.HypreBoomerAMG_SetStrengthThresh(self, strength)", "def set_strength_ratios(\n self,\n strength_ratios: Union[float, Tuple[float], np.ndarray],\n ):\n self._strength_ratios = 
np.clip(\n _convert_to_np_array(strength_ratios, self._num_motors), 0, 1)", "def __init__(self, window: int = 5, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.window = window\n self.threshold = threshold\n self.initial_val = initial_val", "def kills_per_min(self, kills_per_min):\n\n self._kills_per_min = kills_per_min", "def OnBitmapButton1StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 1\r\n\t\tself.SetStars()", "def classify(indicator):\n # This function prints the spam classification\n if indicator > SPAM_THRESHOLD: # If ratio above 0.10 then SPAM\n print('Spam indicator: ' + str(indicator))\n print('This message is: SPAM')\n else: # If ratio anything else then HAM\n print('Spam indicator: ' + str(indicator))\n print('This message is: HAM')", "def setContactBreakingTreshold(*argv):", "def set_tichu_percentage(threshold):\n tichu_threshold = threshold\n tichu_cnt = 0\n deck = Deck()\n players = [Player(id=0), Player(id=1), Player(id=2), Player(id=3)]\n for i in range(100):\n myhands = deck.shuffle_and_deal()\n for idx in range(4):\n players[idx].assign_hand(myhands[idx])\n score = players[idx].hand_rating\n if score > tichu_threshold:\n tichu_cnt += 1\n players[idx].hand.show()\n print('Player calls Tichu with a hand rating of {:.1f}.'.format(score))\n print('\\n')\n print('Tichu percentage: {:.2f}'.format(tichu_cnt/100))", "def strength(self, value: int):\n self._strength = value", "def setMinimumHeight( self, value ):\n self._minimumHeight = value", "def get_attributes_threshold(alist, decreasing_factor, min_activity_count=1, max_activity_count=25):\n index = max(0, min(min_activity_count - 1, len(alist) - 1))\n threshold = alist[index][1]\n index = index + 1\n for i in range(index, len(alist)):\n value = alist[i][1]\n if value > threshold * decreasing_factor:\n threshold = value\n if i >= max_activity_count:\n break\n return threshold" ]
[ "0.71225744", "0.59684855", "0.5927058", "0.58108085", "0.576199", "0.57090366", "0.5671583", "0.5666817", "0.5666817", "0.5666817", "0.5666817", "0.5666817", "0.5508571", "0.5468851", "0.5445576", "0.5442838", "0.538356", "0.53599596", "0.5337971", "0.5335528", "0.5254369", "0.5232643", "0.5196285", "0.5116185", "0.5096018", "0.50832146", "0.5030812", "0.5021622", "0.49949834", "0.49876738", "0.4976543", "0.4971982", "0.49571878", "0.49419168", "0.49328005", "0.4926543", "0.49215746", "0.48992863", "0.4895857", "0.4883578", "0.48785198", "0.48614174", "0.4839585", "0.4839001", "0.4839001", "0.4839001", "0.4830376", "0.4830376", "0.4830376", "0.4830376", "0.4830376", "0.48272073", "0.48179716", "0.4814409", "0.48078763", "0.47791114", "0.47780046", "0.4763034", "0.4744912", "0.47391838", "0.47293743", "0.47245026", "0.4722106", "0.4722106", "0.47077963", "0.47077963", "0.47077963", "0.47062376", "0.46838546", "0.46828955", "0.46802744", "0.46763417", "0.46628574", "0.4662621", "0.46624303", "0.46606869", "0.4658172", "0.46570086", "0.4635521", "0.46280476", "0.46275765", "0.46255898", "0.46226758", "0.46071434", "0.46014327", "0.4586721", "0.45793825", "0.45766252", "0.45720842", "0.45584446", "0.4555104", "0.4554947", "0.45542166", "0.45495248", "0.45466992", "0.4540069", "0.45285866", "0.45231864", "0.45187217", "0.45125914" ]
0.7315309
0
Shows a starboard item. The argument can be either original message ID or starboard item ID.
async def star_show(self, ctx, item: Star): board = self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?", (ctx.guild.id,)).fetchone() try: board_msg = await self.bot.get_channel(board["channel_id"]).fetch_message(item["item_id"]) except discord.NotFound: return await self.destroy_item(board["channel_id"], item["item_id"]) else: await ctx.send(board_msg.content, embed=board_msg.embeds[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self, item_id):\n pass", "async def star_random(self, ctx):\n board = self.bot.db.execute(\"SELECT * FROM starboards WHERE guild_id = ?\", (ctx.guild.id,)).fetchone()\n item = self.bot.db.execute(\n \"SELECT item_id FROM starboard_items WHERE visible = 1 \" \\\n \"ORDER BY random() LIMIT 1\"\n ).fetchone()\n if not item:\n return\n try:\n board_msg = await self.bot.get_channel(board[\"channel_id\"]).fetch_message(item[\"item_id\"])\n except discord.NotFound:\n return await self.destroy_item(board[\"channel_id\"], item[\"item_id\"])\n else:\n await ctx.send(board_msg.content, embed=board_msg.embeds[0])", "def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()", "def show_item_by_id(plugin, item_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + item_id)", "def show_item(self, show_item):\n\n self._show_item = show_item", "async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)", "def show(*args):\n I = Items()\n for arg in args:\n I.add_item(arg)\n I.write()", "def showItem(category_item_id):\n return render_template('item.html', item=db.findItem(id=category_item_id))", "def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)", "def display_message(self, message, subtitle=None, arg=None):\n if message is None:\n # Display same message as the placeholder\n message = self.placeholder\n xml = alfred.xml([\n alfred.Item(\n title=message,\n subtitle=subtitle,\n attributes={\n 'uid': alfred.uid(0),\n 'arg': arg\n },\n icon='icon.png',\n )\n ]) # compiles the XML answer\n alfred.write(xml) # writes the XML back to Alfred\n exit()", "def show_item(request, itemID):\n\ttry:\n\t\titem = get_object_or_404(Item, itemID = itemID)\n\n\t# Handle when the given itemID is not UUID\n\texcept ValidationError:\n\t\traise Http404\n\n\tcontext_dict = {}\n\tsearch_form = Search_bar()\n\tcontext_dict['search_bar'] = search_form\n\tcontext_dict['item'] = item\n\tcontext_dict['seller_rating'] = range(int(round(item.seller.rating, 1)))\n\n\trelated = Item.objects.filter(category = item.category).exclude(itemID = item.itemID)\n\t\n\tif len(related) > 3:\n\t\tcontext_dict['trendingItems'] = related[0:3]\n\telse:\n\t\tcontext_dict['trendingItems'] = related\n\n\tresponse = render(request, 'tailored/product.html', context_dict)\n\t\n\tif first_visit(request, response, str(item.itemID)):\n\t\titem.dailyVisits += 1\n\t\titem.save()\n\t\t\n\tcontext_dict['itemID'] = item.itemID\n\n\tif item.seller.user != request.user:\n\t\treturn response\n\n\tsold_form = SoldItemForm()\n\n\tif request.method == 'POST':\n\t\tsold_form = SoldItemForm(request.POST, request.FILES)\n\n\t\tif sold_form.is_valid():\n\t\t\tuser_query = 
User.objects.filter(username = sold_form.cleaned_data['sold_to'])\n\t\t\tif not user_query:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\n\t\t\telif user_query[0] != request.user:\n\t\t\t\ttry:\n\t\t\t\t\titem.sold_to = UserProfile.objects.get(user = user_query[0])\n\t\t\t\t\titem.save()\n\t\t\t\texcept UserProfile.DoesNotExist:\n\t\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\telse:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError(\"You can't sell an item to yourself.\"))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\titem.save()\n\t\t\treturn HttpResponseRedirect(reverse('tailored:index'))\n\n\tcontext_dict['form'] = sold_form\n\treturn render(request, 'tailored/product.html', context_dict)", "def showTile(self, event):\n clicked = event.widget\n if clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)", "def show_item_details(item_id):\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return render_template('item_details.html', item=item, login_session=login_session)", "async def info(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'I need embed_links permission to answer in this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n canonical = await Controller.canonical_title(item)\n if canonical:\n item = canonical\n page_url = Controller.link_from_title(item)\n try:\n wikitext = await Controller.get_wikitext(item)\n except ValueError as e:\n # Means the page is not found\n await msg.channel.send(**{\n 'content': f'No page found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n contents = []\n template_names = []\n for template in WTP.parse(wikitext).templates:\n template_names.append(template.name.strip())\n if self.is_infobox(template.name):\n args = template.arguments\n title = item\n entries = {}\n for arg in args:\n k, v = arg.string.strip(' |\\n').split('=')\n k = k.strip()\n v = v.strip()\n if k.lower() in ['title1', 'name']:\n # Set this as the item name\n title = v\n elif k.lower() in ['image1', 'image'] or not v:\n # Skip images and empty values\n continue\n else:\n entries[k] = v.replace('\\n\\n', '\\n').replace('\\n', '\\n\\t')\n entries = [f'{k} = {v}' for k, v in entries.items()]\n entries = '• '+'\\n• '.join(entries)\n content = f'## **{title}** ##\\nSource: {page_url}\\n{template.name.strip()}\\n{entries}'\n contents.append(content)\n logging.info(f'Templates at {item}: '+', '.join(template_names))\n if not contents:\n await msg.channel.send(**{\n 'content': f'No infobox found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n await msg.channel.send(**{\n 'content': '\\n===\\n'.join(contents),\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def 
markPlayed(self, item):\n key = f'{self.METADATA}/actions/scrobble'\n ratingKey = item.guid.rsplit('/', 1)[-1]\n params = {'key': ratingKey, 'identifier': 'com.plexapp.plugins.library'}\n self.query(key, params=params)\n return self", "def viewItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n item = session.query(Item).filter_by(id=item_id).one()\n return render_template('viewitem.html', sport_id=sport_id, item_id=item_id,\n item=item, sport=sport)", "def toggle_item_starred(self):\n self.get_selected()\n if not self.selected_item:\n return\n was_starred = self.selected_item.starred\n message = 'Starred flag is now ON'\n if was_starred:\n message = 'Starred flag is now OFF'\n self.trigger_item_starred(not was_starred)\n self.controller.display_message(message)", "def show_item_by_effect(plugin, item_id, effect_plugin, effect_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'effect': effect_plugin + ':' + effect_id,\n 'id': plugin + ':' + item_id\n })", "def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)", "def item_starred(self, item):\n self.update_item(item)", "def __str__(self):\n return \"Item('\"+ self.get_id() + \"')\"", "def show_item(self, event):\n\t\tc=self.seqframe\n\t\tbox = c.bbox(CURRENT)\n\t\tx1=box[0]\n\t\ty1=box[1]\n\t\tx2=box[2]\n\t\ty2=box[3]\n\t\titems=[]\n\t\t#make selection rectangle one pixel larger to include rect and text\n\t\titems=c.find_enclosed(x1-1,y1-1,x2+1,y2+1)\n\t\t#get this for recog sequence\n\t\tenzymes=self.RS.enzymes_regexs\n\t\t\n\t\tsfont = tkFont.Font (family='Arial', size=12,weight='bold')\n\t\tfor obj in items:\n\t\t\tc.tag_raise(obj)\n\t\t\t#if item is text, get recog sequence and display\n\t\t\tif 'textlabel' in c.gettags(obj):\n\t\t\t\tname=c.itemcget(obj, 'text')\n\t\t\t\tname=name.rstrip('(+-)')\n\t\t\t\tseq=self.get_sequence(enzymes[name]['realseq'])\n\t\t\t\tobj=c.create_text(x2+2,y1-2,text=seq,tags='recogseqlabel',\n\t\t\t\t\t\t\tfont=sfont,width=120,anchor='nw')\n\t\t\t\tbox = c.bbox(obj)\n\t\t\t\trect = c.create_rectangle(box,tag='recogseqlabel',fill='yellow')\n\t\t\t\tc.lift(obj)", "def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)", "def show_info(title, message):\n\n pass", "async def starred(self, ctx: Message):\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tawait self.send(\n\t\t f\"Starred Message: {starred}ㅤ|ㅤMessage Creator: @{starredauthor}\")", "def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )", "def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})", "def 
showInfo(parent,message,title=_('Information')):\r\n return askStyled(parent,message,title,wx.OK|wx.ICON_INFORMATION)", "def display(self, item: Any):\n self.display_widget.clear_output(wait=True)\n with self.display_widget:\n self.display_function(item)\n self.clear() # type: ignore", "def faqitem_show(request,item_container):\n app_name = 'faqitem'\n parent = item_container.get_parent()\n if parent.item.has_comments:\n comments = item_comment(request, item_container=item_container)\n else:\n comments = ''\n vars = get_item_vars_show(request, item_container, app_name)\n vars['comments'] = comments\n return render_to_response ( 'app/faqitem/base-item.html', vars )", "def hide(self, item_id):\n pass", "def show_item_by_damage(plugin, item_id, damage):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'damage': damage,\n 'id': plugin + ':' + item_id\n })", "def markdown_item(title, url):\n print('* [{0}]({1})'.format(\n markdown_escape(title),\n markdown_escape(url),\n ))", "def action(self,item):\r\n pass", "def show(self, nid =None):\r\n flags = NIF_ICON | NIF_MESSAGE\r\n if nid is None:\r\n nid = (self.hwnd, 0, flags, WM_USER+20, self.hicon)\r\n if self.visible:\r\n self.hide()\r\n Shell_NotifyIcon(NIM_ADD, nid)\r\n self.visible = 1", "def channel_help_item(message, query):\n query = query.lower()\n help_string = hf.help_item(query)\n message.reply(help_string)", "def goto_star(self):\n\n self.view_star(self.selected_star_id)\n # Now pressing Enter closes the finding chart window\n self.dialog.set_default_response(gtk.RESPONSE_CLOSE)", "def _createItem(self, rpcObject):\n item = ShowWidgetItem(rpcObject, self)\n return item", "def __repr__(self):\n return \"Item('\"+ self.get_id() + \"')\"", "def get_item_detail(item_id):\n pass", "def favorite(self, item_id, **params):\n\n self.queue('favorite', item_id=item_id, **params)", "def show_item(category, item):\n # Detect login status\n login_status = None\n if 'email' in login_session:\n login_status = True\n # Provide state token to enable Google Sign-In\n state = login_session['state']\n # Query database with SQLAlchemy to show selected category and item\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '), category_id=category.id)\n .one())\n # Render webpage\n return render_template('show_item.html',\n item=item,\n category=category,\n login_status=login_status,\n CLIENT_ID=CLIENT_ID,\n STATE=state)", "async def stars(self, ctx: commands.Context, stars: int):\n self.stars = stars\n await self._update_db()\n\n await ctx.send(\n f\"Done.Now this server needs `{stars}` :star: to appear on the starboard channel.\"\n )", "def item_from_feed(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)", "def showItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(Item).filter_by(category_id=category_id).all()\n\n return render_template('items.html', items=items, category=category)", "def favorite(self, item):\n self._createAction(item, \"archive\")", "async def show_card(self, ctx, card: dict):\r\n emb = discord.Embed(\r\n title=card['name'],\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=card['desc']\r\n )\r\n emb.set_footer(text='Use [p]domt info for list of all cards.')\r\n emb.set_image(url=card['img'])\r\n await 
ctx.send(embed=emb)", "def showMessage(self):", "def info(msg):\n click.secho(msg, fg='blue')", "def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n username=username)", "def show_tray_message(self, index):\n icon = QSystemTrayIcon.MessageIcon(0)\n message = self.iconComboBox.itemText(int(index)) + \" set\"\n self.trayIcon.showMessage(qApp.applicationName() + \" \" + qApp.applicationVersion(), message, icon, 100)", "def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)", "def draw_item(self):\r\n self.screen.blit(self.spawned_item, self.rect)", "def mark_star(self, star_id):\n\n ra, dec = self.db.get_star(star_id)[2:4]\n kwargs = dict(layer = self.MARKERS_LAYER,\n edgecolor = '#24ff29',\n s = self.MARK_RADIUS)\n self.aplpy_plot.show_markers(ra, dec, **kwargs)\n self.navig.home()\n\n self.selected_star_id = star_id\n self.goto_button.set_sensitive(True)", "def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)", "def show_player_queue(self, message):\n user = self.ts.get_user(message)\n queue_str = ', '.join([str(item) for item in self.player_queue.queue])\n self._add_to_whisper_queue(user, queue_str)", "def display_note(self, note):\n\t\tself.canvas.itemconfig(self.note, text = note)", "def show_block_by_id(plugin, block_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + block_id, block=True)", "def itemAt(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return QListWidgetItem", "def receive_item(self, item):\n self.inventory.append(item)\n events.trigger_event(\"print_message\", \"Picked up {0}\".format(item))", "def item_from_friends(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)", "def item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to view the item because\n # the item is private and was created by a different user, send a\n # 403\n elif item.private and current_user != item.user:\n abort(403)\n\n return render_template('item.html', item=item)", "def show_cart(update, context):\n bot = context.bot\n query = update.callback_query\n\n chat_id = update.effective_chat.id\n user = update.effective_user\n\n # all items ordered by user in message and his price to pay for them\n message_and_price = str_user_cart(chat_id, user.id)\n # InlineKeyboard back to start menu\n keyboard = [[InlineKeyboardButton(\"back to menu\", callback_data=str(ONE))]]\n # change last message send by bot\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message_and_price['message'],\n reply_markup=InlineKeyboardMarkup(keyboard))\n # notify ConversationHandler of SEVENTH stage\n return SEVENTH", "def showMessage(self, message):\r\n print message", "def showme(message):\n print(message)", "async def link(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'Cannot send links on this channel',\n 'reference': msg.to_reference(),\n 
'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n title = await Controller.canonical_title(item)\n if title is None:\n await msg.channel.send(**{\n 'content': f'There are no pages matching `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n page_url = Controller.link_from_title(title)\n await msg.channel.send(**{\n 'content': page_url,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def draw_item(\n self,\n index: int,\n item: MenuItem,\n index_text: str | None = None,\n ) -> None:\n if index_text is None:\n index_text = str(index + 1)\n if self.zero_pad:\n pad_width = len(str(len(self.items)))\n index_text = index_text.zfill(pad_width)\n\n text_style = self.highlight if self.current_option == index else self.normal\n assert self.screen is not None\n assert text_style is not None\n\n self.screen.addstr(\n MIN_SIZE - 1 + index,\n 4,\n item.show(index_text),\n text_style,\n )", "def show(*args):", "def show(*args):", "def show(*args):", "def show(*args):", "async def fish(self, ctx):\n board = self._sort_leaderboard(json.loads(await self._query(ctx, \"fish\")))\n player = \"\"\n fish = \"\"\n place = \"\"\n i = 1\n for x in board:\n player += x['player'] + \"\\n\"\n fish += str(x['fish']) + \"\\n\"\n place += str(i) +\"\\n\"\n i += 1\n embed: discord.Embed = discord.Embed(\n color = discord.Color.blue()\n )\n embed.add_field(name = \"Place\", value =place, inline=True)\n embed.add_field(name = \"Player\", value=player, inline=True)\n embed.add_field(name = \"Fish\", value=fish, inline=True)\n await ctx.send(embed = embed)", "async def source(\n self, ctx: Context, *, source_item: SourceConverter = None\n ) -> None:\n if source_item is None:\n embed = discord.Embed(\n title=\"Magoji's Github Repository\",\n description=f\"[Here's the github link!]({GITHUB_REPO_URL})\",\n colour=0x87CEEB,\n )\n await ctx.send(embed=embed)\n return\n embed = self.build_embed(source_item)\n await ctx.send(embed=embed)", "def showDialogBox(mssg, icon=\"info\", type=\"ok\", default=\"ok\"):\n\n return tkMessageBox._show(\"Battleship\", mssg, icon, type, default=default)", "def item_show(\n item, item_id=None, item_type=None, show=\"show\", extra_args=None, cibfile=None\n):\n\n new_commands = __use_new_commands()\n\n cmd = [\"pcs\"]\n\n if isinstance(cibfile, str):\n cmd += [\"-f\", cibfile]\n\n if isinstance(item, str):\n cmd += [item]\n elif isinstance(item, (list, tuple)):\n cmd += item\n\n # constraint command follows a different order\n if item in [\"constraint\"]:\n cmd += [item_type]\n\n # New implementions use config instead of show. 
This resolves that issue.\n if new_commands and (\n item != \"config\" and item != \"constraint\" and item != \"property\"\n ):\n if show == \"show\":\n show = \"config\"\n elif isinstance(show, (list, tuple)):\n for index, value in enumerate(show):\n if show[index] == \"show\":\n show[index] = \"config\"\n\n if isinstance(show, str):\n cmd += [show]\n elif isinstance(show, (list, tuple)):\n cmd += show\n\n if isinstance(item_id, str):\n cmd += [item_id]\n\n if isinstance(extra_args, (list, tuple)):\n cmd += extra_args\n\n # constraint command only shows id, when using '--full'-parameter\n if item in [\"constraint\"]:\n if not isinstance(extra_args, (list, tuple)) or \"--full\" not in extra_args:\n cmd += [\"--full\"]\n log.debug(\"Running item show %s\", cmd)\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def item_detail(request, slug):\n\n item = get_object_or_404(Item, slug=slug)\n\n context = {\n 'item': item,\n }\n\n return render(request, 'items/item_detail.html', context)", "def slotshow_m(obj, content):\n global _slotshow_m_tbl\n\n return _slotshow(obj, content, _slotshow_m_tbl)", "def show_window(icon, item):\n icon.stop()\n app.after(0, app.deiconify())", "def ShowMessage(self, title=u\"\", message=u\"\", msgType=INFOBAR_INFO):\n self.Title.SetLabel(title)\n self.Message.SetLabel(message)\n self.MessageType = msgType\n self.Show(True)", "def display_message_on_ableton(self, message):\n self._show_message(message)", "def showMessage(self, msg): \n QtGui.QMessageBox.information(None, \"Info\", msg)", "def show_il(self, update, context):\n\n # Send preliminary message\n msg = 'Some other message...'\n self.send_str(msg, update, context)\n\n # Send pic\n self.sendPic('il.png', update, context)", "async def add_starboard(self, ctx):\n channel = await ctx.get_text_channel(embed=CustomEmbeds.add(author=\"Channel\",\n description=\"Send a channel to add it to the starboard!\"))\n emotes = await ctx.get_emotes(embed=CustomEmbeds.add(author=\"Emotes\",\n description=\"React with emotes and then click ✅ to add them to the starboard.\"))\n threshold = await ctx.get_int(embed=CustomEmbeds.add(author=\"Add a Threshold\",\n description=\"Send message with the minimum number of reactions for it to be added to the starboard.\"))\n\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n if guild_starboards is None:\n starboard_len = 0\n else:\n starboard_len = len(guild_starboards.get(\"starboards\"))\n\n starboard = Starboard(index=starboard_len,\n channel=channel,\n emotes=emotes,\n threshold=threshold)\n\n await self.db_add_starboard(ctx.guild, starboard.serialize())\n await ctx.send(embed=CustomEmbeds.confirm(author=\"Starboard Added\", description=f\"ID: {starboard_len}\\n\"\n f\"Channel: {channel.mention}\\n\"\n f\"Emotes: {' '.join(emotes)}\\n\"\n f\"Threshold: {threshold}\"))", "def show_item_by_tag(plugin, item_id, tag_value):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'id': plugin + ':' + item_id,\n 'tagValue': None if tag_value == 'null' else tag_value\n })", "def show(self, req, id):\n context = req.environ['manila.context']\n\n try:\n message = self.message_api.get(context, id)\n except exception.MessageNotFound as error:\n raise exc.HTTPNotFound(explanation=error.msg)\n\n return self._view_builder.detail(req, message)", "def message(self, message):\n messagebox.showinfo(\n GT_('Menu'),\n message\n )", "def item(self, item_name):\n\tself.log.info('Not implemented yet... 
Sorry!')\n\tpass", "def EditLabel(self, item):\r\n \r\n self.Edit(item)", "def _open_item(self, *args, **kwargs):\n \"Does nothing\"", "def display_item_process(self):\n raise NotImplementedError()", "def snippet_detail(request, snippet_id):\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n return render_to_response('cab/snippet_detail.html',\n { 'object': snippet,\n 'num_ratings': snippet.rating_set.count(),\n 'rating_score': Rating.objects.score_for_snippet(snippet.id) },\n context_instance=RequestContext(request))", "def item_detail(request, item_id):\n # Select product based on URL param\n item = SELECT('item', where=f'id = {item_id}', _print=False)\n\n context = {\n 'item': item,\n 'photos': [item['photo_primary']] + item['photos']\n }\n return render(request, 'item_detail.html', context)", "def show_message_scrabble(self, player_name):\n\n interface_width = self.interface.GAME_WINDOW_WIDTH;\n interface_height = self.interface.GAME_WINDOW_HEIGHT;\n\n title_text = \"Le joueur \" + player_name + \" a fait un scrabble !!!\";\n subtitle_text = \"Il remporte 50 points bonus !\";\n\n self.message_scrabble.set_text_title(title_text);\n self.message_scrabble.set_text_subtitle(subtitle_text);\n self.message_scrabble.set_horizontal_alignment(Alignment.Center);\n self.message_scrabble.set_text_title_size(40);\n self.message_scrabble.set_text_subtitle_size(32);\n self.message_scrabble.set_space_between_titles(20);\n self.message_scrabble.set_color_title((0, 0, 0));\n self.message_scrabble.set_color_subtitle((0, 0, 0));\n self.message_scrabble.set_border_color((0, 0, 0));\n self.message_scrabble.set_border_thickness(4);\n\n self.message_scrabble.set_pos((interface_width/2, 200));\n\n self.message_placed_word.add_queued_message(self.message_scrabble, 3);", "def showMessage(self, message):\r\n util.raiseNotDefined()", "def show_item_list():\n # 3 items per line\n line = []\n linecounter = 0\n item_string = \"\"\n counter = 0\n text_spacer = 20\n clear_messages(0)\n\n for i in range(0, len(ITEMS)):\n space = text_spacer - len(ITEMS[i])\n item_string = item_string + ITEMS[i] + (' ' * space)\n counter += 1\n if counter == 3:\n line.append(item_string)\n linecounter += 1\n item_string = \"\"\n counter = 0\n if counter < 3:\n line.append(item_string)\n\n for i in range(0, linecounter + 1):\n printmessage(line[i], i + 1, MAGENTA, 0)\n clear_messages(3)", "def showSlot(self, number):\n if number <= 0:\n self.log.info('Showing slot \"Negative\" (%d) slot ignored' % number)\n else:\n slot = self.slots[self.number - number]\n if slot.isOccupied():\n slot.removeItem()\n if number == self.target:\n image = self.target_image if self.value < self.target else self.target_image_filled\n self.log.debug('Showing target row: %s, %s, %s' % (self.value, self.target, image))\n elif number <= self.value:\n image = self.fill_image\n elif number <= self.target:\n image = self.blank_image\n else:\n image = self.grey_image\n self.log.debug('Showing %s cell %d as %s (value=%s, target=%s)' % (\n self.name, number, image, self.value, self.target))\n\n slot.addItem(sprite.Sprite(image))", "def purchase_item(item_id):\n\n item = get_item(item_id)\n\n return render_template('item_page.html', item=item)", "def display_message(self, message):\n\t\tself.render('message.html', {'message': message})", "async def smug(self,ctx,user: discord.Member=None):\n if user == None or user.id == ctx.author.id:\n await ctx.send(\"{}\".format(ctx.author.mention))\n else:\n await ctx.send(\"{} {}\".format(ctx.author.mention, user.mention))\n 
img = random.choice(self.getreaction(\"smug\", \"0\"))\n embed = discord.Embed(colour=ctx.guild.me.top_role.colour)\n embed.set_image(url=img)\n await ctx.send(embed=embed)" ]
[ "0.6767876", "0.62626845", "0.6151777", "0.6020385", "0.5975807", "0.58423346", "0.57976115", "0.5691685", "0.5685633", "0.5602847", "0.5571166", "0.5512835", "0.5495824", "0.54660666", "0.54644984", "0.54629415", "0.545546", "0.54352176", "0.541161", "0.541123", "0.54102796", "0.54056174", "0.5404639", "0.5386514", "0.5322978", "0.5317937", "0.5279676", "0.5261988", "0.5258057", "0.52341443", "0.52262104", "0.5195311", "0.5192288", "0.51914525", "0.51818746", "0.5148522", "0.5128061", "0.51267433", "0.5119385", "0.51133907", "0.51118904", "0.5107589", "0.5077429", "0.5073764", "0.50505376", "0.5048187", "0.5039228", "0.5022438", "0.50204533", "0.50156796", "0.5013315", "0.5010193", "0.50100887", "0.5005599", "0.5001584", "0.49929893", "0.49926314", "0.49824545", "0.49809122", "0.4978013", "0.4977557", "0.4973888", "0.49727905", "0.49725303", "0.49682838", "0.49445838", "0.49328974", "0.49290794", "0.49273902", "0.49273902", "0.49273902", "0.49273902", "0.4909825", "0.4906944", "0.48966336", "0.48952395", "0.48936552", "0.48918688", "0.48867032", "0.48847488", "0.48773456", "0.48753434", "0.48751134", "0.4874257", "0.48706874", "0.4849195", "0.48443383", "0.48373", "0.48352012", "0.48349604", "0.48328617", "0.48302558", "0.48280418", "0.48270082", "0.4825009", "0.4824528", "0.48206568", "0.48204443", "0.4819694", "0.48170033" ]
0.8043702
0
Shows a random item.
async def star_random(self, ctx):
    board = self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?", (ctx.guild.id,)).fetchone()
    item = self.bot.db.execute(
        "SELECT item_id FROM starboard_items WHERE visible = 1 " \
        "ORDER BY random() LIMIT 1"
    ).fetchone()
    if not item:
        return
    try:
        board_msg = await self.bot.get_channel(board["channel_id"]).fetch_message(item["item_id"])
    except discord.NotFound:
        return await self.destroy_item(board["channel_id"], item["item_id"])
    else:
        await ctx.send(board_msg.content, embed=board_msg.embeds[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self, item_id):\n pass", "def showItem(category_item_id):\n return render_template('item.html', item=db.findItem(id=category_item_id))", "async def random(self, ctx):\n response = await self.api.random()\n await ctx.send(embed=self._build_embed(response))", "def random_page(request):\n entries = util.list_entries()\n title = random.choice(entries)\n return render(request,\"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(title),\n \"title\": title\n })", "def show_item(self, show_item):\n\n self._show_item = show_item", "def getRandom(request):\n data = Restaurant.objects.all()\n random_restaurant = random.choice(data)\n\n context = {\n \"random_restaurant\" : random_restaurant,\n \"all_restaurants\" : data\n }\n return render(request, 'app_random_templates/random_restaurant.html', context)", "def randomPage(request):\n entries = util.list_entries()\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": random.choice(entries)\n }))", "def show_random_testimonial():\r\n\trandom_testimonial = Highlight.objects.filter(tags='testimonial')\r\n\tif random_testimonial != '':\r\n\t\treturn {'random_testimonial': random_testimonial}\r\n\telse:\r\n\t\treturn 'Nothing Here'", "def random_item_sp(self):\n if random.random() < 0.3:\n self.window.add(self.shorten_paddle, x=self.ball.x+self.objects_length/2, y=self.ball.y)\n self.shorten_paddle_exist = True", "def showItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(Item).filter_by(category_id=category_id).all()\n\n return render_template('items.html', items=items, category=category)", "def show(*args):\n I = Items()\n for arg in args:\n I.add_item(arg)\n I.write()", "def getRandom(self) -> int:\n # print(self.ind)\n return choice(self.items)", "def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)", "def getRandom(self) -> int:\n return random.choice(self.items)", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n # TODO: choose a user randomly\n u = self.uniform_user()\n i = random.choice(self.data[u].indices)\n else:\n i = random.randint(0, self.num_items - 1)\n return i", "def meme_rand():\n img = random.choice(imgs)\n quote = random.choice(quotes)\n path = meme.make_meme(img, quote.body, quote.author)\n return render_template('meme.html', path=path)", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n u = self.uniform_user()\n i = random.choice(self.dataModel.getItemIDsFromUid(u))\n else:\n i = random.randint(0,self.num_items-1)\n return i", "def read_random_item(user_id: int, db: Session = Depends(get_db)):\n # Call function to retrieve a random item of a given user\n return crud.get_random_item(user_id, db)", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n u = self.uniform_user()\n i = random.choice(self.data[u].indices)\n else:\n i = random.randint(0,self.num_items-1)\n return i", "def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n 
username=username)", "def random(self, irc, msg, args, channel):\n contents = self.registryValue('contents', channel)\n irc.reply(random.choice(contents))", "def show_item_details(item_id):\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return render_template('item_details.html', item=item, login_session=login_session)", "def show_item(category, item):\n # Detect login status\n login_status = None\n if 'email' in login_session:\n login_status = True\n # Provide state token to enable Google Sign-In\n state = login_session['state']\n # Query database with SQLAlchemy to show selected category and item\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '), category_id=category.id)\n .one())\n # Render webpage\n return render_template('show_item.html',\n item=item,\n category=category,\n login_status=login_status,\n CLIENT_ID=CLIENT_ID,\n STATE=state)", "def choose_number_random_category(self):\n self.view.choose_number_random_category()", "async def choose(self, ctx, *vargs):\n\n embed = discord.Embed(\n title=\"Choose\",\n description=\"Choices: {}\\nChosen: **{}**\".format(str(vargs), random.choice(vargs)),\n color=discord.Color.blurple()\n )\n await ctx.send(embed=embed)", "def item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to view the item because\n # the item is private and was created by a different user, send a\n # 403\n elif item.private and current_user != item.user:\n abort(403)\n\n return render_template('item.html', item=item)", "async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)", "def show_item_by_id(plugin, item_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + item_id)", "def food_item(request, food_id):\n\n food = get_object_or_404(Nutrition, pk=food_id)\n\n context = {\n 'food': food,\n }\n\n return render(request, 'nutrition/food.html', context)", "def viewItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n item = session.query(Item).filter_by(id=item_id).one()\n return render_template('viewitem.html', sport_id=sport_id, item_id=item_id,\n item=item, sport=sport)", "def randomHelmet():\n return random.choice(HELMETS)", "def put_item_random(self, x, y):\n r = int(random() * 10)\n if 3 < r and r <= 6:\n self.put_fireitem(x, y)\n elif 6 < r and r <= 9:\n self.put_bombitem(x, y)", "def random_item_ds(self):\n if random.random() < 0.3:\n self.window.add(self.double_score, x=self.ball.x+self.objects_length/2, y=self.ball.y)\n self.double_score_exist = True", "def sample(self):\n return 
self.items[self.np_random.choice(len(self.items))]", "def meme_rand():\n img = random.choice(imgs)\n quotes_list_one = random.choice(quotes)\n quote = random.choice(quotes_list_one)\n if quote and img:\n path = meme.make_meme(img, quote.body, quote.author)\n return render_template('meme.html', path=path)\n else:\n path = meme.make_meme('./_data/photos/dog/xander_1.jpg', \"Quote\", \"Author\")\n return render_template('meme.html', path=path)", "async def random(self, ctx: \"IceTeaContext\"):\n random_tag = await ctx.guild_data.get_random_tag()\n if random_tag is None:\n return await ctx.send(\"Unable to find any tags\")\n await ctx.send(random_tag.content)", "def purchase_item(item_id):\n\n item = get_item(item_id)\n\n return render_template('item_page.html', item=item)", "def get_random_item_id():\n items = huuto.get_category_items(1)\n\n return items['items'][0]['id']", "def __str__(self):\n return \"{}_random\".format(self.index)", "async def randompage(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Wiki.randompage', extra={'invoker': ctx.message.author.name})\r\n rn = await self.req({\r\n 'action': 'query',\r\n 'list': 'random',\r\n 'rnlimit': '1',\r\n 'rnnamespace': '0'\r\n })\r\n title = rn['query']['random'][0]['title']\r\n title = title.replace(' ', '_').capitalize()\r\n title = quote(title, safe='/:')\r\n await ctx.send('https://en.scratch-wiki.info/wiki/' + title)", "def render_item_page(self, client_id, state, user_id, user_name, item_id):\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n category = self._db_manager.get_category(item[\"category_id\"])\r\n if category is None:\r\n # this should not happen unless there is a concurrent delete\r\n flash(\"Sorry, something went wrong.\")\r\n return\r\n return render_template(\r\n \"item_view.html\",\r\n client_id=client_id,\r\n state=state,\r\n is_logged_in=user_id is not None,\r\n is_creator=item[\"user_id\"] == user_id,\r\n user_name=user_name,\r\n category=category,\r\n item=item\r\n )", "def show_flashcard():\r\n random_key = choice(list(glossary))\r\n print('Define: ', random_key)\r\n input('Press return to see the correct definition')\r\n print(glossary[random_key])", "def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)", "async def star_show(self, ctx, item: Star):\n board = self.bot.db.execute(\"SELECT * FROM starboards WHERE guild_id = ?\", (ctx.guild.id,)).fetchone()\n try:\n board_msg = await self.bot.get_channel(board[\"channel_id\"]).fetch_message(item[\"item_id\"])\n except discord.NotFound:\n return await self.destroy_item(board[\"channel_id\"], item[\"item_id\"])\n else:\n await ctx.send(board_msg.content, embed=board_msg.embeds[0])", "def display(self, item: Any):\n self.display_widget.clear_output(wait=True)\n with self.display_widget:\n self.display_function(item)\n self.clear() # type: ignore", "def item_detail(request, item_id):\n # Select product based on URL param\n item = SELECT('item', where=f'id = {item_id}', _print=False)\n\n context = {\n 'item': item,\n 'photos': [item['photo_primary']] + item['photos']\n }\n return render(request, 'item_detail.html', context)", "def show_item(self, event):\n\t\tc=self.seqframe\n\t\tbox = 
c.bbox(CURRENT)\n\t\tx1=box[0]\n\t\ty1=box[1]\n\t\tx2=box[2]\n\t\ty2=box[3]\n\t\titems=[]\n\t\t#make selection rectangle one pixel larger to include rect and text\n\t\titems=c.find_enclosed(x1-1,y1-1,x2+1,y2+1)\n\t\t#get this for recog sequence\n\t\tenzymes=self.RS.enzymes_regexs\n\t\t\n\t\tsfont = tkFont.Font (family='Arial', size=12,weight='bold')\n\t\tfor obj in items:\n\t\t\tc.tag_raise(obj)\n\t\t\t#if item is text, get recog sequence and display\n\t\t\tif 'textlabel' in c.gettags(obj):\n\t\t\t\tname=c.itemcget(obj, 'text')\n\t\t\t\tname=name.rstrip('(+-)')\n\t\t\t\tseq=self.get_sequence(enzymes[name]['realseq'])\n\t\t\t\tobj=c.create_text(x2+2,y1-2,text=seq,tags='recogseqlabel',\n\t\t\t\t\t\t\tfont=sfont,width=120,anchor='nw')\n\t\t\t\tbox = c.bbox(obj)\n\t\t\t\trect = c.create_rectangle(box,tag='recogseqlabel',fill='yellow')\n\t\t\t\tc.lift(obj)", "def getRandom(self) -> int:\n return choice(self.list)", "def grab(self):\n if len(self.location.contents) == 0:\n print('Hate to break it to you, but there\\'s nothing to grab.')\n elif random() >= .75:\n item = self.location.contents[\n randrange(len(self.location.contents))]\n self.inventory.append(item)\n self.location.remove(item)\n print('Nice one, you actually managed to grab the {}! '\n 'I\\'m not even angry, I\\'m impressed.'.format(item))\n else:\n print('Well, at least you flailed in an impressive fashion.')", "def generate_food() -> FoodItem:\n presets = random.choice(FOOD_BANK)\n return FoodItem(presets['name'], presets['hp'], presets['msg'])", "def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)", "def random_entry(): \n\n files = list_entries()\n return random.choice(files)", "async def imgran(self):\r\n search=\"random\"\r\n search = client.gallery()\r\n holder=[]\r\n for d in search:\r\n holder.append(d.link)\r\n await self.bot.say(random.choice(holder))", "def show_pet(self):\n pet = self.pet_factory.get_pet()\n\n print(\"this is a lovely \", pet)\n print(\"It says \", pet.speak())\n print(\"It eats \", self.pet_factory.get_food())", "def show_item_by_effect(plugin, item_id, effect_plugin, effect_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'effect': effect_plugin + ':' + effect_id,\n 'id': plugin + ':' + item_id\n })", "def draw_item(self):\r\n self.screen.blit(self.spawned_item, self.rect)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def show_item_by_damage(plugin, item_id, damage):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'damage': damage,\n 'id': plugin + ':' + item_id\n })", "def show_pet(self):\n pet = self.pet_factory.get_pet()\n print \"We have a lovely {}\".format(pet)\n print \"It says {}\".format(pet.speak())\n print \"We also have {}\".format(self.pet_factory.get_food())", "def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def bonus_food(self):\n self.penup()\n self.shape(\"turtle\")\n self.color(\"red\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Bonus Food {self.x_cordinates} and {self.y_cordinates}\")", "def test_get_single_good_item(test_client):\n\n response = test_client.get(GOOD_ITEM_URL)\n\n data = 
json.loads(response.get_data())\n\n assert response.status_code == 200\n assert len(data['items']) == 1\n assert data['items'][0]['id'] == 3", "def show_loot(self):\n print(self.name, \"has a total loot of \", str(self.loot))", "def random(self, random_mode):\n resp = yield from self.command('random {}'.format(random_mode))\n return True", "def cmd_gallery_random(client, args):\n gallery_random = client.gallery_random(args.page)\n data = [item.__dict__ for item in gallery_random]\n generate_output({'gallery_random': data}, args.output_file)", "def choose_item():\n print_items()\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == 'blueprint':\n blueprint = ViewMap()\n blueprint.print_map()\n print(\"Type 'back' to go to main menu.\")\n else:\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n else:\n print(\"Type 'back' to go to main menu.\")", "def item_detail(request, slug):\n\n item = get_object_or_404(Item, slug=slug)\n\n context = {\n 'item': item,\n }\n\n return render(request, 'items/item_detail.html', context)", "def test(request):\n realm = Realm.objects.filter(house=92)[0]\n shuffled_auctions = realm.auctions.all().order_by('?')\n for auction in shuffled_auctions:\n recipes = auction.item.recipes.all()\n if recipes:\n random_recipe = recipes[0]\n break\n # random_auction = realm.auctions.all().order_by('?')[counter]\n\n market_stats = random_recipe.market_stats(realm)\n min_auction = market_stats.order_by('buyout_min')[0]\n min_buyout = min_auction.buyout_min / min_auction.quantity\n avg_price = random_recipe.market_avg_buyout(realm)['market_avg_buyout']\n\n return render(request, \"test.html\", {\n 'recipe': random_recipe,\n 'min_buyout': min_buyout,\n 'average': avg_price\n })", "def getRandom(self) -> int:\n return random.choice(self.store_list)", "def rand_product_list(request):\n\n all_products = list(Product.objects.all())\n rand_products = random.sample(all_products, 7)\n\n context = {\n 'rand_products': rand_products,\n }\n return context", "def random_head():\n x = random.randint(1,3)\n if x==1: \n head_hearteyes()\n elif x==2:\n head_surprised()\n else:\n head_plain()", "def getRandom(self) -> int:\n from random import choice\n return choice(self.list)", "def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()", "def select_item(items, weights, k):\n x = random.choices(items, weights=weights, k=k)\n return x", "def add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def show_list():\n clear_screen()\n print(\"Here is your list: \")\n\n for index, item in enumerate(shopping_list, start = 1):\n print(\"{}. 
{}\".format(index, item))\n\n print(\"-\" * 10)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def feed(self):\n self.health += random.randrange(1, 3) # food increases heath by (1, 3)\n if self.health > self.MAX_HEALTH: # if health exceeds the max, set it to the max\n self.health = self.MAX_HEALTH\n print(self.name, \"ate food\")\n self.show_health()", "def show_lucky_num():\r\n num = random.randint(1, 10)\r\n\r\n return render_template(\"lucky.html\", lucky_num=num, msg=\"Hello this is a message!\")", "def show(self) -> None:", "def get_random_painting():\n item = db.session.query(Painting).order_by(func.random()).first()\n return item", "def print_random_word_from(words):\n i = random.randint(0, len(words))\n print(\"The selected word has index {} and is {}.\".format(i, words[i]))\n return i", "def randitems(n, obj='ndarray', mode=None, char=None):\n if mode is None:\n mode = choice(cap[obj][MODE])\n if char is None:\n char = choice(tuple(fmtdict[mode]))\n multiplier = choice(cap[obj][MULT])\n fmt = mode + '#' + char * int(multiplier if multiplier else 1)\n items = gen_items(n, fmt, obj)\n item = gen_item(fmt, obj)\n fmt = mode.strip('amb') + multiplier + char\n return fmt, items, item", "def randitems(n, obj='ndarray', mode=None, char=None):\n if mode is None:\n mode = choice(cap[obj][MODE])\n if char is None:\n char = choice(tuple(fmtdict[mode]))\n multiplier = choice(cap[obj][MULT])\n fmt = mode + '#' + char * int(multiplier if multiplier else 1)\n items = gen_items(n, fmt, obj)\n item = gen_item(fmt, obj)\n fmt = mode.strip('amb') + multiplier + char\n return fmt, items, item", "def getRandom(self) -> int:\n return random.choice(self._list)", "def getRandom(self):\n return random.choice(self.ls)", "def __str__(self):\n return \"Item('\"+ self.get_id() + \"')\"", "def DoAction(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n itemDex = selections[0]\r\n item = self.items[itemDex]\r\n self.data.action(item)", "def randomPostToScreen():\n\ttxt_files = filter(lambda x: x.endswith('.txt'), os.listdir(bookpath))\n\tbook = random.choice(txt_files)\n\tprint generatePost(os.path.join(bookpath,book))\n\tprint '----------------------------'", "def random(self, random: bool=None):\n self._select_interface(self._rc_random, self._http_random, random)", "def choose_menu(self, n):\n self.view.menu_chosen(n, uw.Button(MENUS[n]))", "def get_item_detail(item_id):\n pass", "def _createItem(self, rpcObject):\n item = ShowWidgetItem(rpcObject, self)\n return item", "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))", "def choose(message, items=[], timeout=0):\n box = Choose(__name__, message)\n box.timeout = timeout\n box.items = items\n return box.show()", "async def show_card(self, ctx, card: dict):\r\n emb = discord.Embed(\r\n title=card['name'],\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=card['desc']\r\n )\r\n emb.set_footer(text='Use [p]domt info for list of all cards.')\r\n emb.set_image(url=card['img'])\r\n await ctx.send(embed=emb)", "def display_item(categories, item, item_id, initial_category_id):\n if item:\n # Item already exists - display on page\n return render_template('item_edit.html', item_id=item_id, item_name=item.Item.name,\n item_description=item.Item.description, item_category=item.Item.category,\n 
item_category_id=item.Item.category_id, categories=categories,\n login_session=login_session,\n csrf_token=generate_csrf_token())\n else:\n print('initial_category_id', initial_category_id)\n # Default fields for creating a new item\n return render_template('item_edit.html', item_id=0, item_name=\"\",\n item_description=\"\", item_category=\"\",\n item_category_id=initial_category_id, categories=categories,\n login_session=login_session, initial_category_id=initial_category_id,\n csrf_token=generate_csrf_token())" ]
[ "0.6923737", "0.6476034", "0.6428087", "0.64086896", "0.6310771", "0.6269286", "0.61964685", "0.6074963", "0.6051741", "0.60236067", "0.6007453", "0.59758556", "0.5961581", "0.59291637", "0.5912427", "0.58934987", "0.5885808", "0.5869903", "0.5858481", "0.58508587", "0.5845293", "0.584279", "0.58191395", "0.5787693", "0.57860583", "0.57735413", "0.5753484", "0.57494754", "0.5711047", "0.56983495", "0.56890476", "0.5680016", "0.56625646", "0.56457895", "0.56254417", "0.5607143", "0.5599512", "0.5592554", "0.5587714", "0.5575432", "0.5569679", "0.55611616", "0.55532014", "0.5552918", "0.5539001", "0.55060196", "0.5501518", "0.5499682", "0.5478945", "0.5476132", "0.54707026", "0.54563326", "0.54486305", "0.5445856", "0.5435494", "0.5431376", "0.54059196", "0.54059196", "0.54059196", "0.54059196", "0.5397152", "0.5391549", "0.5385789", "0.5376628", "0.53690106", "0.5359979", "0.53515726", "0.5348384", "0.5345693", "0.5329487", "0.5323102", "0.5320764", "0.5311399", "0.53044194", "0.52917063", "0.5287163", "0.52740484", "0.52706665", "0.52623045", "0.52476734", "0.52461994", "0.52447164", "0.5231418", "0.52290136", "0.52243763", "0.5221551", "0.5221551", "0.5213131", "0.5208773", "0.5204166", "0.5201343", "0.5201063", "0.51979935", "0.51881164", "0.51853573", "0.5183676", "0.5179931", "0.5176985", "0.51729596", "0.51703686" ]
0.6175574
7
Enables/disables DM when your message was starred. If the parameter is not given, this returns the current status. Can be used anywhere including DM.
async def star_dm(self, ctx, enable: bool = None):
    if enable is None:
        result = self.bot.db.execute("SELECT starboard_dm FROM users WHERE user_id = ?", (ctx.author.id,)).fetchone()
        enabled = result["starboard_dm"] if result else 0
        status_str = ctx._(f"star.dm{['Disabled', 'Enabled'][enabled]}")
        return await ctx.say("star.dmCurrent", status_str)

    self.bot.db.execute("UPDATE users SET starboard_dm = ? WHERE user_id = ?", (
        int(enable), ctx.author.id
    ))
    status_str = ctx._(f"star.dm{['Disabled', 'Enabled'][enable]}")
    return await ctx.say("star.dmCurrent", status_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)", "async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")", "async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)", "def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "def toggle(self):\n s = self.status()\n if s == self.POWER_OFF:\n self.on()\n else:\n self.off()\n return self.status()", "async def pmguard(message: Message):\n global pmCounter # pylint: disable=global-statement\n if Config.ALLOW_ALL_PMS:\n Config.ALLOW_ALL_PMS = False\n await message.edit(\"`PM_guard activated`\", del_in=3, log=__name__)\n else:\n Config.ALLOW_ALL_PMS = True\n await message.edit(\"`PM_guard deactivated`\", del_in=3, log=__name__)\n pmCounter.clear()\n await SAVED_SETTINGS.update_one(\n {'_id': 'PM GUARD STATUS'}, {\"$set\": {'data': Config.ALLOW_ALL_PMS}}, upsert=True)", "def getStatus(self):\n return self.enabled", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "async def greeter_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"greeter_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Greeter is now **enabled**\")\n else:\n await util.send_success(ctx, \"Greeter is now **disabled**\")", "async def async_turn_on(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()", "def enable(self):\n return self._packet.get('enable', False)\n\n # TODO: TCONT and GEM lists", "async def 
dmsettings(self, ctx):\n\n emojify_settings = self.bot.get_cog(\"Server\").emojiy_settings\n settings = await self.ensure_dm_settings(ctx.author.id)\n\n mute_kick_ban = emojify_settings(settings['ban_kick_mute'])\n leg_session_open = emojify_settings(settings['leg_session_open'])\n leg_session_update = emojify_settings(settings['leg_session_update'])\n leg_session_submit = emojify_settings(settings['leg_session_submit'])\n leg_session_withdraw = emojify_settings(settings['leg_session_withdraw'])\n\n embed = self.bot.embeds.embed_builder(title=f\"Direct Messages for {ctx.author.name}\",\n description=f\"Check `{config.BOT_PREFIX}help dms` for help on \"\n f\"how to enable or disable these settings.\\n\\n\"\n f\"{mute_kick_ban} DM when you get muted, kicked or banned\\n\"\n f\"{leg_session_open} \"\n f\"*({self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} Only)* DM when \"\n f\"a Legislative Session opens\\n\"\n f\"{leg_session_update} \"\n f\"*({self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} Only)* DM when \"\n f\"voting starts for a Legislative Session\\n\"\n f\"{leg_session_submit} \"\n f\"*({self.bot.mk.LEGISLATURE_CABINET_NAME} Only)* DM when \"\n f\"someone submits a Bill or Motion\\n\"\n f\"{leg_session_withdraw} \"\n f\"*({self.bot.mk.LEGISLATURE_CABINET_NAME} Only)* DM when \"\n f\"someone withdraws a Bill or Motion\\n\",\n has_footer=False)\n await ctx.send(embed=embed)", "def do(self):\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = \"\"\n property_val = this_server.read_property(\"SdpMasterFQDN\")[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(\n const.CMD_Disable, None, self.disable_cmd_ended_cb\n )\n self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)\n this_server.write_attr(\n \"activityMessage\", const.STR_DISABLE_CMS_SUCCESS, False\n )\n\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f\"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}\"\n tango.Except.re_throw_exception(\n dev_failed,\n const.ERR_INVOKING_CMD,\n log_msg,\n \"SdpMasterLeafNode.DisableCommand()\",\n tango.ErrSeverity.ERR,\n )", "def get_status(self):\n return super(Cabling, self).get_status()", "def enabled(self):\n return self._packet.get('enabled', True)", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def sms_disabled(self):\n return self._sms_disabled", "def lock_status(self) -> Dict[str, str]:\n self.__logger.debug('Eva.lock_status called')\n return self.__http_client.lock_status()", "async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)", "def enabled(self):\n return self._get('enabled')", "async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return", "async def _cmdf_pmenable(self, substr, msg, 
privilege_level):\n enabled_str = None\n if utils.str_says_true(substr) or (len(substr) == 0):\n self._pm_msg_isenabled = True\n enabled_str = \"enabled.\"\n else:\n self._pm_msg_isenabled = False\n enabled_str = \"disabled.\"\n self._save_settings()\n\n buf = \"PM greetings is now \" + enabled_str\n await self._client.send_msg(msg, buf)\n return", "async def set_enabled(self, enabled: bool) -> None:\n return await self.api.set_enabled(enabled)", "def enablement_state(self):\n return self.__enablement_state", "def on_message(self, msg):\n self.enabled = (msg == \"ON\")\n self.log.info(\"%s received %s command for logic actuator\",\n self.name, \"enable\" if self.enabled else \"disable\")", "def get_status():\n return ('off', 'off')", "async def legsessionvoting(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"leg_session_update\")\n\n if new_value:\n message = f\":white_check_mark: You will now receive DMs when you are \" \\\n f\"a {self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} \" \\\n f\"and voting starts for a Legislative Session.\"\n else:\n message = f\":white_check_mark: You will no longer receive DMs when you are \" \\\n f\"a {self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} \" \\\n f\"and voting starts for a Legislative Session.\"\n\n await ctx.send(message)", "def status(self):\n ret = self.dev.ctrl_transfer(0xc0, 0x01, 0x0081, 0x0000, 0x0001)\n if ret[0] == 0xa0:\n return self.POWER_ON\n return self.POWER_OFF", "def getSafetyMute(self, unitCode=0):\n resp = self.XAPCommand('SFTYMUTE', unitCode=unitCode)\n return bool(int(resp))", "async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)", "def send_status(self):\n self.data = {\n 'value': '',\n 'state': self.state,\n }\n event_manager.device_changed(self)", "def toggle(self, id):\n e = self.objectmanager.objects.get(id=id)\n e.enabled = not e.enabled\n e.save()\n return render({\"id\": id, \"status\": e.enabled})", "def _get_admin_status(self):\n return self.__admin_status", "def set_lock_status(use_lock):\r\n get_lock.lock_is_enabled = use_lock", "def enable_health(self):\n return self._enable_health", "def enable(self):\n hoomd.util.print_status_line();\n\n if self.enabled == False:\n hoomd.context.msg.error(\"you cannot re-enable DCD output after it has been disabled\\n\");\n raise RuntimeError('Error enabling updater');", "def get_disabled_switch(self):\n return self.disabled", "def enable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/enable\")\n\t\treturn bool(response.json[\"success\"])", "def enabled(self):\n return self.__enabled", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")", "def setModerationStatus(self, status):\n kSetStatus(self, status)", "def setSyncMode(self, IsPauseOn = True):\n self._IsPauseOn = IsPauseOn", "def get_mute(self):\n return on_off_bool(self.get(COMMAND_UIC, 'GetMute')['mute'])", "def is_on(self):\n return self._data[\"enabled\"]", "def affection_status_switch_on(self):\n self._affection_status_switch 
= True", "def isMuted(self):\n return self._isMuted", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n self._previous_mic_level = self.device.mic_volume\n self._previous_record_mode = self.device.recording_settings.mode\n await self.device.set_privacy(True, 0, RecordingMode.NEVER)", "def setEnabled(self, status):\r\n self._status = status\r\n\r\n if status:\r\n self._start()\r\n else:\r\n self._stop()\r\n\r\n for cb in self._statusListener:\r\n cb(self, status)", "def is_on(self):\n return getattr(self._node, STICK_API[USB_MOTION_ID][ATTR_STATE])", "def set_fedcm_delay_enabled(self, enabled):\n pass", "async def levelup(self, ctx, value: bool):\n await queries.update_setting(ctx, \"guild_settings\", \"levelup_messages\", value)\n self.bot.cache.levelupmessage[str(ctx.guild.id)] = value\n if value:\n await util.send_success(ctx, \"Level up messages are now **enabled**\")\n else:\n await util.send_success(ctx, \"Level up messages are now **disabled**\")", "def enable(self, message):\n self.Enable()", "async def starboard_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"starboard_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Starboard is now **enabled**\")\n else:\n await util.send_success(ctx, \"Starboard is now **disabled**\")\n await self.bot.cache.cache_starboard_settings()", "def set_enabled_from_policy(self, instance, service_status, action_name):\n fixture_class = self.get_fixture_class(id=instance.class_id, verify_exists=False)\n if not fixture_class:\n logger.error(\"Fixture class %s for instance %s no longer exists. This should not happen.\", instance.class_id, instance)\n return\n\n action_def = fixture_class.actions.get(action_name) or {}\n if service_status == ServiceStatus.SUCCESS:\n policy = action_def.get('on_success', '')\n elif service_status == ServiceStatus.FAILED:\n policy = action_def.get('on_failure', '')\n else:\n return\n\n new_enabled_state = None\n if 'disable' in policy:\n new_enabled_state = False\n elif 'enable' in policy:\n new_enabled_state = True\n else:\n return\n\n if instance.enabled != new_enabled_state:\n # If we went from enabled->disabled set the disable_reason\n # If we went from disabled->enabled clear the disable_reason\n # If it was already disabled (disable->disable), the original disable_reason will be presevered\n instance.enabled = new_enabled_state\n action_result = 'failed' if service_status == ServiceStatus.FAILED else 'successful'\n if new_enabled_state:\n instance.disable_reason = ''\n logger.info(\"Re-enabled %s from %s '%s' action\", instance, action_result, action_name)\n else:\n logger.info(\"Disabled %s from %s '%s' action\", instance, action_result, action_name)\n instance.disable_reason = \"Disabled from {} '{}' action\".format(action_result, action_name)\n else:\n logger.info(\"%s 'enabled' state is already: %s. 
No update to disable_reason\", instance, instance.enabled)", "def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def enabled(self):\n return self._enabled", "def send_states(self):\n\n teleop_enabled_msg = Bool()\n teleop_enabled_msg.data = self.teleop_enabled\n\n assisted_driving_enabled_msg = Bool()\n assisted_driving_enabled_msg.data = self.assisted_driving_enabled\n\n self.teleop_enabled_pub.publish(teleop_enabled_msg)\n self.assisted_driving_enabled_pub.publish(assisted_driving_enabled_msg)", "async def async_turn_on(self):\n await self.async_mute_volume(False)", "def smart_status(self) -> SmartSsdSmartStatus:\n return self._smart_status", "def enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enabled\")", "def status(self,axis):\n \n if not self.enabled:\n return (False,False)\n \n enabled = True\n self.send_cmd(axis, ' PRINT MVG')\n\n flag = self.ser.read(100)\n moving = True\n \n if flag[:4] == b'FALS': \n moving = False\n elif flag[:4] == b'TRUE':\n moving = True\n\n non_moving = not moving\n return (enabled, non_moving)", "def muted(self) -> bool:\n return self._muted", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def _get_enable(self):\n return self.__enable", "def _get_enable(self):\n return self.__enable", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "async def admin_enable(self, ctx: commands.Context):\n if ctx.guild.id in self.guilds:\n await ctx.send('Team management is already enabled in this guild.')\n return\n await self._enable_guild(guild=ctx.guild)\n await ctx.send('Team management enabled.')", "def setDelayStatus(self, channel, isEnabled, unitCode=0):\n resp = self.XAPCommand('DELAYSEL', channel, (1 if isEnabled else 0), unitCode=unitCode)\n return bool(int(resp))", "async def async_turn_on(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"on\"):\n self._is_on = True\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "def change_status(self):\n if self.status == \"Still Loaned\":\n self.status = \"Given Back\"\n else:\n self.status = \"Still Loaned\"", "def _get_status(self):\n held_msg=\"\"\n return u'%s%s' % (self.get_status_display(), held_msg)", "def update_remediation_status(self, status):\n self.remediation_status = status", "async def goodbye_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"goodbye_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Goodbye messages are now **enabled**\")\n else:\n await util.send_success(ctx, \"Goodbye messages are now **disabled**\")", "def status(self, status):\n allowed_values = [\"REQUESTED\", \"CREATE_IN_PROGRESS\", \"AVAILABLE\", \"UPDATE_IN_PROGRESS\", \"UPDATE_REQUESTED\", \"UPDATE_FAILED\", \"CREATE_FAILED\", \"ENABLE_SECURITY_FAILED\", \"PRE_DELETE_IN_PROGRESS\", \"DELETE_IN_PROGRESS\", \"DELETE_FAILED\", \"DELETE_COMPLETED\", \"STOPPED\", \"STOP_REQUESTED\", \"START_REQUESTED\", \"STOP_IN_PROGRESS\", \"START_IN_PROGRESS\", \"START_FAILED\", \"STOP_FAILED\", \"WAIT_FOR_SYNC\", \"MAINTENANCE_MODE_ENABLED\"]\n if status not in 
allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\"\n .format(status, allowed_values)\n )\n\n self._status = status", "def set_media_volume_sync(self, dut_name, enable=True):\n try:\n if self.phone_info.phone_type == PhoneType.ANDROID and 'SM' in self._get_android_phone_model():\n is_bt_connected_to_device = self.bt_is_connected_to(dut_name)\n if not is_bt_connected_to_device:\n logger.debug(\n 'For phone found that DUT {} is not connected with {} , '\n 'So Media Volume Sync option is not available '.format(\n dut_name,\n self.phone_info.bluetooth_name))\n return False\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_more_options,\n 5)\n self.find_element(self.driver.appium_driver,\n self.bluetooth_more_options, 0).click()\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.media_volume_text,\n 10)\n self.find_element(self.driver.appium_driver,\n self.media_volume_text, 0).click()\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.media_volume_sync_switch,\n 10)\n\n volume_sync_switch = self.find_element(\n self.driver.appium_driver, self.media_volume_sync_switch,\n 0)\n\n # Now click that button if we're in the wrong state.\n is_media_volume_sync = self._toggle_switch(volume_sync_switch,\n enable)\n self.driver.appium_driver.back()\n logger.debug(\n \"Media Volume option is set to {} on connected bluetooth devices {}\".format(\n enable, dut_name))\n return is_media_volume_sync\n logger.warning(\n \"Media Volume Sync Option is not available on {} connected bluetooth devices\".format(\n self.phone_info.bluetooth_name))\n except Exception as e:\n logger.warning(\n \"Could not enable/disable Media Volume Sync on connected mobile devices {}\"\n .format(self.phone_info.bluetooth_name))\n logger.warning(repr(e))\n return False", "async def enabled(self) -> bool:\n response = await self.adguard.request(\"parental/status\")\n return response[\"enabled\"]", "def locked(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n self._lock_details = out[get_key(zonekeys.SESSION, self._SW_VER)]\n if \"none\" in self._lock_details:\n return False\n else:\n return True", "def notify_kicked(self):\n self.is_kicked = True", "def update_status(self):\n if self.pwm:\n if self.state == GPIO.HIGH:\n thread = threading.Thread(target=self._pwm_on, args=())\n thread.start()\n elif self.state == GPIO.LOW:\n thread = threading.Thread(target=self._pwm_off, args=())\n thread.start()\n else:\n GPIO.output(self.id_, self.state)\n\n return self.get_status()", "async def enable(self, ctx):\n self.bot.db.execute(\"UPDATE starboards SET enabled = 1 WHERE channel_id = ?\", (ctx.channel.id,))\n await ctx.say(\"star.enabled\")", "def EnableSendTriggeredGroupStatLearnedInformation(self):\n\t\treturn self._get_attribute('enableSendTriggeredGroupStatLearnedInformation')", "def status(self):\r\n return not self.sendQuery(\"isMoving\",\"isMoving\")", "def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> 
pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def enabled(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enabled\")", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)" ]
[ "0.64651364", "0.59721977", "0.5788894", "0.5637827", "0.559156", "0.55866206", "0.5582838", "0.5560499", "0.5545492", "0.5535075", "0.55287397", "0.5520025", "0.5509821", "0.5500232", "0.5459743", "0.54503405", "0.544722", "0.54312956", "0.5386221", "0.5380486", "0.53682333", "0.5346718", "0.5323133", "0.5304755", "0.53033173", "0.5300116", "0.5281752", "0.528049", "0.5278697", "0.52782613", "0.526745", "0.52552885", "0.5233725", "0.523145", "0.5218137", "0.5214816", "0.5212047", "0.52065957", "0.5206042", "0.52041024", "0.51971066", "0.5189693", "0.5186876", "0.518141", "0.5179687", "0.5177347", "0.5174404", "0.5170576", "0.5150554", "0.514768", "0.5147378", "0.51420337", "0.51403934", "0.51285785", "0.5127931", "0.5125424", "0.5124359", "0.5124359", "0.5124359", "0.5124359", "0.5124359", "0.5124359", "0.5124359", "0.5124359", "0.51205325", "0.5118141", "0.51134455", "0.51133496", "0.5108298", "0.51079273", "0.5107853", "0.5106848", "0.5106848", "0.51000166", "0.5093888", "0.5090663", "0.5088644", "0.50878495", "0.5082774", "0.5080184", "0.5080046", "0.50790864", "0.5073793", "0.5073061", "0.50710475", "0.5068851", "0.50657576", "0.5062268", "0.505858", "0.50564826", "0.5054494", "0.5036137", "0.5036137", "0.5036137", "0.5036137", "0.5036137", "0.5036137", "0.5036137", "0.5036137", "0.50302756" ]
0.6530001
0
Return the minimum and maximum values in the input.
def minmax(seq, *, key=lambda x: x):
    iterator1, iterator2 = tee(seq)
    return MinMax(min(iterator1, key=key), max(iterator2, key=key))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_max(xs):\n return min(xs), max(xs)", "def get_min_max(ints):\n if len(ints) <= 0:\n return ()\n min_value = ints[0]\n max_value = ints[0]\n for i in range(len(ints)):\n temp = ints[i]\n if temp <= min_value:\n min_value = temp\n if temp >= max_value:\n max_value = temp\n output = (min_value, max_value)\n# print(\"output: \", output)\n return output\n pass", "def minmax(xs):\n min_val = None\n max_val = None\n for x in xs:\n if min_val is None or x < min_val:\n min_val = x\n if max_val is None or x > max_val:\n max_val = x\n return (min_val, max_val)", "def get_min_max(ints):\r\n if len(ints) == 0:\r\n return None\r\n max = ints[0]\r\n min = ints[0]\r\n\r\n for int in ints:\r\n if int < min:\r\n min = int\r\n if int > max:\r\n max = int\r\n \r\n return min, max", "def get_min_max(ints):\n if ints is None or len(ints) == 0:\n return None\n \n min_value = ints[0]\n max_value = ints[0]\n\n for value in ints:\n if value < min_value:\n min_value = value\n\n if value > max_value:\n max_value = value\n\n return (min_value, max_value)", "def min_max(items):\n return min(items), max(items)", "def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans", "def get_min_max(ints):\n if not ints:\n return\n max = ints[0]\n min = ints[0]\n\n\n for i in ints:\n if i > max:\n max = i\n if i < min:\n min = i\n return (min, max)", "def get_min_max(ints):\n if not ints:\n return None, None\n if len(ints) ==None:\n return None\n min_val = float(\"inf\")\n max_val = -float(\"inf\")\n # for each int in ints if update max_val and min_val accordingly\n for integer in ints:\n if integer > max_val:\n max_val = integer\n\n if integer < min_val:\n min_val = integer\n \n return (min_val, max_val)", "def min_max(input):\r\n return tuple(sorted(input)[:1]+sorted(input)[-1:]) # write a line of code to return containing min and max\r\n #tuple(input[:1]+input[-1:]) --------------- it works for the sorted lists\r\n #tuple(sorted(input)[:1]+sorted(input)[-1:]) ---------------it works for any input---slicing lists and concatinating\r\n #tuple(sorted(input))[:1]+tuple(sorted(input))[-1:]----------------it works same as the above----slicing tuples and concatinating them\r", "def find_min_max(x):\n if not x: return None\n if len(x) == 1:\n return x[0], x[0] # the first is min and the second is max\n min_val = x[0]\n max_val = x[0]\n for i in xrange(1, len(x)):\n if x[i] < min_val:\n min_val = x[i]\n elif x[i] > max_val:\n max_val = x[i]", "def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])", "def minmax(data):\n smallest = data[0]\n largest = data[0]\n\n for i in range(0,len(data)):\n if 
data[i] < smallest:\n smallest = data[i]\n elif data[i] > largest:\n largest = data[i]\n\n return(smallest,largest)", "def get_min_max(ints):\n if ints == None or len(ints) == 0:\n return None\n if len(ints) == 1:\n return (ints[0],ints[0])\n \n max = ints[0]\n min = ints[0]\n for i in range(1, len(ints)):\n if ints[i] > max:\n max = ints[i]\n if ints[i] < min:\n min = ints[i]\n return (min,max)", "def get_min_max(ints):\n\n if len(ints) == 0:\n return (None,None)\n min = ints[0]\n max = ints[0]\n for x in range(1,len(ints)):\n if ints[x] > max:\n max=ints[x]\n elif ints[x] < min:\n min = ints[x]\n return (min,max)", "def get_min_max(ints):\n print(f\"input arr is {ints}\")\n max=0\n min=len(ints)-1\n for i in range(1,len(ints)):\n if ints[i]>ints[max]:\n temp=ints[i]\n ints[i]=ints[max]\n ints[max]=temp\n if ints[i]<ints[min]:\n temp=ints[i]\n ints[i]=ints[min]\n ints[min]=temp\n #print(f\"max value is {ints[max]}\")\n #print(f\"min value is {ints[min]}\")\n return(ints[min],ints[max])", "def min_max(lst):\r\n my_min = None\r\n my_max = None\r\n for num in lst:\r\n if (my_min and my_max) is not None:\r\n # recalculate running min and max:\r\n if num < my_min:\r\n my_min = num\r\n continue\r\n if num > my_max:\r\n my_max = num\r\n else:\r\n my_min = num\r\n my_max = num\r\n ans = (my_min, my_max)\r\n return ans", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", "def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv", "def minMaxFonc(liste):\n\n return min(liste), max(liste)", "def get_min_max(nums):\n assert(type(nums) == list), \"nums has to be a list\"\n assert(len(nums) > 0), \"get_min_max() arg is an empty sequence\"\n min_ = nums[0]\n max_ = nums[0]\n for n in nums:\n assert(type(n) == int), \"numbers in the list have to be an integer\"\n if n < min_:\n min_ = n\n if n > max_:\n max_ = n\n return (min_, max_)", "def min_max(my_list):\n print(\"Min = \",min(my_list,key = abs))\n print(\"Max = \",max(my_list,key = abs))", "def min_max(arr: StaticArray) -> ():\n minimum = arr.get(0) # sets min to first element\n maximum = arr.get(0) # sets max to first element\n # iterate over the elements in the array to check for < or >\n for index in range(arr.size()):\n if arr[index] < minimum: # if element is less than the current min, min = new element\n minimum = arr[index]\n elif arr[index] > maximum: # if element is greater than 
the current max, max = new element\n maximum = arr[index]\n return minimum, maximum", "def get_minmax(self, stmt, slist):\n minel = maxel = None\n for s in slist:\n if s.keyword == \"min-elements\":\n minel = s.arg\n elif s.keyword == \"max-elements\":\n maxel = s.arg\n if minel is None:\n minst = stmt.search_one(\"min_elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max_elements\")\n if maxst:\n maxel = maxst.arg\n return (minel, maxel)", "def get_min_max(ints):\n if len(ints) == 0:\n return None, None\n \n min_e = ints[0]\n max_e = ints[-1]\n for e in ints:\n if isinstance(e, int) == False: # if the list includes non-integer number, do not find min, max \n return None,None\n if e < min_e:\n min_e = e\n if e > max_e:\n max_e = e\n return min_e, max_e", "def range(x):\n try:\n return (min(min(y) for y in x), max(max(y) for y in x))\n except ValueError:\n return (None, None)", "def minMaxBoucle(liste):\n minimum = float(\"inf\")\n maximum = -float(\"inf\")\n\n for index in range(0, 5):\n liste[index] = int(liste[index])\n if liste[index] > maximum:\n maximum = liste[index]\n if liste[index] < minimum:\n minimum = liste[index]\n return minimum, maximum", "def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes", "def getValidRatingInputs(self):\n min = self.minRatingInput.get()\n max = self.maxRatingInput.get()\n\n try:\n min = int(min)\n except ValueError:\n min = 0\n\n try:\n max = int(max)\n except ValueError:\n max = 100\n\n return min, max", "def findmaxmin(input_file):\n\tE_list = sub.check_output(\"check_maxmin.py {}\".format(input_file), shell=True).decode(\"utf-8\")\n\tEmax = float(re.search(r\"Maximum\\s*:\\s*(([+-]|\\s)\\d*\\.\\d+)\", E_list).group(1))\n\tEmin = float(re.search(r\"Minimum\\s*:\\s*(([+-]|\\s)\\d*\\.\\d+)\", E_list).group(1))\n\treturn Emax, Emin", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)", "def min_max_date(self, min, max, date):\n\t\tif not min or min > date:\n\t\t\tmin = date\n\n\t\tif not max or max < date:\n\t\t\tmax = date\n\n\t\treturn min, max", "def find_max_min(number):\n if max(number) == min(number):\n return [len(number)]\n return [min(number), max(number)]", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest, lowest)", "def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]", "def calculate_avg_min_max(temps):\n\n temp_average = sum(temps) / len(temps)\n return temp_average, min(temps), max(temps)", "def input_bounds(self):\n return self._min_input, self._max_input", "def get_min_max_tuple(min_max_tuple, value):\n min_v, max_v = min_max_tuple\n\n min_v = smart_min(min_v, value)\n max_v = smart_max(max_v, value)\n\n return (min_v, max_v)", "def _set_min_max_values(self):\n\n p_1, p_2 = self.points[0], self.points[1]\n nb_dim = len(p_1.values)\n self._min_values = []\n self._max_values = []\n for d in range(nb_dim):\n d_min = min(p_1[d], p_2[d])\n d_max = max(p_2[d], p_2[d])\n self._min_values.append(d_min)\n self._max_values.append(d_max)", "def find_min_max_fast(x):\n import math\n if not x: return None\n if len(x) == 1:\n return x[0], x[0] # the first is min and the second is max\n min_val, max_val = min_max(x[0], x[1])\n for i in xrange(2, (len(x) - 1), 2):\n smaller, larger = min_max(x[i], x[i+1])\n min_val = min(smaller, min_val)\n max_val = 
max(larger, max_val)\n if len(x) & 1:\n # the length of x is odd.\n min_val = min(x[-1], min_val)\n max_val = max(x[-1], max_val)\n return min_val, max_val", "def minMax(x):\r\n\r\n iMinMax = [0 for i in range(2)]\r\n\r\n num = len(x)\r\n #//System.out.println(\"MinMax: num: \" + num);\r\n\r\n iMin = 0\r\n iMax = 0\r\n min = x[iMin]\r\n max = x[iMax]\r\n\r\n for i in range(1, num):\r\n\r\n #//System.out.println(\"MinMax: i , current min, x : \" + i + \" \" + min + \" \" + x[i]);\r\n if (x[i] < min): \r\n #//System.out.println(\"MinMax: new min: if branch triggered\" );\r\n min = x[i]\r\n iMin = i\r\n \r\n #//System.out.println(\"MinMax: new min: \" + min);\r\n\r\n if (x[i] > max): \r\n max = x[i]\r\n iMax = i\r\n \r\n\r\n \r\n #//System.out.println(\"MinMax: \" + iMin + \" \" + iMax);\r\n\r\n iMinMax[0] = iMin\r\n iMinMax[1] = iMax\r\n\r\n return iMinMax", "def find_maximum_and_minimum(file_name: str) -> Tuple[int, int]:\n min_number = max_number = None\n with open(file=file_name, mode=\"tr\") as file:\n\n for line in file:\n number = int(line)\n\n if min_number is None:\n min_number = max_number = number\n\n elif number > max_number:\n max_number = number\n\n elif number < min_number:\n min_number = number\n\n return min_number, max_number", "def range(series):\n return min(series), max(series)", "def get_extrema(list):\n max_x = max(list,key=lambda item:item[0])[0]\n max_y = max(list,key=lambda item:item[1])[1]\n min_x = min(list,key=lambda item:item[0])[0]\n min_y = min(list,key=lambda item:item[1])[1]\n return (min_x, max_x, min_y, max_y)", "def find_maximum_and_minimum(file_name: str) -> Tuple[int, int]:\n with open(file_name) as fi:\n min = int(fi.readline())\n max = min\n for line in fi:\n line = int(line.rstrip('\\n')) # удаление переноса строки\n if min >= line: # и перевод оставшейся части в число\n min = line\n elif max <= line:\n max = line\n return min, max", "def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))", "def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum", "def minMax2(x):\r\n\r\n iMinMax = [0 for i in range(2)]\r\n num = len(x)[0]\r\n\r\n iMin = 0\r\n iMax = 0\r\n\r\n #// Search for minimum and maximum in row 0 - linear values:\r\n min = x[0][iMin]\r\n max = x[0][iMax]\r\n\r\n for i in range(1, num):\r\n\r\n if (x[0][i] < min): \r\n min = x[0][i]\r\n iMin = i\r\n \r\n\r\n if (x[0][i] > max): \r\n max = x[0][i]\r\n iMax = i\r\n \r\n iMinMax[0] = iMin\r\n iMinMax[1] = iMax\r\n\r\n return iMinMax", "def minmax(self):\r\n vx = [v[0] for v in self.vl]\r\n vy = [v[1] for v in self.vl]\r\n self.xmax, self.xmin = max(vx), min(vx)\r\n self.ymax, self.ymin = max(vy), min(vy)", "def find_minmax(lims, olims):\n\n limzip = zip(list(lims), list(olims), [np.min, np.max])\n return tuple([float(fn([l, ol])) for l, ol, fn in limzip])", "def min_max(self, data, era):\n return 0, np.max(data)", "def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value", "def getMaxima(x, y):\n# mx_x = (np.abs(np.min(x)) + np.max(x)) / 2\n# mx_y = (np.abs(np.min(y)) + np.max(y)) / 2\n# \n mx_x = np.max(x)\n mx_y = np.max(y)\n return mx_x, mx_y", "def get_min_max_x(self, 
min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x", "def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))", "def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx", "def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax", "def data_range(x):\n return max(x)-min(x)", "def minmax_element(iterable, first=0, last=None, key=None):\n assert hasattr(iterable, '__getitem__')\n iterable = iterable if first == 0 and last is None else iterable[first:last]\n if key is None:\n return min(iterable), max(iterable)\n else:\n return min(iterable, key=key), max(iterable, key=key)", "def _get_extremes(self, attr='values'):\n # calculate the maximum and minimum for all series\n series_max = [0]\n series_min = [0]\n for s in self:\n if s is not None:\n series_max.append(s.max(attr))\n series_min.append(s.min(attr))\n return min(series_min), max(series_max)", "def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]", "def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax", "def get_range(lst):\n return float(max(lst)) - float(min(lst))", "def get_extremum(x):\n min_val = x.min()\n argmin = x.argmin()\n max_val = x.max()\n argmax = x.argmax()\n extremum = min_val\n argextremum = argmin\n if abs(min_val) < abs(max_val):\n extremum = max_val\n argextremum = argmax\n\n return extremum, argextremum", "def get_features_min_max(self):\n min_max_list = []\n\n # Get each feature's min and max values.\n for feature_name in self.feature_names:\n min = self.data[feature_name].min()\n max = self.data[feature_name].max()\n min_max_list.append([min, max])\n\n # Create dataframe from list of lists in correct format\n min_max_df = pd.DataFrame(min_max_list)\n min_max = min_max_df.T\n min_max.columns = self.feature_names\n min_max.index = ['min', 'max']\n\n return min_max", "def min_max(arr, arr_size):\n max_t = arr[0]\n min_t = arr[0]\n for i in range(arr_size):\n if arr[i] > max_t:\n max_t = arr[i]\n if arr[i] < min_t:\n min_t = arr[i]\n return min_t, max_t", "def maxmin(x):\n point_list = x[0]\n lons = [f for [f,s] in point_list]\n lats = [s for [f,s] in point_list]\n return [max(lats), min(lats), max(lons), min(lons)]", "def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = 
cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)", "def find_min_max(x, y, xmin, xmax, ymin, ymax, zoomout=0.05):\n if len(x) != 0:\n newxmin, newxmax = np.min(x), np.max(x)\n diffx = newxmax - newxmin\n if newxmin < xmin:\n xmin = newxmin - zoomout * diffx\n if newxmax > xmax:\n xmax = newxmax + zoomout * diffx\n\n if len(y) != 0:\n newymin, newymax = np.min(y), np.max(y)\n diffy = newymax - newymin\n if newymin < ymin:\n ymin = newymin - zoomout * diffy\n if newymax > ymax:\n ymax = newymax + zoomout * diffy\n return xmin, xmax, ymin, ymax", "def get_minmax_stats(dataframe, variable):\n\n print(\"Maximum value of \", variable, \"is: \", dataframe[variable].max())\n print(\"Minimum value of \", variable, \"is: \", dataframe[variable].min())", "def GetFieldMinMax(fielddef):\n minmax = {'c': (0, 0xff),\n '?': (0, 1),\n 'b': (~0x7f, 0x7f),\n 'B': (0, 0xff),\n 'h': (~0x7fff, 0x7fff),\n 'H': (0, 0xffff),\n 'i': (~0x7fffffff, 0x7fffffff),\n 'I': (0, 0xffffffff),\n 'l': (~0x7fffffff, 0x7fffffff),\n 'L': (0, 0xffffffff),\n 'q': (~0x7fffffffffffffff, 0x7fffffffffffffff),\n 'Q': (0, 0x7fffffffffffffff),\n 'f': (sys.float_info.min, sys.float_info.max),\n 'd': (sys.float_info.min, sys.float_info.max),\n }\n format_ = GetFieldDef(fielddef, fields='format_')\n min_ = 0\n max_ = 0\n\n if format_[-1:] in minmax:\n min_, max_ = minmax[format_[-1:]]\n max_ *= GetFormatCount(format_)\n elif format_[-1:].lower() in ['s','p']:\n # s and p may have a prefix as length\n max_ = GetFormatCount(format_)\n\n return min_,max_", "def min_and_max(i, j, op, m, M):\n max_val = float('-inf')\n min_val = float(\"inf\")\n for k in range(i, j):\n a = evalt(M[i][k], M[k+1][j], op[k])\n b = evalt(M[i][k], m[k+1][j], op[k])\n c = evalt(m[i][k], M[k+1][j], op[k])\n d = evalt(m[i][k], m[k+1][j], op[k])\n max_val = max(max_val, a, b, c, d)\n min_val = min(min_val, a, b, c, d)\n return min_val, max_val", "def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))", "def test_min_max_scalar(features: List[List[float]]) -> List[List[float]]:\n min_max_features = []\n transposed_features = [list(i) for i in zip(*features)]\n for feature in transposed_features:\n min_in_feature = min(feature)\n max_in_feature = max(feature)\n if min_in_feature == max_in_feature:\n min_max_features.append([0 for x in feature])\n else:\n min_max_features.append([(x - min_in_feature) / (max_in_feature - min_in_feature) for x in feature])\n return [list(i) for i in zip(*min_max_features)]", "def min_max_range(s):\n # note np.argmax, np.argmin returns the position of first occurence of global max, min\n sign = np.sign(np.argmax(s) - np.argmin(s))\n if sign == 0:\n return 0.0\n else:\n return sign*(np.max(s) - np.min(s))", "def extreme_values(self, extreme):\n\n\t\tif extreme.lower() == 'min':\n\t\t\treturn data.min()\n\t\telif extreme.lower() == 'max':\n\t\t\treturn data.max()\n\t\telse:\n\t\t\tassert 'Invalid Parameter !'", "def get_min_max(self):\n\n mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms\n mr[:] = np.max(mr)\n\n mxmin = self.mx - mr\n mxmax = self.mx + mr\n mymin = self.my - mr\n mymax = self.my + mr\n mzmin = self.mz - mr\n mzmax = self.mz + mr\n\n mb_xmin_idx = np.argmin(mxmin[self.ma > 0])\n mb_xmax_idx = np.argmax(mxmax[self.ma > 0])\n mb_ymin_idx = 
np.argmin(mymin[self.ma > 0])\n mb_ymax_idx = np.argmax(mymax[self.ma > 0])\n mb_zmin_idx = np.argmin(mzmin[self.ma > 0])\n mb_zmax_idx = np.argmax(mzmax[self.ma > 0])\n\n xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]\n xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]\n ymin0 = self.my[mb_ymin_idx] - mr[mb_ymin_idx]\n ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]\n zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]\n zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]\n\n xmin = xmin0 - (xmax0 - xmin0) * 0.25\n xmax = xmax0 + (xmax0 - xmin0) * 0.25\n ymin = ymin0 - (ymax0 - ymin0) * 0.25\n ymax = ymax0 + (ymax0 - ymin0) * 0.25\n zmin = zmin0 - (zmax0 - zmin0) * 0.25\n zmax = zmax0 + (zmax0 - zmin0) * 0.25\n\n return xmin, xmax, ymin, ymax, zmin, zmax", "def xminmax ( self ) :\n return self.xvar.minmax()", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def validate_min_max(self):\n first = True\n\n for point in self:\n if first:\n self.min_amplitude = point\n self.max_amplitude = point\n self.min_wavenumber = point\n self.max_wavenumber = point\n first = False\n else:\n if point.wave_number > self.max_wavenumber.wave_number:\n self.max_wavenumber = point\n if point.wave_number < self.min_wavenumber.wave_number:\n self.min_wavenumber = point\n if point.amplitude > self.max_amplitude.amplitude:\n self.max_amplitude = point\n if point.amplitude < self.min_amplitude.amplitude:\n self.min_amplitude = point", "def _limit(value, min_value, max_value):\n\n if value < min_value:\n return min_value\n if value > max_value:\n return max_value\n return value", "def parse_vmin_vmax(container, field, vmin, vmax):\n field_dict = container.fields[field]\n field_default_vmin, field_default_vmax = get_field_limits(field)\n if vmin is None:\n if \"valid_min\" in field_dict:\n vmin = field_dict[\"valid_min\"]\n else:\n vmin = field_default_vmin\n if vmax is None:\n if \"valid_max\" in field_dict:\n vmax = field_dict[\"valid_max\"]\n else:\n vmax = field_default_vmax\n return vmin, vmax", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def glGetMinmax( baseFunction, target, reset, format, type, values=None):\r\n if values is None:\r\n width = 2\r\n values = images.images.SetupPixelRead( format, (width,4), type )\r\n arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[\r\n images.images.TYPE_TO_ARRAYTYPE.get(type,type)\r\n ]\r\n baseFunction(\r\n target, reset, format, type,\r\n ctypes.c_void_p( arrayType.dataPointer(values))\r\n )\r\n return values", "def min_max_id()->(int, int):\n stmt = sqlalchemy.select([sqlalchemy.func.min(_READING_TABLE.c.id), \n sqlalchemy.func.max(_READING_TABLE.c.id)]).select_from(_READING_TABLE)\n result = execute_command(stmt).fetchall()[0]\n return result[0], result[1]", "def get_min_max(self, groups, key):\n group = groups.get_group(key)\n min = group.loc[group[\"dif\"].idxmin()]\n max = group.loc[group[\"dif\"].idxmax()]\n minmax = {\"min\": min, \"max\": max}\n return minmax", "def getminmax_linear_search(arr):\n if len(arr) == 0:\n return None, None\n\n if len(arr) == 1:\n return arr[0], arr[0]\n\n min_num = None\n max_num = None\n if arr[0] > arr[1]:\n max_num = arr[0]\n min_num = arr[1]\n else:\n max_num = arr[1]\n min_num = arr[0]\n\n for idx in range(2, len(arr)):\n if min_num > arr[idx]:\n min_num = arr[idx]\n if max_num < arr[idx]:\n 
max_num = arr[idx]\n\n return min_num, max_num", "def getRange(self) -> Tuple[int, int]:\n return self.validator().bottom(), self.validator().top()", "def my_min(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[0]\n\n if not args:\n raise ValueError(\"Can't find min, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find min, wrong data format\")\n return sorter(args)", "def max_in_range(self, x, y, low, high):\r\n data = np.vstack((x,y)) \r\n y_values = data[1][np.logical_and(low < data[0], data[0] < high)]\r\n x_values = data[0][np.logical_and(low < data[0], data[0] < high)]\r\n index_max_y = y_values.argmax()\r\n max_y = y_values[index_max_y]\r\n max_x = x_values[index_max_y]\r\n return max_x, max_y", "def minmax ( self , nshoots = 100000 ) :\n ## try to get minmax directly from pdf/function \n if self.tricks and hasattr ( self.pdf , 'function' ) :\n if hasattr ( self.pdf , 'setPars' ) : self.pdf.setPars() \n f = self.pdf.function()\n if hasattr ( f , 'minmax' ) :\n try :\n mn , mx = f.minmax()\n if 0<= mn and mn <= mx and 0 < mx : \n return mn , mx\n except :\n pass\n if hasattr ( f , 'max' ) :\n try :\n mx = f.max()\n if 0 < mx : return 0 , mx\n except :\n pass\n\n ## check RooAbsReal functionality\n code = self.pdf.getMaxVal( ROOT.RooArgSet ( self.xvar , self.yvar ) )\n if 0 < code :\n mx = self.pdf.maxVal ( code )\n if 0 < mx : return 0 , mx\n \n ## not try to use random\n \n mn , mx = -1 , -10\n if hasattr ( self.pdf , 'min' ) : mn = self.pdf.min()\n if hasattr ( self.pdf , 'max' ) : mx = self.pdf.max()\n if 0 <= mn and mn <= mx and 0 < mx : return mn , mx\n \n if not self.xminmax() : return ()\n if not self.yminmax() : return ()\n \n mn , mx = -1 , -10\n xmn , xmx = self.xminmax()\n ymn , ymx = self.yminmax()\n for i in range ( nshoots ) : \n xx = random.uniform ( xmn , xmx )\n yy = random.uniform ( ymn , ymx )\n with SETVAR ( self.xvar ) :\n with SETVAR ( self.yvar ) :\n self.xvar.setVal ( xx )\n self.yvar.setVal ( yy )\n vv = self.pdf.getVal()\n if mn < 0 or vv < mn : mn = vv\n if mx < 0 or vv > mx : mx = vv\n \n return mn , mx", "def get_min_max(self) -> tuple:\r\n\r\n minimum = float(\"inf\")\r\n maximum = float(\"-inf\")\r\n\r\n for name, data in self.scatters_data.items():\r\n mapping = self.scatters[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n for name, data in self.trees_data.items():\r\n if 
self.trees[name][\"point_helper\"] is None:\r\n mapping = self.trees[name][\"mapping\"]\r\n min_x = float(\"inf\")\r\n min_y = float(\"inf\")\r\n min_z = float(\"inf\")\r\n max_x = float(\"-inf\")\r\n max_y = float(\"-inf\")\r\n max_z = float(\"-inf\")\r\n\r\n if mapping[\"x\"] in data:\r\n min_x = min(data[mapping[\"x\"]])\r\n max_x = max(data[mapping[\"x\"]])\r\n\r\n if mapping[\"y\"] in data:\r\n min_y = min(data[mapping[\"y\"]])\r\n max_y = max(data[mapping[\"y\"]])\r\n\r\n if mapping[\"z\"] in data:\r\n min_z = min(data[mapping[\"z\"]])\r\n max_z = max(data[mapping[\"z\"]])\r\n\r\n minimum = min(minimum, min([min_x, min_y, min_z]))\r\n maximum = max(maximum, max([max_x, max_y, max_z]))\r\n\r\n return minimum, maximum", "def _find_min_and_max_coords(self, block_locations):\n min_x, max_x, min_y, max_y = self.game_size, 0, self.game_size, 0\n for coord in block_locations:\n x = coord[constant.X]\n y = coord[constant.Y]\n if x < min_x:\n min_x = x\n if x > max_x:\n max_x = x\n if y < min_y:\n min_y = y\n if y > max_y:\n max_y = y\n return min_x, max_x, min_y, max_y", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def hi_lo(data_series, current_max, current_min):\n \n try:\n highest = numpy.max(data_series)\n except:\n highest = max(data_series)\n \n if highest > current_max:\n new_max = highest\n else:\n new_max = current_max\n \n try: \n lowest = numpy.min(data_series)\n except:\n lowest = min(data_series)\n \n if lowest < current_min:\n new_min = lowest\n else:\n new_min = current_min\n \n return new_max, new_min", "def min_max_outliers(res, min=None, max=None):\n min_max_list = []\n if isinstance(min, (int, float)):\n data1 = res[res < min].reset_index()\n data1['limit type'] = 'minimum'\n data1['limit'] = min\n min_max_list.append(data1)\n if isinstance(max, (int, float)):\n data1 = res[res > max].reset_index()\n data1['limit type'] = 'maximum'\n data1['limit'] = max\n min_max_list.append(data1)\n\n min_max1 = pd.concat(min_max_list)\n\n return min_max1", "def bounds(x, xMin, xMax):\n if (x < xMin):\n x = xMin\n elif (x > xMax):\n x = xMax\n return(x)", "def minmax(data, fields):\n vmin = min(data[field].min() for field in fields)\n vmax = max(data[field].max() for field in fields)\n return dict(vmin=vmin, vmax=vmax)", "def getminmax_tournament(arr, low, high):\n if low == high:\n return arr[low], arr[low]\n\n if abs(low - high) == 1:\n min_num = None\n max_num = None\n if arr[low] > arr[high]:\n max_num = arr[low]\n min_num = arr[high]\n else:\n max_num = arr[high]\n min_num = arr[low]\n return min_num, max_num\n\n else:\n mid = (low + high) // 2\n min_num_1, max_num_1 = getminmax_tournament(arr, low, mid)\n min_num_2, max_num_2 = getminmax_tournament(arr, mid+1, high)\n return min(min_num_1, min_num_2), max(max_num_1, max_num_2)", "def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin" ]
[ "0.7975695", "0.77930915", "0.7780894", "0.7709472", "0.76314646", "0.7625426", "0.7577489", "0.7568", "0.75583875", "0.74224806", "0.7407902", "0.7383869", "0.7375017", "0.73552513", "0.7351573", "0.72896475", "0.72671056", "0.7245023", "0.721607", "0.72126466", "0.72114575", "0.71791345", "0.7090889", "0.708096", "0.7075696", "0.70660055", "0.7061984", "0.7059411", "0.7045269", "0.7007793", "0.69722486", "0.69634426", "0.6949339", "0.6930337", "0.6893399", "0.68528855", "0.68260896", "0.68150294", "0.6794114", "0.67904955", "0.67041034", "0.67023385", "0.6675358", "0.66395867", "0.66324246", "0.6606732", "0.6601156", "0.65921986", "0.6586539", "0.6584725", "0.6570368", "0.6567145", "0.6554717", "0.65011954", "0.6491343", "0.6483467", "0.64767545", "0.64706826", "0.6466172", "0.6464029", "0.6458666", "0.6415124", "0.6412509", "0.6389946", "0.63824326", "0.6371434", "0.63570267", "0.63469756", "0.6345006", "0.6344915", "0.63395816", "0.6336911", "0.6329169", "0.63218653", "0.63099194", "0.62759775", "0.62752706", "0.6260221", "0.625876", "0.6240713", "0.6222146", "0.62215084", "0.62126726", "0.6195492", "0.6194054", "0.61904645", "0.6185769", "0.6185711", "0.61805004", "0.617967", "0.61648107", "0.6160422", "0.6151147", "0.6139272", "0.61372447", "0.61371565", "0.612824", "0.6123695", "0.6120921", "0.6110734", "0.6110633" ]
0.0
-1
Finds the position that a value of weight "weight" would fall in the weight_list, where weight_list is sorted by smallest to largest. Newer inputs win in ties.
def find_pos(weight, weight_list):
    bool_list = [weight >= x for x in weight_list]
    pos = bool_list.count(True) - 1
    return pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]", "def solve_brute_force(n: int, W: int, weight: List[int], value: List[int]) -> int:\n mapped_items = [{\"w\": w, \"v\": v} for i, (w, v) in enumerate(zip(weight, value))]\n\n maximum_value: int = 0\n updated: bool = False\n for i in range(1, n + 1):\n if i > 1 and not updated:\n break\n\n updated = False\n for chosen_items in list(combinations(mapped_items, i)):\n sum_weight = 0\n sum_value = 0\n for item in chosen_items:\n sum_weight += item[\"w\"]\n sum_value += item[\"v\"]\n\n if sum_weight <= W and maximum_value < sum_value:\n updated = True\n maximum_value = sum_value\n return maximum_value", "def solve_dp(n: int, W: int, weight: List[int], value: List[int]) -> int:\n dp = [[0] * (W + 1) for i in range(n + 1)]\n\n for i, (w, v) in enumerate(zip(weight, value)):\n for j in range(W + 1):\n dp[i + 1][j] = dp[i][j]\n if j - w >= 0:\n dp[i + 1][j] = max(dp[i][j], dp[i][j - w] + v)\n\n return dp[n][W]", "def get_rank(weight):\n weight = min(1.0, max(weight, 0.0))\n ranks = [x for x in ALL_RANKS if weight >= x.min_weight]\n ranks.sort(key=lambda x: x.min_weight)\n return ranks.pop()", "def weighted_choice(list_, weights=None):\n size = len(list_)\n if weights is not None:\n assert size == len(weights)\n\n if weights is None:\n probs = np.array([1 / float(size) for i in range(size)])\n else:\n probs = np.array(weights) / sum(weights) # just in case\n\n rand = np.random.random()\n\n _sum = 0\n for i in range(size):\n if _sum <= rand < _sum + probs[i]:\n choice = i\n break\n else:\n _sum += probs[i]\n\n return list_[choice]", "def weighted_wheel_selection(weights: List[float]) -> int:\n\n cumulative_sum = np.cumsum(weights)\n prob = r.generate_uniform_random_number() * cumulative_sum[-1]\n\n for i, c_sum in enumerate(cumulative_sum):\n if c_sum > prob:\n return i\n\n return None", "def weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i", "def _weighted_choice(self, lst):\n \n total_weight = reduce(lambda x,y:x+y, [tup[1] for tup in lst])\n n = random.uniform(0, total_weight)\n for item, weight in lst:\n if n < weight:\n break\n n = n - weight\n return item", "def select(weights):\n r = random.random() * sum(weights)\n s = 0.0\n for k,w in enumerate(weights):\n s += w\n if r <= s:\n return k\n raise RuntimeError(\"select WTF from %s\" % weights)", "def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))", "def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))", "def closest(self, w, n=10):\r\n scores = self.m.dot(self.represent(w))\r\n return heapq.nlargest(n, zip(scores, self.iw))", "def sample_from(self, weights):\n total = sum(weights)\n rnd = total * random.random() # uniform between 0 and total\n for i, w in enumerate(weights):\n rnd -= w # return the smallest i such that\n if rnd <= 0:\n return i # weights[0] + ... 
+ weights[i] >= rnd", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def randpck(elements, rand_function):\n\n # First, we compute the total weight (for example 10)\n total_weight = 0\n for e in elements:\n assert e[1] >= 0\n total_weight += e[1]\n\n # Then we generate a random number multiplied by the total weight (e.g. 0.4218 * 10 = 42.18)\n random_weight = rand_function() * total_weight\n\n # Lastly, we run through the list to find which one matches with the generated weight\n current_weight = 0\n for e in elements:\n current_weight += e[1]\n if random_weight < current_weight:\n return e[0]\n\n return None", "def weighted_random_item(items, weight):\n if not items:\n return None\n\n weight_sum = sum(weight(item) for item in items)\n if weight_sum <= 0:\n return None\n\n choice = random.random() * weight_sum\n for item in items:\n choice -= weight(item)\n if choice < 0:\n return item, weight(item) / weight_sum\n return items[-1], -1 # floating-point rounding error", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def weighted_choice(choices, weight):\n\t# requirements = random\n\tweights = []\n\t# get weight values for each of the choices\n\tfor choice in choices:\n\t\tchoice_weight = weight(choice)\n\t\tif not (isinstance(choice_weight, int) and choice_weight > 0):\n\t\t\traise TypeError('weight results must be positive integers')\n\t\tweights.append(choice_weight)\n\n\t# make a selection within the acceptable range\n\tselection = random.randint(0, sum(weights) - 1)\n\n\t# find and return the corresponding choice\n\tfor idx, choice in enumerate(choices):\n\t\tif selection < sum(weights[:idx + 1]):\n\t\t\treturn choice\n\traise RuntimeError('no selection could be made')", "def pick_weighted(weights, vals, eps=1.0e-4):\n\t\n\tweightSum = cumsum(weights)\n\tif weightSum[-1] == 0:\n\t\treturn random.choice(vals)\n\tif abs(weightSum[-1]-1.0) > eps:\n\t\traise RuntimeError(\"Weights don't sum to 1\")\n\tr = random.uniform(0.0,1.0)\n\tfor v,w in zip(vals, weightSum):\n\t\tif r > w:\n\t\t\tcontinue\n\t\treturn v\n\treturn vals[-1]", "def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item", "def dp_make_weight(egg_weights, target_weight, memo={}):\n\n \"\"\"\n 根据提示: 每个pound类型的蛋是无限的。\n 问题是提供一种蛋的组合,最好pound数等于或是接近总的weight 并且要满足数量要越少越好。\n 这是两个限制条件。但是提示也给了总是有egg为value1的,那么难度小了很多。\n 现在是怎样让蛋的数量越少越好。\n \n 1.最优子结构\n egg_weights 现在假设是(1, 5, 10, 25)\n dp_make_weight((1, 5, 10, 25),x,memo) , 当x - n >= 0 时(n代表 1,5,10,25),\n 然后在 dp_make_weight((1,5,10,25,x-n,memo) +1 中 挑选最小值。+1的原因是包含本次\n 2.重叠子问题\n 详见ps1b的图片。\n 那么memo记录的key 为 avail(即剩余的容量) ,value 为avail下最小的蛋的数量n。\n \n 那么base_case是什么?\n target == 0时,返回0\n 现在按照深度优先的思路思考\n \"\"\"\n\n if target_weight == 0:\n return 0\n\n if target_weight in memo:\n return memo[target_weight]\n\n result = None # 占位符,没有多大用\n\n for elt in egg_weights:\n if target_weight - elt >= 0: # 这样才有继续探索的必要\n tmp_result = dp_make_weight(egg_weights, target_weight - elt, memo) + 1\n if result is None or tmp_result < result:\n result = tmp_result\n memo[target_weight] = result\n return 
result", "def get_min_weight_index(weights: list, mst_set: set) -> int:\n min_weight = math.inf\n index = 0\n\n for i in range(len(weights)):\n if weights[i] < min_weight and i not in mst_set:\n min_weight = weights[i]\n index = i\n\n return index", "def find_probability(problist, listoffive):\n\tprobs = []\n\tfor i in listoffive:\n\t\tprobs.append(problist[i])\n\ttotprob = 1\n\tfor n in probs:\n\t\ttotprob = totprob * n\n\treturn totprob", "def weighted_score(counters, lst, weight):\n if counters == None:\n counters = {}\n\n\n for item in lst:\n if item in counters:\n counters[item] += weight\n else:\n counters[item] = weight\n\n return counters", "def find_best_match(organ: Organ, wait_list: WaitList,\n weights: Dict[int, float]) -> Optional[Patient]:\n # ANSI codes to emphasize output\n bold_red, red, reset = '\\033[31;1m', '\\033[31m', '\\033[0m'\n matches = wait_list.get_prioritized_patients(organ)\n\n # returns the patient with the highest priority within acceptable proximity\n while len(matches) != 0:\n patient = heapq._heappop_max(matches) # type: ignore\n if organ.viability >= weights[patient.location] - 10:\n return patient\n\n # in the event there are no matches\n print(f'\\n{bold_red}The following organ has no suitable matches:'\n f'\\n{red}{organ.__str__()}{reset}')\n return None", "def dp_make_weight(egg_weights, target_weight, memo = {}):\r\n # construct table. outer loop: egg weights. inner loop: 0-target_weight\r\n # table will be stored in memo. key=egg_weight, value=list, indexed from 0-target_weight\r\n for i, w in enumerate(egg_weights):\r\n # initialize key-value pair for a given egg weight. Value is empty list to be filled in inner loop.\r\n memo[w] = []\r\n for j in range(target_weight + 1):\r\n # if weight is 0, no eggs\r\n if j == 0:\r\n memo[w].append(0)\r\n # if egg_weight is less than weight, minimize number of eggs\r\n elif w <= j:\r\n # to minimize: take the min of (using prior denomination to get same weight, using current denomation to get weight)\r\n # first item=prior egg value, same weight\r\n # second item=\"sub\" current egg value by subtracting it from weight and adding 1 to egg total\r\n \r\n # if first egg weight, no need to look at \"row\" above to minimize\r\n if i == 0:\r\n min_eggs = memo[w][j-w] + 1\r\n else:\r\n min_eggs = min(memo[egg_weights[i-1]][j], memo[w][j-w] + 1)\r\n memo[w].append(min_eggs)\r\n # else if egg_weight is more than weight, take prior denomination min number of eggs at j\r\n else:\r\n memo[w].append(memo[egg_weights[i-1]][j])\r\n\r\n # access bottom right value to get minimum number of coins (largest egg_weight at target_weight)\r\n # uncomment below to only returns min number of eggs\r\n #return memo[egg_weights[-1]][target_weight]\r\n\r\n # determine makeup of min number of egg: \r\n # cur_weight to keep track as we subtract from total weight\r\n cur_weight = target_weight\r\n \r\n # egg_choices: a dict that holds how many of each egg_weight are in the optimal solution\r\n egg_choices = {}\r\n \r\n #print(memo)\r\n \r\n # outer loop goes backwards from highest to smallest egg weight\r\n for i in range(len(egg_weights)-1, -1, -1):\r\n # check if equal to memo[i-1][j] (row above, same column). 
if not equal, i is in the set.\r\n while egg_weights[i] <= cur_weight:\r\n # also if smallest egg weight, keep subtracting until we get 0\r\n if i == 0 or (memo[egg_weights[i]][cur_weight] != memo[egg_weights[i-1]][cur_weight]):\r\n # if they are not equal, add to the count of i in the egg_choices dict\r\n if egg_weights[i] in egg_choices.keys():\r\n egg_choices[egg_weights[i]] += 1\r\n else:\r\n egg_choices[egg_weights[i]] = 1\r\n # subtract from current weight the egg weight accounted for\r\n cur_weight -= egg_weights[i]\r\n \r\n # break if all weight accounted for\r\n if cur_weight == 0:\r\n break\r\n \r\n # string together the min number of eggs and the composition\r\n out = str(memo[egg_weights[-1]][target_weight]) + ' ('\r\n \r\n # list of formatted value * key pairs\r\n eggs = []\r\n for key, value in egg_choices.items():\r\n eggs.append(str(value) + ' * ' + str(key))\r\n \r\n # join key/value pairs together\r\n out += ' + '.join(eggs)\r\n \r\n # finish off the string\r\n out += ' = ' + str(target_weight) + ')'\r\n return out", "def selection_wheel(self, weighted_population):\n weight_total = sum((item[1] for item in weighted_population))\n n = random.uniform(0, weight_total)\n for item, weight in weighted_population:\n if n < weight:\n return item\n n = n - weight\n return item", "def get_weight_class(weight):\n\n if(weight >= 3500):\n return 5\n elif(weight >= 3000 and weight < 3500):\n return 4\n elif(weight >= 2500 and weight < 3000):\n return 3\n elif(weight >= 2000 and weight < 2500):\n return 2\n else:\n return 1", "def get_best_match(self, list):\n raise NotImplementedError", "def knapsack(weights):\n\n n = len(weights)\n max_sum = sum(weights)\n\n result = []\n\n dp = [False for _ in range(max_sum + 1)]\n dp[0] = True\n\n for i in range(1, n + 1):\n # update dp from right to left for each new weight\n for x in range(max_sum, -1, -1):\n if dp[x]:\n dp[x + weights[i - 1]] = True\n\n for i in range(len(dp)):\n if dp[i]:\n result.append(i)\n\n return result # returns all possible sums that can be constructed given a list of weights\n\n # return dp", "def solve(instance, silent=True, max_weight_lower=1,\n max_weight_upper=float('inf'), scoring=\"sink distance\"):\n flow = instance.flow\n k = instance.k\n\n # quit right away if the instance has weight bounds that can't be satisfied\n if instance.has_bad_bounds():\n return set()\n\n # if k equals the size of the largest edge cut, the weights are\n # predetermined\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n # Important: path weights must be sorted, otherwise our\n # subsequent optimizations will remove this constraint.\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight,\n instance.weights))\n\n if not silent:\n print(instance.weights, feasible_weights)\n\n # figure out whether we get the first or last positions for free\n largest_free = False\n smallest_free = False\n # check largest weight first\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n\n positions = list(range(int(smallest_free), k-int(largest_free)))\n\n # iterate over the number of unguessed weights\n for diff in range(k+1):\n if not silent:\n print(\"Diff =\", diff)\n # iterate 
over positions of guessed weights. We want them to be\n # ordered, but choose the smallest first to be removed\n for rev_indices in itertools.combinations(reversed(positions), k-diff):\n indices = list(reversed(rev_indices))\n p = len(indices)\n # when k-1 values are determined, it also determines the kth value\n if p == k-1:\n continue\n # iterate over choices for those guessed weights\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n\n # assign the chosen weights to the guessed positions\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n\n # add in free values\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k-1] = largest\n\n # quit if this didn't work\n if not is_feasible(weights, flow, max_weight):\n continue\n\n if not silent:\n print(\"Trying weights\", weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print(\"Unterdetermined solution\")\n return sol", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def weightedChoice(weights, objects, apply_softmax=False, alpha=None):\n if apply_softmax: weights = softmax(weights)\n if alpha: weights = normalize([w**alpha for w in weights])\n cs = np.cumsum(weights) #An array of the weights, cumulatively summed.\n idx = sum(cs < np.random.rand()) #Find the index of the first weight over a random value.\n idx = min(idx, len(objects)-1)\n return objects[idx]", "def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)", "def knapsack(items, maxweight):\n @lru_cache(maxsize=None)\n def bestvalue(i, j):\n # Return the value of the most valuable subsequence of the first\n # i elements in items whose weights sum to no more than j.\n if j < 0:\n return float('-inf')\n if i == 0:\n return 0\n value, weight = items[i - 1]\n return max(bestvalue(i - 1, j), bestvalue(i - 1, j - weight) + value)\n\n j = maxweight\n result = []\n for i in reversed(range(len(items))):\n if bestvalue(i + 1, j) != bestvalue(i, j):\n result.append(items[i])\n j -= items[i][1]\n result.reverse()\n return bestvalue(len(items), maxweight), result", "def bestOf(predictorList):\n assert predictorList != [], \"Predictor list is empty!\"\n bestList = []\n bestRate = -1.0\n for p in predictorList:\n if p.successRate > bestRate:\n bestList = [p]\n bestRate = p.successRate\n elif p.successRate == bestRate:\n bestList.append(p)\n return bestList", "def _brute_force_unbounded_knapsack_aux(weight_limit, weight_list, value_list, item_list):\n max_value = 0\n max_list = []\n for i in range(len(weight_list)):\n available_space = weight_limit - weight_list[i]\n if available_space >= 0:\n current_value, current_list = _brute_force_unbounded_knapsack_aux(\n available_space, weight_list, value_list, item_list)\n current_value += value_list[i]\n current_list.append(i)\n if (current_value > max_value):\n max_value = current_value\n max_list = current_list\n return max_value, max_list", "def 
draw(weights):\n choice = random.uniform(0, sum(weights))\n choice_index = 0\n\n for weight in weights:\n choice -= weight\n if choice <= 0:\n return choice_index\n\n choice_index += 1", "def get_indices_of_item_weights(weights, length, limit):\n # Code here\n # Create a cache\n cache = {}\n\n # Run for each of the indexes in the list\n for idx in range(length):\n # How much until we reach the limit?\n remaining_weight = limit - weights[idx]\n\n # Have we seen already the remaining weight needed?\n # Look in the cache\n if remaining_weight in cache:\n # Create a tuple for the results\n result = (idx, cache[remaining_weight])\n # Return by descending weight\n result = sorted(result, reverse=True)\n return result\n else:\n # Add these weights in cache\n cache[weights[idx]] = idx\n\n return None", "def greedy_order(dict_prefs, list_els):\n ordering=list()\n els=deepcopy(list_els)\n while els!=[]:\n best_score=float(\"-infinity\")\n for e1 in els:\n score_el=0\n for e2 in els:\n if e1==e2:\n continue\n score_el+=_score_pref(e1,e2,dict_prefs)\n if score_el>best_score:\n best_score=score_el\n best_el=e1\n ordering.append(best_el)\n els.remove(best_el)\n return ordering", "def weightedrandomchoice(items): # {{{2\n total = 0\n items.sort(reverse=True, key=lambda x:x[0])\n for item in items:\n total += item[0]\n threshold = random.uniform(0, 0.6) * total\n for item in items:\n threshold -= item[0]\n if threshold <= 0:\n return item[1]", "def closest_match(num,num_list):\n\tdiffs = np.abs(np.subtract(num,num_list))\n\treturn num_list[np.argmin(diffs)]", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def brute_force_unbounded_knapsack(weight_limit, weight_list, value_list):\n item_list = []\n return _brute_force_unbounded_knapsack_aux(\n weight_limit, weight_list, value_list, item_list)", "def weighted_average(value_weight_list): \n numerator = sum([v * w for v,w in value_weight_list])\n denominator = sum([w for v,w in value_weight_list])\n if(denominator != 0):\n return(float(numerator) / float(denominator))\n else:\n return None", "def weighted_average(value_weight_list):\n numerator = sum([v * w for v, w in value_weight_list])\n denominator = sum([w for v, w in value_weight_list])\n if(denominator != 0):\n return(float(numerator) / float(denominator))\n else:\n return None", "def score(priority_list, totalItemCount, itemUsageDict, threshold):\n scored = list()\n for item in priority_list:\n scored.append((item, itemUsageDict[item][\"winRatio\"] * (itemUsageDict[item][\"totalCount\"]/ totalItemCount) * threshold))\n return scored", "def most_probable_class(text, weights):\n\n pos_weights = weights['positive']\n neg_weights = weights['negative']\n neu_weights = weights['neutral']\n features = calculate_features(text)\n pos_numerator = 0.0\n neg_numerator = 0.0\n neu_numerator = 0.0\n denominator = 0.0\n for f in features:\n if f in pos_weights and f in neg_weights and f in neu_weights:\n pos_numerator += pos_weights[f] * features[f]\n neg_numerator += neg_weights[f] * features[f]\n neu_numerator += neu_weights[f] * features[f]\n denominator += pos_numerator + neg_numerator + neu_numerator\n else:\n pos_numerator += 0\n neg_numerator += 0\n neu_numerator += 0\n 
denominator += pos_numerator + neg_numerator + neu_numerator\n\n pos_prob = (\"positive\", exp(pos_numerator))# /exp(denominator))\n neg_prob = (\"negative\", exp(neg_numerator))# /exp(denominator))\n neu_prob = (\"neutral\", exp(neu_numerator))# /exp(denominator))\n return max(neu_prob, neg_prob, pos_prob, key=lambda x: x[1])", "def convertMBpsToWeight(self, mbps):\n mbps_thresholds = [200, 100, 75, 50, 20, 10, 2, 1]\n weights = [2, 1.9, 1.8, 1.6, 1.5, 1.3, 1.2, 1.1]\n\n for threshold, weight in zip(mbps_thresholds, weights):\n if mbps > threshold:\n return weight\n return 1", "def __find_majority_opt(input_list):\n count = 0\n element = input_list[0]\n for i in range(len(input_list)):\n if count == 0 :\n element = input_list[i]\n count =1\n elif element == input_list[i]:\n count +=1\n else :\n count -=1\n return element", "def sortByWeight(xs):\n xs = [x for x in xs if len(x.reshape(-1)) > 0]\n return list(sorted(xs, key=lambda x: (x > 0).sum()))", "def get_strongest(weights, topn):\n nstrongest_idx = np.argpartition(np.abs(weights), -topn, axis=0)[-topn:]\n nstrongest = np.array([[weights[nstrongest_idx[i, j], j] for j in range(nstrongest_idx.shape[1])]\n for i in range(topn)])\n\n return nstrongest_idx, nstrongest", "def weighted_choice(weighted_items, num_items=1):\n total = 0\n cume_list = []\n\n for item, weight in weighted_items.items():\n total += weight\n cume_list.append([item, total])\n\n for pair in cume_list:\n pair[1] /= total\n\n items = []\n\n for _ in range(num_items):\n rand = random()\n\n for item, val in cume_list:\n if rand <= val:\n items.append(item)\n break\n\n assert num_items == len(items), (weighted_items, items)\n\n if num_items == 1:\n return items[0]\n\n return items", "def unbounded_knapsack_btm_up_DP(weight_limit, weight_list, value_list):\n memo_value_list = (weight_limit + 1) * [0]\n memo_item_list = [[] for i in range(weight_limit + 1)]\n (weight_limit + 1) * [None]\n for i in range(weight_limit + 1):\n # print(i)\n memo_value_list, memo_item_list = _unbounded_knapsack_btm_up_DP_aux(\n i, weight_list, value_list, memo_item_list, memo_value_list)\n # print(memo_value_list)\n # print(memo_item_list)\n return memo_value_list[weight_limit], memo_item_list[weight_limit]", "def get_closest_value_index_in_sorted_list(value, list_):\n if value <= list_[0]:\n return 0\n if value >= list_[-1]:\n return len(list_) - 1\n pos = bisect.bisect_left(list_, value)\n before = list_[pos - 1]\n after = list_[pos]\n if after - value < value - before:\n return pos\n else:\n return pos - 1", "def weighted_choice(*values, **kwargs):\n key = kwargs.get('key', lambda x: 1.0)\n if len(values) == 1:\n values = values[0]\n if len(values) == 0:\n raise TypeError('weighted_choice expected 1 arguments, got 0')\n\n weights = [key(v) for v in values]\n s = sum(weights)\n r = random.random() * s\n for v,w in zip(values, weights):\n s -= w\n if r > s:\n return v\n return values[-1]", "def roulette(weights, n):\n if n > len(weights):\n raise Exception(\"Can't choose {} samples from {} items\".format(n, len(weights)))\n if any(map(lambda w: w <= 0, weights.values())):\n raise Exception(\"The weight can't be a non-positive number.\")\n items = weights.items()\n chosen = set()\n for i in range(n):\n total = sum(list(zip(*items))[1])\n dice = random.random() * total\n running_weight = 0\n chosen_item = None\n for item, weight in items:\n if dice < running_weight + weight:\n chosen_item = item\n break\n running_weight += weight\n chosen.add(chosen_item)\n items = [(i, w) for (i, w) in items if i 
!= chosen_item]\n return list(chosen)", "def weights(self) -> List[float]:", "def random_pick (self, checkfn=None):\n tweight = self.total_weight(checkfn=checkfn)\n if tweight == 0:\n return None, None\n\n n = random.uniform(0, tweight)\n\n for num, item in enumerate(self):\n if checkfn is not None and not checkfn(item):\n continue\n\n if item.weight >= n:\n return num, item\n n = n - item.weight\n\n return None, None", "def knapsack_solution(v: List[int], w: List[float], c: float) -> int:\n pass", "def unbounded_knapsack_top_down_DP(weight_limit, weight_list, value_list):\n memo = weight_limit * [-1]\n memo_item_list = weight_limit * [None]\n return _unbounded_knapsack_top_down_DP_aux(weight_limit, weight_list, value_list, memo_item_list, memo)", "def int_with_probability(list_of_values):\n sum_of_values = sum(list_of_values)\n\n # pick a random value from 0 to sum\n r = random.randrange(0, sum_of_values)\n new_sum = 0\n\n for item in list_of_values:\n new_sum += item\n if new_sum >= r:\n return item", "def selectValidMaxElement(cheese_weights, cheese_eaten_map):\r\n tmp_list = []\r\n for index in range(len(cheese_weights)):\r\n if index == 0 and cheese_eaten_map[index] == 0 and\\\r\n (cheese_eaten_map[index+1] == 0 or cheese_eaten_map[index+1] == -1):\r\n tmp_list.append(cheese_weights[index])\r\n elif index == len(cheese_weights) - 1 and cheese_eaten_map[index] == 0 and\\\r\n (cheese_eaten_map[index-1] == 0 or cheese_eaten_map[index-1] == -1):\r\n tmp_list.append(cheese_weights[index])\r\n elif index != 0 and index != len(cheese_weights) - 1 and cheese_eaten_map[index] == 0 and\\\r\n (cheese_eaten_map[index+1] == 0 or cheese_eaten_map[index+1] == -1) and\\\r\n (cheese_eaten_map[index-1] == 0 or cheese_eaten_map[index-1] == -1):\r\n tmp_list.append(cheese_weights[index])\r\n if not len(tmp_list):\r\n return 0\r\n curr_max = max(tmp_list)\r\n indices = []\r\n indices = [i for i in range(len(cheese_weights)) if cheese_weights[i] == curr_max]\r\n \r\n for entry in indices:\r\n if entry == 0 and (cheese_eaten_map[entry+1] == 0 or cheese_eaten_map[entry+1] == -1):\r\n cheese_eaten_map[entry] = 1\r\n return cheese_weights[entry]\r\n elif entry == len(cheese_weights) - 1 and (cheese_eaten_map[entry-1] == 0 or cheese_eaten_map[entry-1] == -1):\r\n cheese_eaten_map[entry] = 1\r\n return cheese_weights[entry]\r\n elif entry != 0 and entry != len(cheese_weights) - 1 and\\\r\n (cheese_eaten_map[entry] == 0) and\\\r\n (cheese_eaten_map[entry -1] == 0 or cheese_eaten_map[entry -1] == -1) and\\\r\n (cheese_eaten_map[entry+1] == 0 or cheese_eaten_map[entry+1] == -1):\r\n cheese_eaten_map[entry] = 1\r\n return cheese_weights[entry]\r\n else:\r\n cheese_eaten_map[entry] = -1\r\n continue", "def _weightedAverage(list_):\n\n\t\taccum = [0, 0]\n\n\t\tfor point, weight in list_:\n\n\t\t\taccum[0] += point[0] * weight\n\t\t\taccum[1] += point[1] * weight\n\n\t\ttotalWeight = sum([weight for point, weight in list_])\n\n\n\t\tif totalWeight == 0:\n\t\t\t\n\t\t\treturn (0, 0)\n\n\n\t\taccum[0] /= float(totalWeight)\n\t\taccum[1] /= float(totalWeight)\n\n\t\treturn (accum[0], accum[1])", "def weighted_choice(self, observation, sess):\n assert self.initialised, \"This model must be initialised (self.initialisation())\"\n reward = self.get_reward(observation, sess)\n\n reward_cumsum = np.cumsum(reward) / np.sum(reward)\n\n temp = np.random.rand()\n\n for i, value in enumerate(reward_cumsum):\n if value > temp:\n break\n \n return i, reward[i]", "def addCowWeight(list, cows):\r\n sum = 0.0\r\n for key in list:\r\n 
sum += cows[key]\r\n return sum", "def mini(lst, key=lambda x: x):\n best, besti = lst[0],0\n for i in xrange(1,len(lst)): \n if key(lst[i]) < key(best):\n best, besti = lst[i], i\n return best,besti", "def knapsack(capacity, value, weight):\n # space complexity is O(W)\n K = [[0 for x in range(capacity + 1)] for x in range(2)]\n s = [0] * 2\n # build table\n # time complexity is O(nW)\n for i in range(len(value) + 1):\n for c in range(capacity + 1):\n if i == 0 or c == 0:\n K[i % 2][c] = 0\n elif weight[i - 1] <= c:\n if value[i - 1] + K[(i - 1) % 2][c - weight[i - 1]] > K[(i - 1) % 2][c]:\n K[i % 2][c] = value[i - 1] + \\\n K[(i - 1) % 2][c - weight[i - 1]]\n s[i % 2] = i\n else:\n K[i % 2][c] = K[(i - 1) % 2][c]\n s[i % 2] = s[(i - 1) % 2]\n else:\n K[i % 2][c] = K[(i - 1) % 2][c]\n\n return K[len(value) % 2][capacity], s[capacity % 2]", "def word_nearest(word_list, target, condition = None, consider_phase = True):\n \n if not condition:\n condition = lambda t: True\n \n min_distance = 100\n min_word = None\n \n def word_distance(word1, word2):\n position1 = word1.position\n position2 = word2.position\n\n distance = [a-b for a, b in zip(position1, position2)]\n\n return np.sum(np.abs(distance))\n \n if isinstance(word_list, Word):\n word_list = [word_list]\n elif isinstance(word_list, list):\n #word_list = word_list\n pass\n else:\n print (word_list)\n raise TypeError()\n \n for word in word_list:\n phase = word.phase\n for word_compare in target:\n if not condition(word_compare):\n continue\n elif consider_phase and phase - word_compare.phase:\n continue\n\n distance = word_distance(word, word_compare)\n #print (word_compare, distance)\n if min_distance > distance:\n min_distance = distance\n min_word = word_compare\n elif min_distance == distance:\n pass\n # should be revised\n\n \n return min_word", "def calculate_weights(counts, thresholds):\n weights = []\n steps = len(thresholds) + 1\n for i in range(len(counts)):\n for j in range(steps - 1):\n if counts[i] <= thresholds[j]:\n weights.append(j + 1)\n break\n elif j == steps - 2:\n # last step\n weights.append(j + 2)\n return weights", "def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def compChooseWord(hand, wordList, n):\n # BEGIN PSEUDOCODE (available within ps4b.py)bestScore = 0\n bScore = 0\n bWord = None\n for word in wordList:\n if isValidWord(word, hand, wordList):\n score = getWordScore(word, n)\n if score > bScore:\n bScore = score\n bWord = word\n return bWord", "def weightedMean(numlist, weights):\n\twxsum = 0.0\n\twsum = 0.0\n\n\tassert len(numlist) == len(weights)\n\n\tfor (x,w) in zip(numlist, weights):\n\t\twxsum += x*w\n\t\twsum += w\n\tif wsum == 0.0:\n\t\treturn 0.0\n\treturn wxsum/wsum", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def min(weightData , dataSetVector ):\r\n # weightData: pass the whole weightData array.\r\n # dataSetVector: pass the a data vector to compare with weightdata array, to find its closest match\r\n winnerIndex = 0 #flag for initalizing the winner index\r\n minValue = EcuDist(dataSetVector,weightData[0]) # initalize the minValue\r\n # iterate through all weighdata 
rows to find the closest match, depending on ecu. distance,\r\n #and then return the index of the closest match(winner)\r\n for i in range(weightData.shape[0]):\r\n if(EcuDist(dataSetVector,weightData[i]) < minValue):\r\n minValue = EcuDist(dataSetVector,weightData[i])\r\n winnerIndex = i\r\n return winnerIndex", "def weights_above_gen_from(self, minimum_weight: float) -> List:\r\n\r\n def func_gen(fit: af.Fit, minimum_weight: float) -> List[object]:\r\n samples = fit.value(name=\"samples\")\r\n\r\n weight_list = []\r\n\r\n for sample in samples.sample_list:\r\n if sample.weight > minimum_weight:\r\n weight_list.append(sample.weight)\r\n\r\n return weight_list\r\n\r\n func = partial(func_gen, minimum_weight=minimum_weight)\r\n\r\n return self.aggregator.map(func=func)", "def weight_values(values: np.array, weights: np.array) -> np.array:\r\n # Compute the maximum weight\r\n max_weight = weights.max()\r\n # Associate each value with its weight and compute teh weighted value\r\n zipped = np.column_stack((values, weights))\r\n weighted = np.array([weight_value(value=value, weight=weight, max_weight=max_weight) for value, weight in zipped])\r\n return weighted", "def _sorting_feature_weights(feature_weights):\n feature_weights.sort(key=lambda x:abs(x[1]['weight']), reverse=True)\n return feature_weights", "def get_calib_weight_lst(calib_lst, obsdate, exptime):\n input_datetime = dateutil.parser.parse(obsdate) \\\n + datetime.timedelta(seconds=exptime/2)\n datetime_lst = [dateutil.parser.parse(calib['date-obs']) \\\n + datetime.timedelta(seconds=calib['exptime']/2)\n for calib in calib_lst]\n\n dt_lst = [(dt - input_datetime).total_seconds() for dt in datetime_lst]\n dt_lst = np.array(dt_lst)\n\n if len(dt_lst)==1:\n # only one reference in datetime_lst\n weight_lst = [1.0]\n elif (dt_lst<0).sum()==0:\n # all elements in dt_lst > 0. means all references are after the input\n # datetime. then use the first reference\n weight_lst = np.zeros_like(dt_lst, dtype=np.float64)\n weight_lst[0] = 1.0\n elif (dt_lst>0).sum()==0:\n # all elements in dt_lst < 0. means all references are before the input\n # datetime. 
then use the last reference\n weight_lst = np.zeros_like(dt_lst, dtype=np.float64)\n weight_lst[-1] = 1.0\n else:\n weight_lst = np.zeros_like(dt_lst, dtype=np.float64)\n i = np.searchsorted(dt_lst, 0.0)\n w1 = -dt_lst[i-1]\n w2 = dt_lst[i]\n weight_lst[i-1] = w2/(w1+w2)\n weight_lst[i] = w1/(w1+w2)\n\n return weight_lst", "def random_choice(prob_list: list, seed=None):\n prob_sum = sum(prob_list)\n prob_range_list = list()\n seek = 0.0\n for p in prob_list:\n prob_range_list.append((seek, seek + p))\n seek += p\n\n while True:\n random.seed(seed)\n prob = random.uniform(0, prob_sum)\n for index, p_range in enumerate(prob_range_list):\n if p_range[0] < prob <= p_range[1] or p_range[0] <= prob < p_range[1]:\n return index", "def knapval_norep(W, wt):\n # choose to use item.weight and get item.value + optimal from what's left\n # last_item = items[-1]\n # other_items = items[:-1]\n # options = list(\n # knpaval_norep(capacity, other_items))\n # if last_item.weight <= capacity:\n # options.append(last_item.value +\n # knapval_norep(capacity-last_item.weight, other_items),\n # )\n #\n\n \"\"\"Find max weight that can fit in knapsack size W.\"\"\"\n # Create n nested arrays of 0 * (W + 1)\n max_vals = [[0] * (W + 1) for x in range(len(wt))]\n # Set max_vals[0] to wt[0] if wt[0] <= j\n max_vals[0] = [wt[0] if wt[0] <= j else 0 for j in range(W + 1)]\n for i in range(1, len(wt)):\n for j in range(1, W + 1):\n value = max_vals[i - 1][j] # previous i @ same j\n if wt[i] <= j:\n val = (max_vals[i - 1][j - wt[i]]) + wt[i]\n if value < val:\n value = val\n max_vals[i][j] = value\n else:\n max_vals[i][j] = value # set to [i - 1][j]\n else:\n max_vals[i][j] = value # set to [i - 1][j]\n\n return max_vals[-1][-1]", "def lastStoneWeight(self, stones: List[int]) -> int:\n if len(stones) <= 1:\n return stones[0] if len(stones) == 1 else None\n temp = sorted(stones, reverse=True)\n while len(temp) > 2:\n new_weight = abs(temp[0] - temp[1])\n temp = sorted(temp[2:] + [new_weight], reverse=True)\n return abs(temp[0] - temp[1])", "def wilcoxon_w(pairs: List[Tuple[Union[float, int], int]]) -> float:\n i = 0\n w = 0.0\n while i < len(pairs):\n absi = pairs[i][0]\n sum_sgn = pairs[i][1]\n j = i + 1\n while j < len(pairs) and pairs[j][0] == absi:\n sum_sgn += pairs[j][1]\n j += 1\n r = 1 + 0.5 * (i + j - 1) # smoothed rank\n w += r * sum_sgn\n i = j\n return w", "def weighted_choice(options: np.ndarray, weights: np.ndarray) -> Union[int,float]:\n assert len(options) == len(weights) != 0\n total = np.sum(weights)\n rand = np.random.rand() * total\n for i in range(len(options)):\n option = options[i]\n weight = weights[i]\n if weight < rand:\n rand -= weight\n else:\n break\n return option", "def TightStrategy(I_list,box_list):\n iso = 0\n lemon = []\n SortedItems = quick_sort(I_list)\n for element in range(0, len(SortedItems)):\n w = SortedItems[element].weight\n x = FindTightFit(box_list, w)\n if x == None:\n iso+=1\n pass\n else:\n if w <= x.max_cap - x.curr_cap:\n x.curr_cap += w\n x.items_list.append(SortedItems[element])\n lemon.append(SortedItems[element])\n else:\n pass\n print('Results from Greedy Strategy 2')\n if iso > 0:\n print('Unable to pack all items!')\n else:\n print('All items were successfully packed!')\n for s in box_list:\n print('Box',s.id,'of weight capacity',s.max_cap,'contains:')\n for item in s.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')", "def 
prediction_prob(self, x, weights):\n _, probs = self.predict_probability(x, weights)\n preds = []\n for p in probs:\n if p>0.5: preds.append(1)\n else: preds.append(-1)\n return preds", "def weights(cae, p):\n weights = []\n for i, j in pairwise(p):\n closed_open = len([e for e in cae if i <= e < j])\n weights.append(closed_open)\n return [value % 2 for value in weights]", "def worstOf(predictorList):\n assert predictorList != [], \"Predictor list is empty!\"\n worstList = []\n worstRate = 2.0\n for p in predictorList:\n if p.successRate < worstRate:\n worstList = [p]\n worstRate = p.successRate\n elif p.successRate == worstRate:\n worstList.append(p)\n return worstList", "def init_solution(weight_cost,max_weight):\r\n solution = [] #empty list\r\n allowed_positions = list(range (len(weight_cost))) # (0,10)\r\n while len(allowed_positions)>0:\r\n idx = random.randint(0,len(allowed_positions)-1) # any random number between 0 to 9\r\n selected_position = allowed_positions.pop(idx)\r\n if get_cost_and_weight_of_knapsack(solution + [selected_position],weight_cost)[1] <= max_weight:\r\n solution.append(selected_position)\r\n else:\r\n break\r\n return solution", "def bruteClosest(list_points):\n\n minimum = 0\n p1 = 0\n p2 = 0\n for i in list_points:\n for k in list_points:\n \n d = dist(i,k)\n if (d < minimum and d != 0) or minimum == 0:\n p1 = i\n p2 = k\n minimum = d\n return [p1, p2, minimum]", "def choice(some_list, probabilities, max_probability=1):\n x = random.uniform(0, max_probability)\n cumulative_probability = 0.0\n\n for item, item_probability in zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability: break\n\n return item", "def _get_weights(dist, weights):\n if weights in (None, \"uniform\"):\n return None\n\n if weights == \"distance\":\n # if user attempts to classify a point that was zero distance from one\n # or more training points, those training points are weighted as 1.0\n # and the other points as 0.0\n if dist.dtype is np.dtype(object):\n for point_dist_i, point_dist in enumerate(dist):\n # check if point_dist is iterable\n # (ex: RadiusNeighborClassifier.predict may set an element of\n # dist to 1e-6 to represent an 'outlier')\n if hasattr(point_dist, \"__contains__\") and 0.0 in point_dist:\n dist[point_dist_i] = point_dist == 0.0\n else:\n dist[point_dist_i] = 1.0 / point_dist\n else:\n with np.errstate(divide=\"ignore\"):\n dist = 1.0 / dist\n inf_mask = np.isinf(dist)\n inf_row = np.any(inf_mask, axis=1)\n dist[inf_row] = inf_mask[inf_row]\n return dist\n\n if callable(weights):\n return weights(dist)", "def _determine_new_weight(self, weight, input, currentNeuron, bmu):\n return weight \\\n + (self.neighborhood.fn(currentNeuron, bmu) \\\n * self.learning_rate * (input - weight))", "def newPoint(x, y, weighting, n):\n currX=x\n currY=y\n\n closest = np.argmax(weighting)\n closest_tuple = np.unravel_index(closest, (n+1, n+1))\n closestX, closestY = closest_tuple\n \n # Set x value\n if closestX == x:\n x = closestX\n elif closestX >= x+1:\n x = x+1\n elif closestX <= x-1:\n x = x-1\n\n # Set y value\n if closestY == y:\n y = closestY\n elif closestY >= y+1:\n y = y+1\n elif closestY <= y-1:\n y = y-1\n\n try:\n if weighting[x,y]==0:\n # top left corner\n if x==currX-1 and y==currY+1:\n if math.sqrt((currX-closestX)**2+(closestY-currY+1)**2)<=math.sqrt((currX-1-closestX)**2+(closestY-currY)**2) and weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n elif weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n else:\n if 
weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n elif weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n # top middle\n if x==currX and y==currY+1:\n if weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n elif weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n else:\n if weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n elif weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY\n # top right\n if x==currX+1 and y==currY+1:\n if math.sqrt((closestX-currX)**2+(closestY-currY+1)**2)<=math.sqrt((closestX-currX+1)**2+(closestY-currY)**2) and weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n elif weighting[x+1,y]!=0:\n x=currX+1\n y=currY\n else:\n if weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n elif weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n # middle left\n if x==currX-1 and y==currY:\n if weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n elif weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n else:\n if weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n elif weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n # middle RIGHT\n if x==currX+1 and y==currY:\n if weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY-1\n elif weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n else:\n if weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n elif weighting[x,y+1]!=0:\n x=currX\n y=currY+1\n # bottom left corner\n if x==currX-1 and y==currY-1:\n if math.sqrt((currX+1-closestX)**2+(currY-closestY)**2)<=math.sqrt((currX-closestX)**2+(currY-1-closestY)**2) and weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n elif weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n else:\n if weighting[x-1,y+1]!=0:\n x=currX-1\n y=currY+1\n elif weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY-1\n # bottom middle\n if x==currX and y==currY-1:\n if weighting[x+1,y-1]!=0:\n x=currX+1\n y=currY-1\n elif weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n else:\n if weighting[x-1,y]!=0:\n x=currX-1\n y=currY\n elif weighting[x+1,y]!=0:\n x=currX+1\n y=currY\n # bottom right\n if x==currX+1 and y==currY-1:\n if math.sqrt((closestX-currX)**2+(currY-1-closestY)**2)<=math.sqrt((closestX-currX+1)**2+(currY-closestY)**2) and weighting[x,y-1]!=0:\n x=currX\n y=currY-1\n elif weighting[x+1,y]!=0:\n x=currX+1\n y=currY\n else:\n if weighting[x-1,y-1]!=0:\n x=currX-1\n y=currY-1\n elif weighting[x+1,y+1]!=0:\n x=currX+1\n y=currY+1\n except:\n x=x\n y=y\n \n return x,y", "def _find_min_weight_edge(self, weight, visited):\n minimum = INF\n min_index = None\n for v in range(self.vertices):\n if weight[v] < minimum and not visited[v]:\n minimum = weight[v]\n min_index = v\n return min_index", "def moveto (solution, weight_cost, max_weight):\r\n \r\n moves =[]\r\n for idx, _ in enumerate (weight_cost): # idx will have index of weight_cost and _ will have exact values\r\n if idx not in solution:\r\n move = solution[:]\r\n move.append(idx)\r\n \r\n if get_cost_and_weight_of_knapsack(move,weight_cost)[1] <= max_weight:\r\n moves.append(move)\r\n \r\n for idx, _ in enumerate (solution):\r\n move = solution [:]\r\n del move [idx]\r\n if move not in moves:\r\n moves.append(move)\r\n return moves", "def FindTightFit(box_list,w):\n mini = w\n x = None\n for i in range(0,len(box_list)):\n curr = box_list[i].max_cap - box_list[i].curr_cap\n if curr >= w:\n x = box_list[i]\n rem = curr - w\n if curr > mini:\n mini = rem\n x = box_list[i]\n return x", "def get_dwi(data, weight=\"400\"):\n wt_dict = {'10':3, '100':4, '400':5, '800':6, '2000':7}\n di = wt_dict[weight]\n return [patient[di] for i, patient in enumerate(data) if i in good_patients]", "def suggest(suggestions):\n weight_sum = sum(suggestions.values())\n 
prob_ranges = []\n lower_bound = 0.0\n\n # generate probability ranges\n for task, weight in suggestions.iteritems():\n upper_bound = lower_bound + weight / weight_sum\n prob_ranges.append((task, (lower_bound, upper_bound)))\n\n # update lower bound\n lower_bound = upper_bound\n\n rand_number = random.random()\n\n for task, (low, high) in prob_ranges:\n if low <= rand_number < high:\n return task\n\n raise AssertionError('Should not be here. O_O');" ]
[ "0.7426046", "0.6773496", "0.6738176", "0.6716903", "0.67166317", "0.6681879", "0.6489452", "0.647155", "0.6244381", "0.6119967", "0.6119967", "0.6119967", "0.6103172", "0.5987175", "0.59611946", "0.5952065", "0.5946552", "0.594247", "0.59135944", "0.5880885", "0.5846692", "0.5805101", "0.5796779", "0.57954824", "0.57887274", "0.57846737", "0.5746038", "0.5729218", "0.5725849", "0.57252014", "0.56517965", "0.56402147", "0.5618314", "0.5594574", "0.5586536", "0.55850446", "0.55826616", "0.5567691", "0.5558268", "0.5519378", "0.5516917", "0.55121326", "0.5493504", "0.5486209", "0.54739225", "0.54706013", "0.5456956", "0.545459", "0.5429502", "0.5428273", "0.54239035", "0.5415697", "0.5406317", "0.5394556", "0.5386439", "0.53793526", "0.53776115", "0.5369279", "0.53630245", "0.53603524", "0.53452957", "0.53451055", "0.5326976", "0.5320668", "0.5319474", "0.5317986", "0.5315654", "0.53155994", "0.5297704", "0.52826566", "0.5280834", "0.5270757", "0.52695423", "0.52686304", "0.52684724", "0.52666557", "0.5257601", "0.52519697", "0.5249862", "0.5243005", "0.5238912", "0.52269375", "0.52156466", "0.5214575", "0.5209873", "0.5207262", "0.5201758", "0.52005845", "0.5198816", "0.51959807", "0.5194992", "0.5193861", "0.51822656", "0.51770335", "0.5158437", "0.5158305", "0.51411223", "0.51379496", "0.5133722", "0.5130794" ]
0.7051767
1
Adjusts top10 list in ascending order, by inserting a new item in appropriate place and adjusting others appropriately
def adjust_top10(value, pos, weight, top10, top10weights):
    # Create new top10 to be adjusted
    newtop10 = top10
    newtop10weights = top10weights
    # Keep higher ones, shift lower ones left one
    newtop10[0:pos] = top10[1:pos + 1]
    newtop10weights[0:pos] = top10weights[1:pos + 1]
    # add new ones
    newtop10[pos] = value
    newtop10weights[pos] = weight
    return (newtop10, newtop10weights)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top10(self, top10: List[Word]):\n\n self._top10 = top10", "def move_top ( self ):\n list, index = self.get_info()\n self.value = [ list[index] ] + list[:index] + list[index+1:]", "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)", "def top_n(items, n):\n\n for i in range(n):\n for j in range(len(items)-1-i):\n\n if items[j] > items[j+1]:\n items[j], items[j+1] = items[j+1], items[j]\n \n top_n = items[-n:]\n\n return top_n[::-1]", "def test_sort_more_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n e4 = Experience(rid=1, uid=22, experience=1839)\n e5 = Experience(rid=1, uid=2, experience=20)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.add(e4)\n db.session.add(e5)\n db.session.commit()\n list = top_n_in_order(1, 3)\n self.assertEqual([(22, 1839), (12, 1343), (3, 100)], list)", "def add_top_pairs(dry_run=False, pair_now=False):\n top = ratings.top_n(15)\n new_pairs = []\n for idx, t in enumerate(top[:10]):\n new_pairs += [[t[0], o[0]] for o in top[idx+1:idx+5]]\n\n if dry_run:\n print(new_pairs)\n return\n\n if pair_now:\n maybe_enqueue(new_pairs)\n else:\n _append_pairs(new_pairs)", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def update_highscores(self):\n for i in range(len(self.highscores)):\n if self.score >= self.highscores[i]:\n self.highscores.insert(i, self.score)\n self.highscores.pop()\n break", "def _set_top(self, user_n, item_n):\n self.user_n = user_n\n self.item_n = item_n", "def find_top_unique(self, list_of_entries, top_n):\n\n\n if len(list_of_entries) < top_n:\n self.top_n_too_large_label = Label(self.main_frame,\n fg=\"red\",\n text=\"Max N = %s\" % len(list_of_entries))\n if type(list_of_entries[0]) is AudioEntry:\n self.top_n_too_large_label.grid(row=13, column=4)\n if type(list_of_entries[0]) is VideoEntry:\n self.top_n_too_large_label.grid(row=13, column=5)\n raise Exception(\"N is larger than the total number of words\")\n\n if self.top_n_too_large_label is not None:\n self.top_n_too_large_label.grid_remove()\n\n sorted_by_count = sorted(list_of_entries, key=self.get_count, reverse=True)\n #self.top_n_too_large_label = Label(self.main_frame, fg=\"red\", text=\"Max N = %s\" % len(list_of_entries))\n unique_entries = [[] for i in range(top_n)]\n\n curr_rank = 0\n prev_count = None\n curr_count = None\n\n for entry in sorted_by_count:\n\n if entry.word in self.general_parser.words:\n entry.in_general = True\n else:\n entry.in_general = False\n\n curr_count = entry.count\n\n if prev_count is None:\n if entry.word not in self.specific_month_words:\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = 1\n continue\n\n\n if curr_rank >= top_n:\n break\n\n\n if entry.word not in self.specific_month_words:\n # increment rank if current entry has a different count\n # (the last set of entries having this 
count are all filled\n # into the unique_entries[])\n if curr_count != prev_count:\n curr_rank = curr_rank + 1\n if curr_rank >= top_n:\n break\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = curr_rank + 1\n continue\n unique_entries[curr_rank].append(entry)\n entry.rank = curr_rank + 1\n\n\n\n return unique_entries[0:curr_rank + 1]", "def pizza_sort(lst):\n length = len(lst)\n def based_god_help_me(lst,index=0):\n if index == length - 1:\n return\n greatest = index_largest(lst[index:]) + index\n lst[greatest], lst[index] = lst[index], lst[greatest]\n based_god_help_me(lst,index+1)\n return based_god_help_me(lst)", "def insertionSort(list):", "def get_top_10(data: List[EmissionPerCapita], current_year: int) -> List[EmissionPerCapita]:\r\n\r\n # Get the first 10 elements in data\r\n top_10_so_far = []\r\n for i in range(10):\r\n top_10_so_far.append(data[i])\r\n\r\n # Get the index for the current year.\r\n index = current_year - data[0].start_year\r\n\r\n # Mutate top_10_so_far to get the highest 10.\r\n for emission in data:\r\n for value in top_10_so_far:\r\n if value.epc_year[index] < emission.epc_year[index] and emission not in top_10_so_far:\r\n list.remove(top_10_so_far, value)\r\n list.append(top_10_so_far, emission)\r\n\r\n return top_10_so_far", "def evaluate_elf(calories):\n if calories > top_three[2]:\n top_three.append(calories)\n elif calories > top_three[1]:\n top_three.popleft()\n top_three.insert(1, calories)\n elif calories > top_three[0]:\n top_three[0] = calories", "def update_order():", "def update_order():", "def pizza_sort(lst):\n def help_func(lst, i):\n if i == len(lst) - 1:\n return lst\n else:\n tem1 = lst[i]\n tem2 = index_largest(lst[i:]) + i\n lst[i] = lst[index_largest(lst[i:]) + i]\n lst[tem2] = tem1\n return help_func(lst, i+1)\n return help_func\n help_func(lst, i=0)", "def put_sorted_cards(result, cards, weight):\n result.append((cards2str(sort_cards(cards)), weight))", "def cocktail_sort(num_list):\n\n # Setting variables\n start_index = 0\n end_index = len(num_list) - 1\n swapped = True\n\n while swapped:\n\n # Pass moves up\n swapped = False\n for i in range(start_index, end_index, 1):\n # Exchanges items\n if num_list[i] > num_list[i + 1]:\n temp = num_list[i]\n num_list[i] = num_list[i + 1]\n num_list[i + 1] = temp\n swapped = True\n end_index -= 1\n\n # Pass moves down\n swapped = False\n for i in range(end_index, start_index, -1):\n # Exchanges items\n if num_list[i] < num_list[i - 1]:\n temp = num_list[i]\n num_list[i] = num_list[i - 1]\n num_list[i - 1] = temp\n swapped = True\n start_index += 1", "def top10(self) -> List[Word]:\n return self._top10", "def get_item_based_topk(self, items, top_k=10, sort_top_k=False):\n\n # convert item ids to indices\n item_ids = items[self.col_item].map(self.item2index)\n\n # if no ratings were provided assume they are all 1\n if self.col_rating in items.columns:\n ratings = items[self.col_rating]\n else:\n ratings = pd.Series(np.ones_like(item_ids))\n\n # create local map of user ids\n if self.col_user in items.columns:\n test_users = items[self.col_user]\n user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())}\n user_ids = test_users.map(user2index)\n else:\n # if no user column exists assume all entries are for a single user\n test_users = pd.Series(np.zeros_like(item_ids))\n user_ids = test_users\n n_users = user_ids.drop_duplicates().shape[0]\n\n # generate pseudo user affinity using seed items\n pseudo_affinity = sparse.coo_matrix(\n (ratings, 
(user_ids, item_ids)), shape=(n_users, self.n_items)\n ).tocsr()\n\n # calculate raw scores with a matrix multiplication\n test_scores = pseudo_affinity.dot(self.item_similarity)\n\n # remove items in the seed set so recommended items are novel\n test_scores[user_ids, item_ids] = -np.inf\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(test_users.drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()", "def InsertSort(num_list):\n for i in range(1,len(num_list)):\n for j in range (i,0,-1):\n if num_list[j]<num_list[j-1]:\n num_list[j],num_list[j-1] = num_list[j-1],num_list[j]\n return num_list", "def sort(self): # sort all entries to make room for new ones, determine best and worst\n ns = self.num_stored.value\n ys = np.asarray(self.ys[:ns])\n yi = ys.argsort()\n sortRuns = []\n for i in range(len(yi)):\n y = ys[yi[i]]\n xs = self.get_x(yi[i])\n sortRuns.append((y, xs))\n numStored = min(len(sortRuns),int(0.9*self.capacity)) # keep 90% best \n for i in range(numStored):\n self.replace(i, sortRuns[i][0], sortRuns[i][1])\n self.num_sorted.value = numStored \n self.num_stored.value = numStored \n return numStored", "def perc_up(self, i):\r\n while i // 2 > 0:\r\n if self.items[i] > self.items[i // 2]:\r\n tmp = self.items[i // 2]\r\n self.items[i // 2] = self.items[i]\r\n self.items[i] = tmp\r\n i = i // 2", "def insert_top_to_bottom(deck: List[int]) -> None:\n last = deck[-1]\n\n if last == max(deck):\n last = last - 1\n first_part = deck[:last]\n second_part = deck[last: -1]\n del deck[:]\n deck.extend(second_part)\n deck.extend(first_part)\n deck.append(last + 1)\n\n else:\n first_part = deck[:last]\n second_part = deck[last: - 1]\n del deck[:]\n deck.extend(second_part)\n deck.extend(first_part)\n deck.append(last)", "def bring_to_front(self,itmkey):\n itms = self.get_items_list()\n if itmkey in itms:\n itm = itms[itmkey]\n z = itm['z']\n for k,it in itms.items():\n if it['z'] > z:\n it['z'] -= 1\n itm['z'] = len(itms)\n self.put_items_list(itms)\n return {'k':itmkey,'z':itm['z']}\n return None", "def recommend_k_items(self, test, top_k=10, sort_top_k=False, remove_seen=False):\n\n test_scores = self.score(test, remove_seen=remove_seen)\n\n top_items, top_scores = get_top_k_scored_items(scores=test_scores, top_k=top_k, sort_top_k=sort_top_k)\n\n df = pd.DataFrame(\n {\n self.col_user: np.repeat(test[self.col_user].drop_duplicates().values, top_items.shape[1]),\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )\n\n # drop invalid items\n return df.replace(-np.inf, np.nan).dropna()", "def insert_top_to_bottom(deck):\n \n number = deck[-1]\n if number != get_big_joker_value(deck): \n middle = deck[number:-1]\n deck[:] = middle + deck[:number] + [number]", "def insert_top_to_bottom(deck_of_cards):\n big_joker_value = get_big_joker_value(deck_of_cards)\n small_joker_value = get_small_joker_value(deck_of_cards)\n last_card = deck_of_cards[len(deck_of_cards) - 1]\n if not last_card == big_joker_value:\n top_few_cards = deck_of_cards[: last_card]\n deck_of_cards.extend(top_few_cards)\n\t# top_few_cards represents portion of deck to last_card index. 
This is \n\t# added to the bottom of the deck.\n deck_of_cards[: last_card] = []\n\t# Removes the duplicates\n deck_of_cards.remove(deck_of_cards[-(last_card + 1)])\n deck_of_cards.append(last_card)\n # This then added the last card to the bottom of the deck.", "def shifter(list):\n #sc1 = \"objects \" #Scaffolding message variables. Temporary\n #sc2 = \" and \"\n #sc3 = \" switched\"\n #sc4 = \" in order\"\n n = len(list) #Assign length of list to variable n\n x = 0 #Start at first position in list\n while listscan(list):\n if list[x] > list[x + 1]:\n t1= list[x] #Assign both items to a variable, then reinsert in opposite positions\n t2 = list[x + 1]\n list[x + 1] = t1\n list[x] = t2\n #print(sc1 + str(x) + sc2 + str(x + 1) + sc3)\n if x + 1 < n - 1: #Only when not at end\n x = x + 1 #Move position one more right\n else: #Base case when unsorted\n x = 0 #Restart Cycle\n else: #If sorted, and more room to right, move over one, leave items in position.\n if x + 1 < n - 1:\n #print(sc1 + str(x) + sc2 + str(x + 1) + sc4)\n x = x + 1\n else: #Base case. If at end of list, and items in order, leave.\n print(sc1 + str(x) + sc2 + str(x + 1) + sc4)\n x = 0 #Restart cycle", "def top_10():\n print(\"\\n\")\n usuarios_top = []\n with open(\"Basedatos.txt\", \"r\") as bd:\n datos = bd.readlines()\n for x in datos:\n y = x[:-1].split(\",\")\n usuarios_top.append(y)\n usuarios_top.sort(key=lambda usuarios_top: int(usuarios_top[4]), reverse=True) #ordena la lista de los usuarios por el puntaje de mayor a menor\n print(Fore.LIGHTRED_EX, \" \"*70, \"TOP 10\", Fore.RESET)\n usuarios = []\n for x in usuarios_top: #se almacenaran solo los 10 primeros usuarios en otra lista y se mostraran\n usuarios.append(x)\n if len(usuarios) <= 10:\n print(Fore.LIGHTMAGENTA_EX + \" \"*58, x[0], \"-\"*5 + \">\", x[4] +\"pts\", \"--\" + x[5], \"disparos\" + Fore.RESET)\n else: break\n print(\"\\n\")", "def _set_top_preps(self) -> None :\n prep_dict = self._system.getPReps(1, 20)\n prep_address_list = prep_dict['preps']\n for each_prep in prep_address_list:\n self._top_preps.put(each_prep['address'])", "def merge(items1, items2):\n # TODO: Repeat until one list is empty\n # TODO: Find minimum item in both lists and append it to new list\n # TODO: Append remaining items in non-empty list to new list\n sorted_list = []\n while len(items1) > 0 and len(items2) > 0:\n if items1[0] > items2[0]:\n sorted_list.append(items2.pop(0))\n else:\n sorted_list.append(items1.pop(0))\n sorted_list.extend(items1)\n del items1\n sorted_list.extend(items2)\n del items2\n return sorted_list\n\n # front = 0\n # back = (len(items1) - 1)\n # while len(items2) > 0:\n # value = items2.pop()\n # while front <= back:\n # pivot = ((front + back) // 2)\n # # if p f and b all equal the same index\n # if front == back:\n # # if the value is greater append at the back\n # if value > items1[back]:\n # items1.insert(back + 1, value)\n # break\n # # if the value is less than insert at index 0\n # if items1[back] < value:\n # items1.insert(0, value)\n # break\n # # if the value is equal to the value insert at index 0\n # # if f, p, and b are greater than the value\n # if items1[front] > value:\n # # insert the value before f and p\n # items1.insert(front, value)\n # break\n # # if b, p, and f are less than the value\n # if items1[back] < value:\n # # insert the value after b and p\n # items1.insert(back + 1, value)\n # break\n # if items1[pivot] > value:\n # back = pivot - 1\n # elif items1[pivot] < value:\n # front = pivot + 1\n # elif items1[pivot] == value:\n 
# items1.insert(pivot + 1, value)\n # break\n # front = 0\n # back = (len(items1) - 1)\n # return items1", "def cut_by_top(self, top_k=30000):\n if len(self.word2index) <= top_k:\n print(\"Word number (%s) is smaller Top K (%s)\" % (len(self.word2index), top_k))\n return\n\n word_count = list()\n for word, count in iteritems(self.word_count):\n word_count.append((count, word))\n word_count.sort(reverse=True)\n\n self.clear_dictionary(keep_special=True)\n\n added_top_num = 0\n for count, word in word_count:\n if added_top_num >= top_k:\n break\n if word not in self.special:\n self.add(key=word, count=count)\n added_top_num += 1\n\n print(\"After cut, Dictionary Size is %d\" % len(self))", "def prioritize_candidates(lst_cand):\n print(f\"\\nprioritize_candidates(); len = {len(lst_cand)}\")\n if len(lst_cand) > 1:\n for n in range(len(lst_cand)):\n nc = list(lst_cand[n])\n nc.insert(0,0)\n lst_cand[n] = nc\n for cand in lst_cand:\n # some text adds p\n if cand[1].find(\"Okay\") > -1:\n cand[0] += 10\n if cand[1].lower().find(\"serie\") > -1:\n cand[0] += 10\n if cand[1].find(\"__NAM\") > -1:\n cand[0] += 10\n if cand[1].find(\"BIX_\") > -1:\n cand[0] += 10\n if cand[1].find(\"REF_\") > -1:\n cand[0] += 10\n if cand[1].find(\"veracrypt1\") > -1:\n cand[0] += 100\n if cand[1].find(\"veracrypt2\") > -1:\n cand[0] += -10\n # some text cost p\n if any([cand[1].find(f\"-{n}\") > -1 for n in range(9)]):\n cand[0] -= 5\n if cand[1].find(\"DEL\") > -1:\n cand[0] -= 100\n if cand[1].find(\"copy\") > -1:\n cand[0] -= 50\n if cand[1].find(\"output\") > -1:\n cand[0] -= 6\n if cand[1].find(\".part\") > -1:\n cand[0] -= 9\n # deeper path adds p\n cand[0] += cand[1].count(os.sep)\n # If still even, older is better\n lst_top = [cand for cand in sorted(lst_cand, reverse=True)]\n if lst_top[0][0] == lst_top[1][0]: # No winner\n if lst_top[0][2] < lst_top[1][2]: # head is oldest\n lst_top[0][0] += 1\n else:\n lst_top[1][0] += 1\n return lst_top\n else: # Too few to prioritize\n return lst_cand # return unchanged", "def update_item_orders(begin_order, t_task, projects, api, cmd_count):\n for task in t_tasks.values():\n if is_in_the_same_proj(task, projects) and task['item_order'] >= begin_order:\n api.items.get_by_id(task['id']).update(item_order=task['item_order']+1)\n update_cmd_count(api)", "def getTopTen():\n\n if moviesRanked > 10:\n return moviesRanked[0:10]\n else: \n return moviesRanked", "def pagerank(dict_prefs, nitems, eps_search=20):\n prefs_mat=np.zeros((nitems,nitems))\n for k,v in dict_prefs.items():\n if v==0:\n continue\n elif v>0:\n prefs_mat[k[1],k[0]]+=v\n else:\n prefs_mat[k[0],k[1]]-=v\n prefs_mat_orig=prefs_mat.copy()\n eps_grid=list(.5**np.logspace(0,1,eps_search))\n best=-10^5\n best_order=None\n \n for eps in eps_grid:\n prefs_mat=prefs_mat_orig.copy()\n for i in range(nitems):\n prefs_mat[:,i]+=eps\n tot=np.sum(prefs_mat[:,i])\n prefs_mat[:,i]=prefs_mat[:,i]/tot\n\n \n pr=np.ones((nitems,1))/nitems\n for i in range(30):\n pr=prefs_mat.dot(pr)\n lst_pagerank=list(np.argsort(pr.reshape(-1)))\n score_this_order=eval_ordering(lst_pagerank,dict_prefs)\n if score_this_order>best:\n best=score_this_order\n best_order=deepcopy(lst_pagerank)\n return best_order", "def print_scores(ranked_list, links, topN=10):\n n = topN\n for url, score in ranked_list.items():\n try:\n _ = links.index(url)\n print(\"Score: %f \\t URL: %s\" %(score, url))\n\n n -= 1\n\n if n <= 0:\n break\n except:\n pass", "def push(self, element, value):\n insert_pos = 0\n for index, el in enumerate(self.tops):\n if not 
self.find_min and el[1] >= value:\n insert_pos = index + 1\n elif self.find_min and el[1] <= value:\n insert_pos = index + 1\n self.tops.insert(insert_pos, [element, value])\n self.tops = self.tops[: self.n]", "def insertion_sort(items, key):\n # if order == \"reverse\":\n # compare = operator.lt\n # elif order == \"normal\":\n # compare = operator.gt\n global COMPARE\n\n # Repeat until all items are in sorted order\n for index in range(len(items)):\n iterator = index\n\n # Take first unsorted item\n while COMPARE(key(items[iterator-1]), key(items[index])) and iterator > 0:\n iterator -= 1\n # Insert it in sorted order in front of items\n sorteditem = items.pop(index)\n items.insert(iterator, sorteditem)\n\n return items", "def update_top_categories(self):\n cats = self.sorted_categories()\n display = \"\"\n for cat in cats:\n hit = \"hits\" if self.category_map[cat] > 1 else \"hit\"\n display += f\"{cat}, {self.category_map[cat]} {hit}\\n\"\n self.top_categories.configure(state=\"normal\")\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state=\"disabled\")", "def reorder_list(items, arg=''):\n\n if arg:\n\n for i in items:\n if i == arg:\n items.remove(i)\n items.insert(0, arg)\n\n return items", "def insert_sort(a):\n size = len(a)\n # find key\n for key in range(1, size):\n key_to_insert = a[key]\n # insert key in sorted sequence\n for margin in reversed(range(0, key)):\n if key_to_insert < a[margin]:\n a[margin + 1] = key_to_insert\n break\n else:\n # SHIFT THE MARGIN\n a[margin + 1] = a[margin]\n else:\n a[0] = key_to_insert\n return a", "def Insert(self, val, extra=None):\n if self._size >= 0:\n if val > self.best[0]:\n idx = bisect.bisect(self.best, val)\n # insert the new element\n if idx == self._size:\n self.best.append(val)\n self.extras.append(extra)\n else:\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)\n # and pop off the head\n self.best.pop(0)\n self.extras.pop(0)\n else:\n idx = bisect.bisect(self.best, val)\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)", "def insertion_sort(items):\n # Repeat until all items are in sorted order\n # Take first unsorted item\n # Insert it in sorted order in front of items\n sorted_index = 1\n while not is_sorted(items):\n num = items.pop(sorted_index)\n \n back_index = sorted_index - 1\n for back_num in items[sorted_index-1::-1]:\n if num > back_num:\n items.insert(back_index + 1, num)\n break\n\n back_index -= 1\n else:\n items.insert(0, num)\n \n sorted_index += 1", "def insertion_sort(new_list):\n for _i in range(1, len(new_list)): \n _point = new_list[_i]\n _j = _i-1\n while _j >=0 and _point < new_list[_j] : \n new_list[_j+1] = new_list[_j] \n _j -= 1\n new_list[_j+1] = _point", "def gap_insertion_sort(num_list, start, gap):\n\n # Creates sublists for the sublist gap\n for i in range(start + gap, len(num_list), gap):\n\n # New item to be inserted into the sublist gap\n current_value = num_list[i]\n position = i\n\n while position >= gap and num_list[position - gap] > current_value:\n # Shift item to current position\n num_list[position] = num_list[position - gap]\n position -= gap\n\n # Sets new position to current value\n num_list[position] = current_value", "def listOrdering(self):\r\n index = 0\r\n while( index < len(self.sortedList)-1):\r\n if(self.sortedList[index][2] > self.sortedList[index+1][2]): # positions in wrong order\r\n self.sortedList[index], self.sortedList[index+1] = self.sortedList[index+1], self.sortedList[index] 
# switch\r\n if(self.sortedList[index][2] == self.sortedList[index+1][2]): # Position conflict\r\n if(self.sortedList[index][1] <= self.sortedList[index+1][1]): # Already ordered by id\r\n self.sortedList[index+1][2] += 1 # position altered for second rule\r\n else:\r\n self.sortedList[index][2] += 1\r\n self.sortedList[index], self.sortedList[index+1] = self.sortedList[index+1], self.sortedList[index] # switch\r\n index += 1", "def insertion_sort(my_list):\n\n # Start at the second element (pos 1).\n # Use this element to insert into the\n # list.\n for key_pos in range(1, len(my_list)): # n\n\n # Get the value of the element to insert\n key_value = my_list[key_pos]\n\n # Scan from right to the left (start of list)\n scan_pos = key_pos - 1\n\n # Loop each element, moving them up until\n # we reach the position the\n while (scan_pos >= 0) and (my_list[scan_pos] > key_value): # n/4, total of n squared / 4\n my_list[scan_pos + 1] = my_list[scan_pos]\n scan_pos = scan_pos - 1\n\n # Everything's been moved out of the way, insert\n # the key into the correct location\n my_list[scan_pos + 1] = key_value", "def heap_sort_increase(alist):\r\n heap = MaxHeap()\r\n heap.build_heap(alist)\r\n originalSize = heap.size\r\n for i in range(heap.size):\r\n maxVal = heap.items[1]\r\n heap.del_max()\r\n heap.items[originalSize-i] = maxVal\r\n return heap.items[1:originalSize+1]", "def sort(self):\n # Base Case\n # If the robot has reached the end of the list and his light is off (no swaps have occurred),\n if self.can_move_right() == False and self.light_is_on() == False:\n return\n\n # Grab the first card\n self.swap_item()\n\n # While the robot is still able to move right,\n while self.can_move_right():\n\n # Move right\n self.move_right()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is greater than what he is holding (-1), swap items\n if self.compare_item() == -1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once the robot can no longer move right, he is at the end of the list and holding the largest value\n # Swap items\n self.swap_item()\n\n # Now the robot needs to traverse back to index 0, grabbing the smallest value as he goes\n # Follow the same logic as when he moved right with the largest value\n\n # If he hits a empty slot in the list, everything in front of it has been sorted\n # He doesn't need to sort anymore, he is holding the smallest value left to be sorted. 
\n # Put it in the blank spot and turn to move back in the other direction\n\n while self.compare_item() is not None:\n\n # Move left\n self.move_left()\n\n # Compare the item in his hand to that in front of him\n # If the item in front of him is less than what he is holding (1), swap items\n if self.compare_item() == 1:\n # Swap the item\n self.swap_item()\n # Turn his light on to indicate that a swap has occured\n self.set_light_on()\n \n # Once self.compare_item() is None, that means he is in front of a blank space\n # - everything to the left of the blank space has already been sorted\n # Deposit what he is holding\n self.swap_item()\n\n # Reset the light to the off position\n self.set_light_off()\n\n # Move one spot over to the right\n self.move_right()\n\n # Re-run the process all over again\n self.sort()", "def _insert_card(self, new_card):\n \"\"\"\n i = 0\n cmp_fun = _compare_lt_sort if self.cmp_reverse else _compare_gt_sort\n newval = new_card.get_value(self.sort_by)\n for card in self.cards[(self.current_page - 1) * self.max_cards:\n self.current_page * self.max_cards]:\n val = card.get_value(self.sort_by)\n if cmp_fun(val, newval):\n self._swap_cards(card, new_card, i)\n return\n i += 1\n self._swap_cards(None, new_card, i)\n \"\"\"\n if not self._should_remove(new_card.id, new_card.get_obj()):\n self.cards.append(new_card)\n self._add_card(new_card)", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def __get_top_with_detail(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productId', keep=\"first\")[\n :top]\n\n return result", "def findRelativeRanks(nums):\n compare_lst = copy.deepcopy(nums)\n compare_lst.sort(reverse=True)\n for i in nums:\n compare_index = compare_lst.index(i)\n nums_index = nums.index(i)\n if compare_index > 2:\n nums[nums_index] = str(compare_index + 1)\n elif compare_index == 0:\n nums[nums_index] = 'Gold Medal'\n elif compare_index == 1:\n nums[nums_index] = 'Silver Medal'\n else:\n nums[nums_index] = 'Bronze Medal'\n return nums", "def quick_sort_growing(list_num: list) -> list:\n\n if len(list_num) < 2: # if length of the list is less than 2\n return list_num # return input list\n else:\n # randomly choose a number from list_num and write it to the variable pivot\n pivot = list_num[random.choice(range(len(list_num)))]\n # create list where all numbers is less than pivot\n less_part = [i for i in list_num[:] if i < pivot]\n # create list where all numbers is greater than pivot\n greater_part = [i for i in list_num[:] if i > pivot]\n # recursive call of the function with less part (greater part) until base case\n return quick_sort_growing(less_part) + [pivot] + quick_sort_growing(greater_part)", "def truncate_ocr_sim_list(token, ocr_sims_list, limit=10):\n if len(ocr_sims_list) <= limit:\n return ocr_sims_list\n\n ocr_scores = set([sc for sim, sc in ocr_sims_list.items()])\n\n # Limit of 10 different scores allowed\n sorted_ocr_scores = sorted(ocr_scores, reverse=True)[:limit]\n ocr_list = []\n for score in sorted_ocr_scores:\n tmp_ocr_list = [ocr_sims for ocr_sims, ocr_score in ocr_sims_list.items() if ocr_score == score]\n\n if len(ocr_list) + len(tmp_ocr_list) > limit:\n list_len = limit - 
len(ocr_list)\n tmp_list = []\n\n while len(tmp_list) < list_len:\n tmp_list += select_lower_edit_distance(token, tmp_ocr_list)\n\n if len(ocr_list) + len(tmp_list) == limit: # Final list has exactly 10 elements\n ocr_list += tmp_list\n break\n else: # List has more than 10 arguments (need to chose only the n elements needed)\n alpha_tmp_list = []\n\n while len(alpha_tmp_list) != list_len:\n alpha_word = select_best_alphabetical_word(token, tmp_list)\n\n alpha_tmp_list.append(alpha_word)\n tmp_list = [tkn for tkn in tmp_list if tkn != alpha_word]\n\n ocr_list += alpha_tmp_list\n break\n elif len(ocr_list) + len(tmp_ocr_list) == limit:\n ocr_list += tmp_ocr_list\n break\n else: # len(ocr_list) + len(tmp_ocr_list) < limit\n ocr_list += tmp_ocr_list\n\n if len(ocr_list) != limit:\n raise IndexError(\"OCR list is still too big (\"+str(len(ocr_list))+\"/\"+str(limit)+\")\")\n\n return {tkn: ocr_sims_list[tkn] for tkn in ocr_list}", "def top5list(ascending):\n sorted_change, current_month_pred, previous_month_pred = get_change_list()\n sorted_change.sort(reverse=ascending)\n to_send = list()\n for j in range(0, 5):\n name = sorted_change[j][1]\n change_ammount = sorted_change[j][0]\n try:\n base_price = msp_details.query.filter_by(crop=name).first()\n base2021 = base_price.year2021\n except:\n base2021 = msp(name)\n current_month_price = round((current_month_pred[name] * base2021) / 100, 2)\n previous_month_price = round((previous_month_pred[name] * base2021) / 100, 2)\n to_send.append([name, current_month_price, previous_month_price, current_month_pred[name],\n previous_month_pred[name], change_ammount])\n return to_send", "def word_count_sort(word_count_list):\n\n for index in range(1, len(word_count_list)):\n # initialize pointers\n value = word_count_list[index] # starts at the tuple in index 1\n position = index - 1 # initialize to start at 0\n\n # move items to a higher index position while their value is less than the value at the next index\n # compare values in tuple[1] but swap entire tuple\n while position >= 0 and word_count_list[position][1] < value[1]:\n word_count_list[position + 1] = word_count_list[position] # swap the tuple at position into next index\n position -= 1 # decrement to fill lower index and break loop\n\n word_count_list[position + 1] = value # move higher number left one index\n\n return word_count_list", "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def topMoves(board, limit):\r\n spots = set()\r\n top_list = []\r\n top_queue = PriorityQueue()\r\n\r\n # For each piece on the board\r\n # TODO: This should be all I need\r\n\r\n #board.print_board()\r\n #print(board.get_filled_coordinates())\r\n\r\n for n in board.get_filled_coordinates():\r\n\r\n # For each potential connect space within range\r\n for m in attackArea(n, board.connect):\r\n\r\n (x, y) = m\r\n\r\n # If the connect space is on the board, add to list of potential spots\r\n if board.cell_exists(x, y) and m not in board.get_filled_coordinates():\r\n spots.add(m)\r\n\r\n trackingList = []\r\n\r\n # Evaluate potential of each spot, and add to queue\r\n for p in spots:\r\n top_queue.put((evaluatePosition(board, 
p) * (-1), p))\r\n trackingList.append(str((evaluatePosition(board, p) * (-1), p)))\r\n\r\n for z in range(limit):\r\n top_list.append(top_queue.get())\r\n\r\n #print(\"Queue: \" + str(trackingList))\r\n\r\n for record in top_list:\r\n #print(str(record))\r\n pass\r\n\r\n # return map(lambda (x, y): (-x, y), top_list)\r\n return top_list[0]", "def top_n(values, first_n=10):\n values = iter(values)\n top = [val for val in islice(values, first_n)]\n if len(top) < first_n:\n return top\n heapq.heapify(top)\n for val in values:\n heapq.heappushpop(top, val)\n return top", "def top(self, top):\n self.ptr.top(top)", "def top(self):", "def insertion_sort(a_list):\n \n for index in range(1, len(a_list)):\n value = a_list[index]\n position = binary_search(a_list, 0, index, value)\n\n for subIndex in range(index, position, -1):\n temp = a_list[subIndex]\n a_list[subIndex] = a_list[subIndex - 1]\n a_list[subIndex - 1] = temp", "def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()", "async def top_10_specs(self):\r\n players = await self.get_players()\r\n specs = []\r\n for player in players:\r\n specs.append(player['specId'])\r\n del specs[10:]\r\n await ctx.message.channel.send('Top 10 3v3 Composition:')\r\n for key in self.specs:\r\n if specs.count(int(key)) > 0:\r\n await ctx.message.channel.send('{:s}: {:d}'.format(self.specs[key], specs.count(int(key))))", "def quick_sort(items):\n if len(items) &gt; 1:\n pivot_index = len(items) / 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val &lt; items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items", "def largest_ten(list1):\n result = []\n for i in range(10):\n max1 = list1[0]\n for j in list1:\n if max1 < j:\n max1 = j\n result.append(max1) ##the final target is the max1 when finishing this loop\n list1.remove(max1)\n\n return result", "def move_items(self):\n self.set_fibonnaci_levels()", "def shift_item_up(self, index):\n while index > 0:\n parent_index = index // 2\n if parent_index > 0 and self.heaplist[parent_index] < self.heaplist[index]:\n self.heaplist[parent_index], self.heaplist[index] = self.heaplist[index], self.heaplist[parent_index]\n index = index // 2", "def SortList(self, key: callable = str.lower):\n temp_list = self.Items\n temp_list.sort(key=key)\n # delete contents of present listbox\n self.delete(0, Tags.End.value)\n # load listbox with sorted data\n for item in temp_list:\n self.insert(Tags.End.value, item)", "def sort(artist_list):\n if len(artist_list) > 1: # If the length of the list is greater than 1 run this\n middleIndex = len(artist_list) // 2 # Middle Index is halfway point of the list using int division\n leftList = artist_list[:middleIndex]\n rightList = artist_list[middleIndex:] # left and right list created by splitting list by that middle index\n sort(leftList)\n sort(\n rightList) # recursive call of the left and right list to further break down the list till it gets to\n # length 1 where it will no longer run the if statement\n indexMain = 0\n indexLeft = 0\n indexRight = 0 # creation of indexes for each list\n while indexLeft < 
len(leftList) and indexRight < len(\n rightList): # Runs through the left and right list at the same time while comparing them until one\n # of the lists reaches the end\n leftString = leftList[indexLeft].lower()\n leftString = leftString.replace(\" \", \"\")\n rightString = rightList[indexRight].lower()\n rightString = rightString.replace(\" \",\n \"\") # Right and Left string created by standarizing each string to all\n # lowercase and no spaces\n if leftString <= rightString:\n artist_list[indexMain] = leftList[indexLeft]\n indexLeft += 1 # If the leftString is alphabetically before rightString i.e. <= it will set the main\n # string at indexMain to the leftList at indexLeft\n else:\n artist_list[indexMain] = rightList[indexRight]\n indexRight += 1 # If the leftString is alphabetically after rightString i.e. <= it will set the main\n # string at indexMain to the rightList at indexRight\n indexMain += 1 # Index of main increased by 1 to go to next element\n while indexLeft < len(leftList):\n artist_list[indexMain] = leftList[indexLeft]\n indexLeft += 1\n indexMain += 1 # All remaining parts of the left list if left is greater than right at that time,\n # run through here to set remaining values of index main to leftList\n while indexRight < len(rightList):\n artist_list[indexMain] = rightList[indexRight]\n indexRight += 1\n indexMain += 1 # All remaining parts of the right list if right is greater than left at that time,\n # run through here to set remaining values of index main to rightList", "def __addToLevel(self, head, value):\n\n #if DEBUG: print('\\t__addToLevel({})'.format(value))\n\n cur = head\n \n if cur.next == None:\n output = self.__insert(cur,value)\n return output\n \n #cur = cur.next\n\n while cur:\n if cur.next == None or \\\n cur.val == value or\\\n cur.next.val > value:\n output = self.__insert(cur,value)\n #output = cur\n break\n cur = cur.next\n return output", "def fill_first_stool(self: 'TOAHModel', number_of_cheeses: int):\n self._number_of_cheeses = number_of_cheeses\n first_stool = self.stool_lst[0]\n for cheese in range(1, number_of_cheeses+1):\n first_stool.append(Cheese(cheese))\n first_stool.sort(key = lambda cheese:cheese.size, reverse=True)\n self.end_game_stool = first_stool.copy()", "def sorting_alg(self, my_list):\n for i in range(len(my_list)):\n for j in range(i+1, len(my_list)):\n if my_list[i] > my_list[j]:\n my_list[i], my_list[j] = my_list[j], my_list[i]\n #print(my_list)\n #sleep(1)\n return my_list", "def _reset_top_preps(self) -> None:\n if self._system.getIISSInfo()[\"nextPRepTerm\"] > self._block_height_week.get() + (7 * 43200):\n self._block_height_week.set(self._system.getIISSInfo()[\"nextPRepTerm\"])\n for i in range(len(self._top_preps)):\n self._top_preps.pop()\n self._set_top_preps()", "def counting_sort(mylist, position):\n length = len(mylist)\n final = [0] * length # Final sorted list\n temp = [0] * 10 # Current sorted list\n\n for i in range(length): # Add last digit of element to temp\n index = mylist[i]\n temp[index % 10] += 1\n\n for i in range(1, 10): # Shift over elements\n temp[i] += temp[i - 1]\n\n i = length - 1 # Index to last element of list\n \n while i >= 0: # Traverse from right to left\n index = mylist[i]\n final[temp[index % 10] - 1] = mylist[i] # Add elements to final list by sorted position order\n temp[index % 10] -= 1\n i -= 1\n\n for i in range(length): # Save final list back to mylist\n mylist[i] = final[i]", "def insertion_sort(items):\n for i in range(1, len(items)):\n j = i\n while j > 0 and items[j] < 
items[j-1]:\n items[j], items[j-1] = items[j-1], items[j]\n j -= 1", "def dataPrep(mydata: list) -> list:\n mylist = [int(elm) for elm in mydata]\n\n volt = int(max(mylist)) + 3\n start = 0\n\n mylist.extend([volt, start])\n mylist.sort()\n\n return mylist", "def set_order(main_lst, lst_to_set):\r\n rating = []\r\n for film_num in range(len(main_lst)):\r\n if main_lst[film_num] == 0 or lst_to_set[film_num] == 0:\r\n main_lst.pop(film_num)\r\n lst_to_set.pop(film_num)\r\n rating.append(lst_to_set[main_lst.index(film_num + 1)])\r\n return rating", "def ExpandTopInto(src_queue, trg_queue, cached_states, min_bound=1.0):\n _, best_state = src_queue[0]\n # Produce more candidate items.\n new_states = best_state.ProduceNewStates()\n for new_state in new_states:\n if new_state.state_id not in cached_states:\n score = new_state.score * min_bound\n heapq.heappush(trg_queue, (score, new_state))\n cached_states.add(new_state.state_id)", "def top_students(mongo_collection):\n all_items = mongo_collection.find({})\n for item in all_items:\n count = 0\n new_topics = item\n for sta in item.get(\"topics\"):\n count += sta.get(\"score\")\n averageScore = count/len(item.get(\"topics\"))\n\n myquery = {\"name\": item.get(\"name\")}\n newvalues = {\"$set\": {\"averageScore\": averageScore}}\n mongo_collection.update_many(myquery, newvalues)\n\n order = mongo_collection.find().sort(\"averageScore\", DESCENDING)\n\n return order", "def percolate_up(self, i):\n while i // 2 > 0:\n if self.heap_list[i] > self.heap_list[i // 2]:\n tmp = self.heap_list[i // 2]\n self.heap_list[i // 2] = self.heap_list[i]\n self.heap_list[i] = tmp\n i = i // 2", "def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: tup[1]))[:self._top_values]", "def thickenXYList( list, tester, biSectionMax=6, interpolation=xDataEnumsModule.Interpolation.linlin):\n\n def thickenXYList2( interpolation, xl, yl, xu, yu, newList, tester, level ) :\n\n if( level == biSectionMax ) : return\n level += 1\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.loglin:\n xMid = 0.5 * ( xl + xu )\n else :\n xMid = math.sqrt( xl * xu );\n\n if interpolation == xDataEnumsModule.Interpolation.linlin or interpolation == xDataEnumsModule.Interpolation.linlog:\n yMid = 0.5 * ( yl + yu )\n else :\n yMid = math.sqrt( yl * yu )\n\n y = tester.evaluateAtX( xMid )\n\n dy = abs( y - yMid )\n if( ( dy > abs( y * tester.relativeTolerance ) ) and ( dy > tester.absoluteTolerance ) ) :\n newList.append( [ xMid, y ] )\n thickenXYList2( interpolation, xl, yl, xMid, y, newList, tester, level )\n thickenXYList2( interpolation, xMid, y, xu, yu, newList, tester, level )\n\n if( len( list ) < 2 ) : raise Exception( \"len( list ) = %d < 2\" % len( list ) )\n newList = []\n for i1, xy in enumerate( list ) :\n x2, y2 = xy\n if( i1 > 0 ) : thickenXYList2( interpolation, x1, y1, x2, y2, newList, tester, 0 )\n newList.append( [ x2, y2 ] )\n x1, y1 = x2, y2\n newList.sort( )\n return( newList )", "def merge_sorth_in_place(num_list, start_index, end_index):\n pass", "def personal_top_three(scores: list) -> list:\n scores_inverted = [~score for score in scores]\n heapify(scores_inverted)\n return [~heappop(scores_inverted) for _ in range(min(len(scores), 3))]", "def top10_likelihoods(likelihoods, vocab, classes):\r\n resultDict = {}\r\n for cls in classes:\r\n results = []\r\n for word in vocab:\r\n results.append((word, 
likelihoods[cls][word]))\r\n resultDict[cls] = results\r\n # Sort and return top 10 for each class\r\n for key in resultDict:\r\n results = resultDict[key]\r\n resultDict[key] = map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]\r\n return resultDict", "def insertionsort(A:list) -> \"void\":\n\tfor j in range(1, len(A)):\n\n\t\tkey = A[j]\n\t\ti = j - 1\n\n\t\twhile i >= 0 and A[i] > key:\n\t\t\tA[i+1] = A[i]\n\t\t\ti = i - 1\n\n\t\tA[i+1] = key", "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n for i in range(len(nums) // 2):\n nums.insert(i*2+1, nums.pop())", "def insertion_sort(arr: List) -> None:\n for i in range(len(arr) - 1):\n if arr[i] > arr[i + 1]:\n for k in range(i, -1, -1):\n if arr[i+1] >= arr[k]:\n arr.insert(k + 1, arr.pop(i+1))\n break\n else:\n arr.insert(0, arr.pop(i + 1))", "def insertionsortIndex(A:list, p:int, r:int) -> \"void\":\n\tfor i in range(p + 1, r + 1):\n\t\t\n\t\tkey = A[i]\n\t\tj = i - 1\n\n\t\twhile j >= p and key < A[j]:\n\t\t\tA[j+1] = A[j]\n\t\t\tj = j - 1\n\t\tA[j+1] = key", "def bottom_up_merge_sort(items):\n subarray_size = 1\n\n while subarray_size < len(items)//2:\n ## Continue making passes through items until the subarray size is\n ## the size of items, since this means items is finally sorted.\n for i in range(0, len(items), subarray_size):\n merge(items, i, i+subarray_size*2, subarray_size)\n subarray_size *= 2", "def calc_scores(unscored_list, master_list):\n num_scores = len(unscored_list)\n best_score = master_list[0]['score']\n worst_score = master_list[num_scores]['score']\n score_interval = (best_score - worst_score) / num_scores\n score = best_score\n\n for item in unscored_list:\n item['score'] = score\n score -= score_interval\n return unscored_list", "def update(self, item, outcome, thresh=.9):\n if outcome > thresh:\n new_box = self.recent_box + 1\n if new_box >= len(self.boxes):\n self.cards.remove(item)\n else:\n self.boxes[new_box].appendleft(item)\n else:\n new_box = max(self.recent_box - 1, 0) \n self.boxes[new_box].appendleft(item)", "def top_items(self, n=10, filter=None):\n if n > len(self): n = len(self)\n order = np.argsort(self)\n if filter is None:\n indices = order[-1:-n-1:-1]\n return [(self.label(idx), self[idx]) for idx in indices]\n idx = -1\n results = []\n while len(results) != n and idx >= -len(order):\n where = order[idx]\n label = self.label(where)\n if filter(label):\n results.append((label, self[where]))\n idx -= 1\n return results" ]
[ "0.69200873", "0.61177766", "0.6019761", "0.5785143", "0.574743", "0.56948155", "0.5632971", "0.5610252", "0.5595934", "0.557611", "0.5559975", "0.5536772", "0.5520824", "0.5512247", "0.5477111", "0.54510283", "0.54510283", "0.54265624", "0.5416262", "0.5411289", "0.54062337", "0.5388858", "0.53738225", "0.5361916", "0.53570336", "0.533932", "0.5334352", "0.5325912", "0.5322592", "0.5313717", "0.52620184", "0.52507734", "0.5247309", "0.52360064", "0.5235516", "0.52108943", "0.520006", "0.51998025", "0.5192338", "0.51876926", "0.51842654", "0.51791805", "0.51702034", "0.5169582", "0.51592666", "0.5154913", "0.5149752", "0.5145385", "0.5143394", "0.51425445", "0.51367944", "0.5131683", "0.51092094", "0.5108741", "0.5099477", "0.509477", "0.5092832", "0.5081785", "0.5080127", "0.50798464", "0.50785977", "0.50708807", "0.50672626", "0.50618815", "0.5048923", "0.5042354", "0.5039984", "0.5036991", "0.50366366", "0.50352067", "0.50150865", "0.50100905", "0.49978396", "0.49884835", "0.49838588", "0.49804533", "0.49772748", "0.49765143", "0.49692416", "0.49614254", "0.49600306", "0.4959983", "0.4958961", "0.4957353", "0.49533504", "0.49516657", "0.4947018", "0.49441397", "0.49423042", "0.4941401", "0.49410748", "0.49400625", "0.49377826", "0.49343216", "0.49257624", "0.4925453", "0.492476", "0.49175343", "0.4915676", "0.49049968" ]
0.72702825
0
Calculates the next state of a given 'board' following the classic rules of Conway's Game Of Life
def original(arr):
    height = np.shape(arr)[0]
    width = np.shape(arr)[1]
    result = np.array(arr)
    for row in range(height):
        for col in range(width):
            neighbors = 0
            val = result[row][col]
            for i in range(-1, 2):
                for j in range(-1, 2):
                    if i == 0 and j == 0:
                        # The cell itself cannot be counted as a neighbor
                        continue
                    if row + i < 0 or col + j < 0 or row + i > height or col + j > width:
                        # Out of bounds
                        continue
                    with suppress(IndexError):
                        if arr[row + i][col + j] == 1:
                            neighbors += 1
            if neighbors == 3 and val == 0:
                # Cell becomes alive
                result[row][col] = 1
            elif neighbors > 3 and val == 1 or neighbors < 2 and val == 1:
                # Cell dies
                result[row][col] = 0
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)", "def gameOfLife(self, board: 'List[List[int]]') -> None:\n m, n = len(board), len(board[0])\n\n def calc(i, j):\n neighbors = [\n [i-1, j-1],[i-1,j],[i-1,j+1],\n [i, j-1],[i,j+1],\n [i+1, j-1],[i+1, j],[i+1,j+1]\n ]\n sum = 0\n for r,c in neighbors:\n if 0 <= r < m and 0 <= c < n:\n sum += (board[r][c] & 1)\n return sum\n\n for i in range(m):\n for j in range(n):\n status = calc(i, j)\n if board[i][j] == 1 and (status == 2 or status == 3):\n board[i][j] = 3\n else:\n if status == 3:\n board[i][j] = 2\n for i in range(m):\n for j in range(n):\n board[i][j] >>= 1", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. 
If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def next_state_of_cell(self, x_cell, y_cell):\n neighbours = self.get_number_neighbours_of_cell(x_cell, y_cell)\n if(self.board_state[x_cell][y_cell] == 1):\n # Any live cell with more than three live neighbours dies, \n # as if by overpopulation.\n if(neighbours > 3):\n return 0\n # Any live cell with fewer than two live neighbours dies,\n # as if by underpopulation.\n elif(neighbours < 2):\n return 0\n # Any live cell with two or three live neighbours lives\n # on to the next generation.\n else:\n return 1\n if(self.board_state[x_cell][y_cell] == 0):\n # Any dead cell with exactly three live neighbours becomes a live cell, \n # as if by reproduction.\n if(neighbours == 3):\n return 1\n else:\n return 0", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif 
opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == 
self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def gameOfLife(self, board):\n \n # Neighbours array for 8 neighboring cells of a given cell\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n \n rows = len(board)\n cols = len(board[0])\n \n # Iterate through the board by each cell\n for row in range(rows):\n for col in range(cols):\n \n # For each cell counting number of live neighbors\n live_neighbors = 0\n for neighbor in neighbors:\n \n # row and column of neighboring cell\n r = (row + neighbor[0])\n c = (col + neighbor[1])\n \n # Checking validity of neighboring cell and if it was originally a live cell\n if(r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n \n live_neighbors += 1\n \n # Rule 1 or Rule 3\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n \n board[row][col] = -1 # -1 meaning cell is now dead but was originally live\n \n # Rule 4\n if board[row][col] == 0 and live_neighbors == 3:\n board[row][col] = 2 #2 meaning cell is now live but was originally dead\n # Get final representation for updated board \n for row in range(rows):\n for col in range(cols):\n \n if board[row][col] > 0:\n board[row][col] = 1\n \n else:\n board[row][col] = 0", "def gameOfLife(self, board) -> None:\n rows = len(board)\n cols = len(board[0])\n neighbours = [(-1, 1), (0, 1), (1, 1), (-1, 0), (1, 0), (-1, -1), (0, -1), (1, -1)]\n for row in range(rows):\n for col in range(cols):\n live_neighbour = 0\n for i, j in neighbours:\n new_row = row + i\n new_col = col + j\n if new_row >= 0 and new_row < rows and new_col >= 0 and new_col < cols and \\\n board[new_row][new_col] in [1, -1]:\n live_neighbour += 1\n if (live_neighbour < 2 or live_neighbour > 3) and board[row][col] == 1:\n board[row][col] = -1\n elif live_neighbour == 3 and board[row][col] == 0:\n board[row][col] = 2\n for row in range(rows):\n for col in range(cols):\n if board[row][col] == -1:\n board[row][col] = 0\n elif board[row][col] == 2:\n board[row][col] = 1", "def gameOfLife(self, board):\n n = len(board)\n m = len(board[0])\n DX = [0, 0, 1, -1, 1, 1, -1, -1]\n DY = [1, -1, 0, 0, 1, -1, 1, 
-1];\n for i in range(n):\n for j in range(m):\n cnt = 0\n for k in range(8):\n x = i + DX[k]\n y = j + DY[k]\n if x < 0 or x >= n or y < 0 or y >= m:\n continue\n cnt += board[x][y] & 1\n if (board[i][j] & 1) > 0:\n if cnt >= 2 and cnt <= 3:\n board[i][j] = 0b11\n elif cnt == 3:\n board[i][j] = 0b10\n for i in range(n):\n for j in range(m):\n board[i][j] >>= 1", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def gameOfLife(self, board: List[List[int]]) -> None:\n # copy matrix\n copy_matrix = [[board[row][col] for col in range(len(board[0]))] for row in range(len(board))]\n \n # 8 possible directions\n directions = [(0,1), (0, -1), (1,0), (-1,0), (-1,-1), (1,1), (1,-1), (-1,1)]\n num_rows = len(board)\n num_cols = len(board[0])\n \n # matrix traversal\n for i in range(0, num_rows):\n for j in range(0, num_cols):\n # for each cell, explore all of its neighboring cells\n num_live_cells = 0\n for direction in directions:\n r = i + direction[0]\n c = j + direction[1]\n # make sure if it is a live cell \n if (r < num_rows and r >=0) and (c < num_cols and c>=0) and (copy_matrix[r][c]==1):\n # if it is live cell, increment live_cell_count\n num_live_cells +=1\n # if here: We now have estimate of surrounding live cells\n # start applying rules \n # Rule-1: Any live cell with fewer than 2 live neighbors die\n # Rule-2: Any live cell with 2/3 live neighbors live up\n # Rule-3: Any Live cell with > 3 live neighbors die\n # Rule-4: Any dead cell with ==3 live neighbors becomes alive\n if copy_matrix[i][j] == 1 and (num_live_cells > 3 or num_live_cells < 2):\n # Rule-1 and Rule-3: So the current cell dies...\n board[i][j] = 0\n if copy_matrix[i][j] == 0 and num_live_cells == 3:\n # Rule-4: Dead becomes alive\n board[i][j] = 1\n # Rule-2 is taken care by default.", "def next(self):\n self.current_state = self.next_state\n self.next_state = self.clear_screen() # set values to 0\n for x in range(1, 101):\n for y in range(1, 101):\n # calculate the number of alive neighbours at given coordinates\n self.neighbours_alive = self.check_neighbours_alive(x, y)\n\n # assign the result value from rule sets\n self.next_state[x][y] = self.rule_sets[self.selected_rule][ # selected rule name\n str(self.current_state[x][y])][ # 0 or 1 (dead or alive)\n self.neighbours_alive] # number between 0 to 8\n return self.next_state", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n mat = [row[:] for row in board] #original copy of the board\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if mat[i+direc[0]][j+direc[1]]==1:\n 
cnt_live+=1\n if mat[i][j]==1 and cnt_live<2 or mat[i][j]==1 and cnt_live>3:\n board[i][j]=0\n elif mat[i][j]==1 and 2<=cnt_live<=3 or mat[i][j]==0 and cnt_live==3:\n board[i][j]=1", "def gameOfLife(self, board: List[List[int]]) -> None:\n ds = [(-1, -1), (0, -1), (-1, 0), (1, 0), (0, 1), (1, 1), (1, -1), (-1, 1)]\n for i in range(0, len(board)):\n for j in range(0, len(board[i])):\n lnum = 0\n for k in range(0, len(ds)):\n x, y = ds[k]\n if 0 <= i + x < len(board) and 0 <= j + y < len(board[i]):\n s = board[i + x][j + y] & 1\n if s == 1:\n lnum += 1\n if board[i][j] == 1:\n if lnum < 2:\n board[i][j] |= 2\n elif 2 <= lnum <= 3:\n pass\n else:\n board[i][j] |= 2\n else:\n if lnum == 3:\n board[i][j] |= 2\n for i in range(0, len(board)):\n for j in range(0, len(board[i])):\n if board[i][j] > 1:\n board[i][j] = ~(board[i][j] & 1) & 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n def neighbour(i, j):\n total = 0\n for x,y in ((i+1, j), (i-1, j), (i, j+1), (i, j-1), (i-1, j-1), (i-1, j+1), (i+1, j+1), (i+1, j-1)):\n if x >= 0 and y >= 0 and x <= len(board) -1 and y <= len(board[0]) -1 and board[x][y] & 1:\n total += 1\n return total\n \n def rule(value,i, j):\n if value == 1:\n if neighbour(i, j) == 2 or neighbour(i, j) == 3:\n value |= 2\n elif value == 0:\n if neighbour(i, j) == 3:\n value |= 2\n return value\n \n if not len(board):\n return []\n m = len(board)\n n = len(board[0])\n \n for i in range(m):\n for j in range(n): \n board[i][j] = rule(board[i][j], i, j) \n \n for i in range(m):\n for j in range(n): \n board[i][j] = board[i][j] >> 1 \n \n \n \n return board", "def state_generator(self):\n\n kernel = np.array([\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n iteration = 0\n\n while True: # (Game of Life does not end)\n # Run 2D convolution with the given kernel to find out how many neighbors each cell has.\n # Boundary option determines whether to run with hard boundaries on the game board or\n # using a toroid board which wraps circularly. These are the two strategies for handling\n # a finite game board. scipy.signal.convolve2d handles these two modes gracefully, which\n # is why it is used here. 
There is also a performance gain when using numpy/scipy matrix\n # operations as opposed to iterating element-wise over the whole matrix.\n # See https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.signal.convolve2d.html\n\n # There is a more sophisticated and efficient algorithm for determining next game state\n # (see http://dotat.at/prog/life/life.html) but for clarity and a lack of time, the standard\n # implementation was chosen.\n\n num_neighbors_board = convolve2d(self.board, kernel, mode='same', boundary=self.boundary.value)\n\n # Find empty cells that have three neighbors\n birth_coordinates = np.where(np.logical_and(self.board == 0, num_neighbors_board == 3))\n\n # Find live cells with too few or too many neighbors\n death_coordinates = np.where(\n np.logical_and(\n self.board == 1,\n np.logical_or(num_neighbors_board < 2, num_neighbors_board > 3)\n )\n )\n\n births = np.array(birth_coordinates).transpose().tolist()\n deaths = np.array(death_coordinates).transpose().tolist()\n self.board[birth_coordinates] = 1\n self.board[death_coordinates] = 0\n\n iteration += 1\n yield self.board, births, deaths, iteration", "def gameOfLife(self, board: List[List[int]]) -> None:\n neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n row = len(board)\n col = len(board[0])\n copyboard = copy.deepcopy(board)\n for i in range(row):\n for j in range(col):\n liven = 0\n for neighbor in neighbors:\n r = i + neighbor[0]\n c = j + neighbor[1]\n if (r>=0 and r<row) and (c>=0 and c<col) and (copyboard[r][c] == 1):\n liven += 1\n if copyboard[i][j]==1 and (liven<2 or liven>3):\n board[i][j] = 0\n if copyboard[i][j]==0 and liven == 3:\n board[i][j] =1", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n n = len(board[0])\n def count(x, y):\n top = y - 1\n down = y + 1\n left = x - 1\n right = x + 1\n if top < 0:\n top = 0\n if down >= m:\n down = m - 1\n if left < 0:\n left = 0\n if right >= n:\n right = n - 1\n _count = 0\n for i in range(top, down + 1):\n for j in range(left, right + 1):\n _count += board[i][j]\n _count -= board[y][x]\n return _count\n\n\n result = [[0 for _ in range(n)] for _ in range(m)]\n for i in range(m):\n for j in range(n):\n neighbours = count(j, i)\n if board[i][j] == 0 and neighbours == 3:\n result[i][j] = 1\n if board[i][j] == 1 and (neighbours == 2 or neighbours == 3):\n result[i][j] = 1\n for i in range(m):\n for j in range(n):\n board[i][j] = result[i][j]", "def getNextState(self, board, player, action):\n b = self._base_board.with_np_pieces(np_pieces=np.copy(board))\n b.add_stone(action, player)\n return b.np_pieces, -player", "def gameOfLife(self, board: List[List[int]]) -> None:\n rows = len(board)\n cols = len(board[0])\n if not rows or not cols:\n return board\n neighbors = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\n\n def no_of_live_neighbors(x, y):\n count = 0\n for dx, dy in neighbors:\n if 0 <= x + dx <= rows - 1 and 0 <= y + dy <= cols - 1:\n if abs(board[x + dx][y + dy]) == 1:\n count += 1\n return count\n\n for i in range(rows):\n for j in range(cols):\n live_neighbours = no_of_live_neighbors(i, j)\n if board[i][j] == 0 and live_neighbours == 3:\n board[i][j] = 2\n if board[i][j] == 1 and (live_neighbours < 2 or live_neighbours > 3):\n board[i][j] = -1\n for i in range(rows):\n for j in range(cols):\n if board[i][j] > 0:\n board[i][j] = 1\n else:\n board[i][j] = 0\n\n return board", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, 
y):\n return my_board, -1, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 5, False, {}\n else:\n return state, -100, True, {}", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n if m==0:\n return board\n n = len(board[0])\n if n==0:\n return board\n def valid(a,b):\n if 0<=a<m and 0<=b<n:\n return True\n directions = [(0,-1),(-1,-1),(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1)]\n for i in range(m):\n for j in range(n):\n #count how many live=1 or dead=0 cells surrounding cell (i,j)\n cnt_live=0\n for direc in directions:\n if valid(i+direc[0],j+direc[1]):\n if board[i+direc[0]][j+direc[1]]==1 or board[i+direc[0]][j+direc[1]]==-1:\n cnt_live+=1\n if (board[i][j]==1 and cnt_live<2) or \\\n (board[i][j]==1 and cnt_live>3):\n board[i][j]=-1\n elif board[i][j]==0 and cnt_live==3:\n board[i][j]=2\n for i in range(m):\n for j in range(n):\n if board[i][j]==-1:\n board[i][j]=0\n elif board[i][j]==2:\n board[i][j]=1", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n\n # print the valid moves on board for current player\n move = board.last_move\n\n # enemy agent's id\n enemy = self.id % 2 + 1\n\n value = self.evaluateRows(board, enemy) + self.evaluateCols(board, enemy) + self.evaluateBackwardDiagonals(board, enemy) + self.evaluateForwardDiagonals(board, enemy)\n return value", "def hill_climbing(problem):\n\n current = State(problem.initial_state)\n print(current.get_value())\n while current.get_value() != 0:\n neighbour = current.generate_neighbour()\n print(neighbour.board)\n print(neighbour.get_value())\n if neighbour.get_value() >= current.get_value():\n return current.board\n current = neighbour", "def life_step(state):\n\t# For every cell each live cell in any of the 8 neighbouring cells contributes 1 to the sum\n\t# Rolling matricies is periodic so this implements periodic boundary conditions\n\tnumberOfNeigbours = sum(np.roll(np.roll(state, i, axis=0), j, axis=1)\n\t\t\t\t\t\t for i in (-1,0,1) for j in (-1,0,1) if (i != 0 or j != 0))\n\n\t# Any live cell with fewer than two live neighbours dies, as if caused by under-population\n\tstate = np.where(numberOfNeigbours < 2, 0, state)\n\t# Any live cell with more than three live neighbours dies, as if by over-population\n\tstate = np.where(numberOfNeigbours > 3, 0, state)\n\t# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.\n\tstate = np.where(numberOfNeigbours == 3, 1, state)\n\n\treturn state", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, 0, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 0, False, {}\n else:\n return state, -100, True, {}", "def gameOfLife(self, board: List[List[int]]) -> None:\n\n neighbors 
= [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]\n\n rows = len(board)\n cols = len(board[0])\n\n # 遍历面板每一个格子里的细胞\n for row in range(rows):\n for col in range(cols):\n # 对于每一个细胞统计其八个相邻位置里的活细胞数量\n live_neighbors = 0\n\n for neighbor in neighbors:\n # 相邻位置的坐标\n r = (row + neighbor[0])\n c = (col + neighbor[1])\n # 查看相邻的细胞是否是活细胞\n if (r < rows and r >= 0) and (c < cols and c >= 0) and abs(board[r][c]) == 1:\n live_neighbors += 1\n\n # 过去的活细胞,现在变为死细胞\n if board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):\n # -1 代表这个细胞过去是活的现在死了\n board[row][col] = -1\n # 过去的死细胞,现在变为活细胞\n if board[row][col] == 0 and live_neighbors == 3:\n # 2 代表这个细胞过去是死的现在活了\n board[row][col] = 2\n\n # 遍历 board 刷新更新后的状态\n for row in range(rows):\n for col in range(cols):\n if board[row][col] > 0:\n board[row][col] = 1\n else:\n board[row][col] = 0", "def next_life_generation(board):\r\n\theight = len(board)\r\n\twidth = len(board[0])\r\n\tnextGen = copy(board)\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif row > 0 and row < height-1:\r\n\t\t\t\tif col > 0 and col < width-1:\r\n\t\t\t\t\tcellNeigbors = howManyNeigbors(board,row,col)\r\n\t\t\t\t\tif cellNeigbors < 2:\r\n\t\t\t\t\t\tnextGen[row][col] = 0\r\n\t\t\t\t\telif cellNeigbors > 3:\r\n\t\t\t\t\t\tnextGen[row][col] = 0\r\n\t\t\t\t\telif cellNeigbors == 3 and board[row][col] == 0:\r\n\t\t\t\t\t\tnextGen[row][col] = 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tnextGen[row][col] = board[row][col]\r\n\treturn nextGen", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def get_next_board(Board):\n paddedBoard = pad_board(Board)\n\n dims = paddedBoard.shape\n rows = dims[0] - 2\n cols = dims[1] - 2\n\n nextBoard = np.zeros((rows, cols), dtype=int)\n\n for r in range(1, dims[0] - 1):\n for c in range(1, dims[1] - 1):\n numNeighbours = check_neighbours(r, c, paddedBoard)\n # ... ... ... ... ...\n # ... Game Logic ...\n # ... ... ... ... 
...\n\n return nextBoard", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # check \\ diagonal wins\n for i in range(2):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check / diagonal wins\n for i in range(3,5):\n for j in range(2):\n if state[i][j]!= ' ' and state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3]:\n return 1 if state[i][j]==self.my_piece else -1\n # check diamond wins\n for i in range(3):\n for j in range(1,4):\n if state[i+1][j] == ' ' and state[i][j]!= ' ' and state[i][j] == state[i+1][j-1] == state[i+1][j+1] == state[i+2][j]:\n return 1 if state[i][j]==self.my_piece else -1\n\n return 0 # no winner yet", "def next_state(s_curr, action, params):\n P_dist = params['P_dist']\n R = params['R']\n n_rows = params['n_rows']\n n_cols = params['n_cols']\n occ_grid = params['occ_grid']\n\n rnd = np.random.uniform()\n\n s_next = s_curr\n\n # Actions - ['left','right','up','down']\n\n if rnd <= P_dist:\n if action == 0:\n move = 2\n elif action == 1:\n move = 2\n elif action == 2:\n move = 1\n else:\n move = 0\n elif rnd < 2*P_dist:\n if action == 0:\n move = 3\n elif action == 1:\n move = 3\n elif action == 2:\n move = 1\n else:\n move = 1\n else:\n move = action\n\n # Move left\n if move == 0:\n row_next = s_curr[0]\n col_next = s_curr[1] - 1\n if col_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move right\n if move == 1:\n row_next = s_curr[0]\n col_next = s_curr[1] + 1\n if col_next < n_cols and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move up\n if move == 2:\n row_next = s_curr[0] - 1\n col_next = s_curr[1]\n if row_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move down\n if move == 3:\n row_next = s_curr[0] + 1\n col_next = s_curr[1]\n if row_next < n_rows and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n r = R[s_next[0], s_next[1]]\n return s_next, r", "def game_value(self, state):\r\n # check horizontal wins\r\n for row in state:\r\n for i in range(2):\r\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\r\n return 1 if row[i] == self.my_piece else -1\r\n # check col wins\r\n for col in range(5):\r\n for i in range(2):\r\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\r\n return 1 if state[i][col] == self.my_piece else -1\r\n #check diag up wins\r\n for x in range(2):\r\n for y in range(2):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y+1] == state[x+2][y+2] == state[x+3][y+3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check diag down wins\r\n for x in range(2):\r\n for y in range(3, 5):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y-1] == state[x+2][y-2] == state[x+3][y-3]:\r\n return 1 if state[x][y] == self.my_piece else -1\r\n #check square box wins \r\n for x in range(4):\r\n for y in range(4):\r\n if state[x][y] != ' ' and state[x][y] == state[x+1][y] == state[x][y+1] == state[x+1][y+1]:\r\n return 1 if 
state[x][y] == self.my_piece else -1\r\n\r\n return 0 # no winner yet\r", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def gameOfLife(self, board: List[List[int]]) -> None:\n r, c = len(board), len(board[0])\n # 下面两行做zero padding\n board_exp = np.array([[0 for _ in range(c + 2)] for _ in range(r + 2)])\n board_exp[1:1 + r, 1:1 + c] = np.array(board)\n print(board_exp)\n # 设置卷积核\n kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n # 开始卷积\n for i in range(1, r + 1):\n for j in range(1, c + 1):\n # 统计细胞周围8个位置的状态\n temp_sum = np.sum(kernel * board_exp[i - 1:i + 2, j - 1:j + 2])\n # 按照题目规则进行判断\n if board_exp[i, j] == 1:\n if temp_sum < 2 or temp_sum > 3:\n board[i - 1][j - 1] = 0\n else:\n if temp_sum == 3:\n board[i - 1][j - 1] = 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n self.board = copy.deepcopy(board)\n self.rows = len(self.board)\n self.cols = len(self.board[0])\n for i in range(self.rows):\n for j in range(self.cols):\n neighbors = self.count_neighbors(i, j)\n if board[i][j] == 1:\n if neighbors < 2 or neighbors > 3:\n board[i][j] = 0\n else:\n if neighbors == 3:\n board[i][j] = 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n m = len(board)\n n = len(board[0])\n\n def affect(x, y):\n for i in [x-1, x, x+1]:\n for j in [y-1, y, y+1]:\n if (i == x and j == y) or i < 0 or i >= m or j < 0 or j >= n:\n continue\n board[i][j] += 10\n for i in range(m):\n for j in range(n):\n if board[i][j] % 10 == 1:\n affect(i, j)\n for i in range(m):\n for j in range(n):\n value = board[i][j]\n if value // 10 == 3:\n board[i][j] = 1\n elif value // 10 == 2 and value % 10 == 1:\n board[i][j] = 1\n else:\n board[i][j] = 0", "def game_state(matrix):\n\n \"\"\"\n # To set winning tile\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 2048:\n # return 'win'\n # return 'not over'\n \"\"\"\n for i in range(len(matrix)-1):\n # intentionally reduced to check the row on the right and below\n # more elegant to use exceptions but most likely this will be their solution\n for j in range(len(matrix[0])-1):\n if matrix[i][j] == matrix[i+1][j] or matrix[i][j+1] == matrix[i][j]:\n return 'not over'\n for i in range(len(matrix)): # check for any zero entries\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n return 'not over'\n for k in range(len(matrix)-1): # to check the left/right entries on the last row\n if matrix[len(matrix)-1][k] == matrix[len(matrix)-1][k+1]:\n return 'not over'\n for j in range(len(matrix)-1): # check up/down entries on last column\n if matrix[j][len(matrix)-1] == matrix[j+1][len(matrix)-1]:\n return 'not over'\n return 'lose'", "def computeNextState(self):\n aliveNeighbors = self.numOfLiveNeighbors()\n if aliveNeighbors < 2 or 
aliveNeighbors > 3:\n self.setNextToDead()\n\n if not self.isAlive() and aliveNeighbors == 3:\n self.setNextToAlive()", "def gameOfLife(self, board: List[List[int]]) -> None:\n changes = list()\n for i in range(len(board)):\n for j in range(len(board[0])):\n neighbor_data = {\n 'live': 0,\n 'dead': 0\n }\n checks = {(0,1), (0,-1), (1, 0), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1,-1)}\n if i == 0:\n checks.discard((-1, 0))\n checks.discard((-1, 1))\n checks.discard((-1, -1))\n if j == 0:\n checks.discard((0, -1))\n checks.discard((-1, -1))\n checks.discard((1, -1))\n if i == (len(board) - 1):\n checks.discard((1,0))\n checks.discard((1,-1))\n checks.discard((1, 1))\n if j == (len(board[0]) - 1):\n checks.discard((0, 1))\n checks.discard((-1, 1))\n checks.discard((1, 1))\n for check in checks:\n if board[i + check[0]][j + check[1]]:\n neighbor_data['live'] += 1\n else:\n neighbor_data['dead'] += 1\n if board[i][j]:\n # check live rules\n if neighbor_data['live'] < 2 or neighbor_data['live'] > 3:\n changes.append((i, j))\n else:\n # check dead rules\n if neighbor_data['live'] == 3:\n changes.append((i, j))\n for change in changes:\n board[change[0]][change[1]] = int (not board[change[0]][change[1]])\n \n print (board)", "def gameOfLife(self, board: List[List[int]]) -> None:\n neighbors = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1),\n (0, 1), (1, 1)]\n rows = len(board)\n cols = len(board[0])\n\n tmp_board = [[board[r][c] for c in range(cols)] for r in range(rows)]\n\n for row in range(rows):\n for col in range(cols):\n lives = 0\n for n in neighbors:\n r = row + n[0]\n c = col + n[1]\n\n if 0 <= r < rows and 0 <= c < cols and tmp_board[r][c] == 1:\n lives += 1\n if tmp_board[row][col] == 1 and (lives < 2 or lives > 3):\n board[row][col] = 0\n if tmp_board[row][col] == 0 and lives == 3:\n board[row][col] = 1", "def game_value(self, state):\n # check horizontal wins\n for row in state:\n for i in range(2):\n if row[i] != ' ' and row[i] == row[i+1] == row[i+2] == row[i+3]:\n return 1 if row[i]==self.my_piece else -1\n\n # check vertical wins\n for col in range(5):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i+2][col] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n\n # TODO: check \\ diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col+1] == state[i+2][col+2] == state[i+3][col+3]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check / diagonal wins\n for col in range(2):\n for i in range(2):\n if state[i][col+3] != ' ' and state[i][col+3] == state[i+1][col+2] == state[i+2][col+1] == state[i+3][col]:\n return 1 if state[i][col]==self.my_piece else -1\n # TODO: check 2x2 box wins\n for col in range(4):\n for i in range(4):\n if state[i][col] != ' ' and state[i][col] == state[i+1][col] == state[i][col+1] == state[i+1][col+1]:\n return 1 if state[i][col]==self.my_piece else -1\n \n return 0 # no winner yet", "def evaluate(self, state):\n\t\ttranspose = state.board.transpose()\t\t# columns in state.board = rows in transpose\n\t\tcount = []\n\t\topponentcount = []\n\t\tfor row, column in zip(state.board, transpose):\n\t\t\trowcounter = collections.Counter(row)\n\t\t\tcolumncounter = collections.Counter(column)\n\t\t\tcount.append(rowcounter.get(state.current_player, 0))\n\t\t\tcount.append(columncounter.get(state.current_player, 0))\n\t\t\topponentcount.append(rowcounter.get(state.current_player * - 1, 
0))\n\t\t\topponentcount.append(columncounter.get(state.current_player * -1 , 0))\n\n\t\tY = state.board[:, ::-1]\n\t\tdiagonals = [np.diagonal(state.board), np.diagonal(Y)]\n\t\tmain_diagonal_count = collections.Counter(diagonals[0])\n\t\tsecond_diagonal_count = collections.Counter(diagonals[1])\n\t\tcount.append(main_diagonal_count.get(state.current_player, 0))\n\t\tcount.append(second_diagonal_count.get(state.current_player, 0))\n\t\topponentcount.append(main_diagonal_count.get(state.current_player * - 1, 0))\n\t\topponentcount.append(second_diagonal_count.get(state.current_player * -1, 0))\n\n\t\t# max(count): maximum number of player's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\t# max(opponentcount): maximum number of opponent's tiles in a row, column, or a diagonal (the highest value is 5)\n\t\tscoremax = 5 ** max(count)\n\t\tscoremin = 5 ** max(opponentcount)\n\n\t\treturn scoremax - scoremin", "def gameOfLife(self, board: List[List[int]]) -> None:\n if not board or len(board)==0:\n return \n\n rows = len(board)\n cols = len(board[0])\n #lives = 0\n \n\n for i in range(rows):\n for j in range(cols):\n lives = self.n_neighbors(board,i,j)\n \n # Rule 1 and Rule 3\n if board[i][j]==1 and (lives <2 or lives >3):\n board[i][j]= 2 # -1 signifies the cell is now dead but originally was live.\n if board[i][j]== 0 and lives ==3:\n board[i][j]=3 # signifies the cell is now live but was originally dead.\n\n for i in range(rows):\n for j in range(cols):\n board[i][j] = board[i][j]%2\n return board", "def gameOfLife(self, board) :\n # mark live-->dead (-1)\n # mark live-->live (1)\n # mark dead-->live (2)\n # mark dead-->dead (0)\n\n h = len(board)\n w = len(board[0])\n\n def counter(i,j):\n c=0\n for m in range(-1,2):\n for n in range(-1,2):\n if i+m<0 or j+n <0 :\n continue\n if i+m>h-1 or j+n>w-1:\n continue\n else:\n if board[i+m][j+n]==1 or board[i+m][j+n]==-1:\n c+=1\n return c\n\n for i in range(h):\n for j in range(w):\n live=counter(i,j)\n if board[i][j] ==1:\n live=live-1\n if live<2 or live>3:\n board[i][j]=-1\n else:\n if live==3:\n board[i][j]=2\n for i in range(h):\n for j in range(w):\n if board[i][j]==2:\n board[i][j]=1\n if board[i][j]==-1:\n board[i][j]=0", "def gen_next_state(self, row, col, value):\n new_state = self.copy_state() # Create a copy of the current state (final and possible values)\n # Update the board configuration:\n new_state.final_values[row][col] = value\n new_state.possible_values[row][col] = [] # Position has been filled so it no longer has possible moves\n\n new_state.update_constraints(row, col, value) # Update affected possible values (apply constraints)\n\n singleton_list = new_state.get_singletons() # Find singletons for the new board configuration\n while singleton_list:\n row, col = singleton_list.pop() # Get singleton's position\n\n new_state.final_values[row][col] = new_state.possible_values[row][col][0] # Update final value\n new_state.possible_values[row][col] = [] # Position has been filled so it no longer has possible moves\n new_state.update_constraints(row, col, new_state.final_values[row][col]) # Propagate constraints\n\n singleton_list = new_state.get_singletons() # Get the remaining singletons\n\n return new_state # Return the resulting state", "def gameOfLife(self, board: List[List[int]]) -> None:\n res = {}\n for i in range(len(board)):\n for j in range(len(board[0])):\n count = 0\n # top line\n if i-1 >= 0:\n # left-top\n if j-1>=0:\n if board[i-1][j-1]==1:\n count += 1\n # top\n if board[i-1][j]==1:\n count += 
1\n # right-top\n if j+1<len(board[0]):\n if board[i-1][j+1]==1:\n count += 1\n # same line\n # left\n if j-1>=0:\n if board[i][j-1]==1:\n count += 1\n # right\n if j+1 < len(board[0]):\n if board[i][j+1]==1:\n count += 1\n # bottom line\n if i+1 < len(board):\n # left-bottom\n if j-1 >= 0:\n if board[i+1][j-1]==1:\n count += 1\n # bottom\n if board[i+1][j]==1:\n count += 1\n # right-bottom\n if j+1 < len(board[0]):\n if board[i+1][j+1]==1:\n count += 1\n # check\n if board[i][j]==0 and count==3:\n res[(i,j)] = 1\n if board[i][j]==1 and (count<2 or count>3):\n res[(i,j)] = 0\n for key in res:\n board[key[0]][key[1]] = res[key]", "def advance_board(self):\n board = self.board\n rules = self.energy_rules\n h, w = board.shape\n beta = 1.0 / max(1e-20, self.temperature)\n if len(rules[0]) - 1 == 4:\n neighborhood = np.array([[0,1,0],[1,0,1],[0,1,0]])\n elif len(rules[0]) - 1 == 6:\n neighborhood = np.array([[0,1,1],[1,0,1],[1,1,0]])\n elif len(rules[0]) - 1 == 8:\n neighborhood = np.array([[1,1,1],[1,0,1],[1,1,1]])\n else:\n raise RuntimeError(\"async rules must have length 5, 7, or 9\")\n rng = get_rng()\n for _ in range(int(board.size * self.cells_per_update)):\n x = rng.choice(w)\n y = rng.choice(h)\n if board[y, x] & CellTypes.frozen:\n continue\n neighbors = board.view(wrapping_array)[y-1:y+2, x-1:x+2] * neighborhood\n alive_neighbors = np.sum(neighbors & CellTypes.alive > 0)\n spawn_neighbors = np.sum(neighbors & CellTypes.spawning > 0)\n frozen = np.sum(neighbors & CellTypes.freezing) > 0\n if frozen:\n continue\n if board[y, x] & CellTypes.alive:\n H = rules[0][alive_neighbors]\n else:\n H = rules[1][alive_neighbors]\n\n P = 0.5 + 0.5*np.tanh(H * beta)\n P = 1 - (1-P)*(1-self.spawn_prob)**spawn_neighbors\n board[y, x] = CellTypes.life if coinflip(P) else CellTypes.empty", "def next_states(self, state):\n import copy\n\n ans = []\n current_array = state.board.array\n space_pos = state.board.space\n\n up_pos = [space_pos[0] - 1, space_pos[1]]\n down_pos = [space_pos[0] + 1, space_pos[1]]\n left_pos = [space_pos[0], space_pos[1] - 1]\n right_pos = [space_pos[0], space_pos[1] + 1]\n\n # down position\n if self.__is_valid(down_pos):\n down_array = [copy.copy(row) for row in current_array]\n down_board = Board(array=down_array, space=space_pos.copy())\n down_board.swap(down_pos)\n ans.append(State(board=down_board, came_from=state, move='U'))\n\n # up position\n if self.__is_valid(up_pos):\n up_array = [copy.copy(row) for row in current_array]\n up_board = Board(array=up_array, space=space_pos.copy())\n up_board.swap(up_pos)\n ans.append(State(board=up_board, came_from=state, move='D'))\n\n # right position\n if self.__is_valid(right_pos):\n right_array = [copy.copy(row) for row in current_array]\n right_board = Board(array=right_array, space=space_pos.copy())\n right_board.swap(right_pos)\n ans.append(State(board=right_board, came_from=state, move='L'))\n\n # left position\n if self.__is_valid(left_pos):\n left_array = [copy.copy(row) for row in current_array]\n left_board = Board(array=left_array, space=space_pos.copy())\n left_board.swap(left_pos)\n ans.append(State(board=left_board, came_from=state, move='R'))\n\n return ans", "def next_step(self):\n\n c = 1\n dt = 0.001\n dx = 1 / 20**2\n\n # copy current state first\n next_state = np.copy(self.state)\n\n # iterate over matrix\n for i in range(self.width - 1):\n for j in range(self.height - 1):\n\n if not self.shape == \"circle\" or self.circle[i, j] == 1:\n\n # left bottom corner\n if i == 0 and j == 0:\n next_state[i, j] = ((c * dt)/ 
dx)** 2\\\n * (self.state[i + 1, j] + 0\\\n + 0 + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i, j]\n # right top corner\n elif i == 0 and j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (0 + self.state[i + 1, j]\\\n + self.state[i, j - 1] + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n # right bottom corner\n elif i == self.width - 1 and j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i, j - 1] + 0\\\n + 0 + self.state[i - 1, j]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n # left bottom corner\n elif i == self.width - 1 and j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i, j - 1] + self.state[i - 1, j]\\\n + 0 + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif i == 0: # j is not 0\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + 0\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + 0 + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif i == self.width - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (0 + self.state[i - 1, j]\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + self.state[i, j - 1] + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n else:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n\n self.prev_state = np.copy(self.state)\n self.state = np.copy(next_state)\n\n self.timestep += 1", "def gameOfLife(self, board: List[List[int]]) -> None:\n if (0 == len(board)): return\n ori, l, r = copy.deepcopy(board), len(board), len(board[0])\n for i in range(l):\n for j in range(r):\n live = self.count(ori, i, j)\n if ori[i][j] == 1 and live < 2:\n board[i][j] = 0\n elif ori[i][j] == 1 and live > 3:\n board[i][j] = 0\n elif ori[i][j] == 0 and live == 3:\n board[i][j] = 1", "def start_state():\n return chess.Board()", "def eval_board(self, board):\n\t\ts = 0\n\t\t\n\t\tfor i in board.columns:\n\t\t\tfor j in board.rows:\n\t\t\t\tif board[i+j] == self.color:\n\t\t\t\t\n\t\t\t\t\tif i in ['A', 'H'] or j in ['1', '8']:\n\t\t\t\t\t\tif i + j in ['A1', 'A8', 'H1', 'H8']:\n\t\t\t\t\t\t\ts += 4\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ts += 2\n\t\t\t\t\telse:\n\t\t\t\t\t\ts += 1\n\t\treturn s", "def gameOfLife(self, board: List[List[int]]) -> None:\n if board:\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] += 1\n for i in range(len(board)):\n for j in range(len(board[0])):\n alive_cnt = 0\n for x, y in [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)]:\n if 0 <= x + i < len(board) and 0 <= y + j < len(board[0]) and abs(board[x + i][y + j]) == 2:\n alive_cnt += 1\n if alive_cnt <= 1 or alive_cnt >= 4 or (alive_cnt == 2 and abs(board[i][j]) == 1):\n board[i][j] = -1 * board[i][j]\n for i in range(len(board)):\n for j in range(len(board[0])):\n 
board[i][j] = 1 if board[i][j] > 0 else 0", "def _turn_cycle(self):\r\n\r\n #Get current player\r\n cur_player = self.get_current_player()\r\n\r\n #Get board states for current player\r\n choices = self.board.get_states(cur_player)\r\n\r\n #Update board state\r\n self.board = choices[self.get_player(cur_player).choose_state(choices)]\r\n\r\n #Make sure you have the history, original board is added, so we can do it afterwards\r\n self.board_history.append(self.board)\r\n\r\n #Check for win or tie\r\n if self.board.check_win(self.num_to_win, cur_player):\r\n self._end_game(cur_player)\r\n return cur_player\r\n if self.board.check_tie():\r\n self._end_game()\r\n return -1\r\n if self.turn_number >= self.max_turns:\r\n self._end_game()\r\n return -1\r\n\r\n #Incriment turn counter if the game has not ended\r\n self._increment_turn()\r\n\r\n return 0", "def next_life_generation(a):\n w = len(a[0])\n h = len(a)\n new_a = create_board(w, h)\n\n for n in range(h):\n for m in range(w):\n if 0 < n < h - 1 and 0 < m < w - 1:\n count = count_neighbours(n, m, a)\n if count < 2 or count > 3:\n new_a [n][m] = 0\n elif count == 3:\n new_a[n][m] =1\n else:\n new_a[n][m] = a[n][m]\n else:\n new_a[n][m] = 0\n \n return new_a", "def evaluate(self, board):", "def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n # if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next best move\r\n\r\n # get \"points\" on board. this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return 
(1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False", "def current_state(self):\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n for i in range(8):\n for j in range(8):\n if self.board_value[i][j]==self.current_player:\n square_state[0][i][j]=1\n elif self.board_value[i][j]!=self.current_player and self.board_value[i][j]!= 0:\n square_state[1][i][j]=1\n # indicate the last move location\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def computeSide(self):\n side = 0\n for c in 'abcdefgh':\n side += self.getPieceType(c,1)\n side += self.getPieceType(c,2)\n side -= self.getPieceType(c,7)\n side -= self.getPieceType(c,8) \n rospy.loginfo(\"Computed side value of: %d\" % side)\n if side > 0:\n self.side = self.WHITE # good to go\n else:\n self.side = self.BLACK \n # need to setup board \n temp_board = BoardState(self.side) \n for i in range(8):\n temp_board.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, self.getPiece(7-i, 7)) )\n temp_board.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, self.getPiece(7-i, 2)) )\n\n temp_board.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('h',8)) )\n temp_board.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('g',8)))\n temp_board.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('f',8)))\n temp_board.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, self.getPiece('e',8)))\n temp_board.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, self.getPiece('d',8)))\n temp_board.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, self.getPiece('c',8)))\n temp_board.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, self.getPiece('b',8)))\n temp_board.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, self.getPiece('a',8)))\n\n temp_board.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('h',1)) )\n temp_board.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('g',1)) )\n temp_board.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('f',1)) )\n temp_board.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, self.getPiece('e',1)) )\n temp_board.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, self.getPiece('d',1)) )\n temp_board.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, self.getPiece('c',1)) )\n temp_board.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, self.getPiece('b',1)) )\n temp_board.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, self.getPiece('a',1)) ) \n\n 
self.values = temp_board.values\n self.printBoard()\n\n self.last_move = \"go\"", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def gameOfLife(self, board: list[list[int]]) -> None:\n def game_of_life_infinite(live: set[tuple[int, int]]) -> set[tuple[int, int]]:\n ctr = Counter((I, J)\n for i, j in live\n for I in range(i - 1, i + 2)\n for J in range(j - 1, j + 2)\n if 
I != i or J != j)\n return {ij\n for ij in ctr\n if ctr[ij] == 3 or ctr[ij] == 2 and ij in live}\n\n live = {(i, j) for i, row in enumerate(board)\n for j, live in enumerate(row) if live}\n live = game_of_life_infinite(live)\n for i, row in enumerate(board):\n for j in range(len(row)):\n row[j] = int((i, j) in live)", "def advance(self, board):", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state", "def transition(board, player, action):\n if _ENV.is_valid((board, player), action):\n new_board, __ = _ENV.get_next_state((board, player), action)\n return new_board\n return None", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def gameOfLife(self, board) -> None:\n 
changelist = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.ischange(i, j, board):\n changelist.append([i, j])\n\n for x, y in changelist:\n board[x][y] = ~board[x][y] + 2", "def next_possible_states(path, check_dict, check):\r\n \r\n current_state_tuple = path[-1]\r\n state_container = []\r\n x = current_state_tuple[1][0]\r\n y = current_state_tuple[1][1]\r\n current_state = current_state_tuple[0]\r\n\r\n # Down\r\n if y < 3:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y + 1][x]\r\n new_state[y + 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y + 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Up\r\n if y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n if y == 1 and x == 0:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if is_goal(new_state):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n elif y > 1:\r\n new_state[y][x] = new_state[y - 1][x]\r\n new_state[y - 1][x] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x, y - 1)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Left\r\n if x > 0 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x - 1]\r\n new_state[y][x - 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x - 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n # Right\r\n if x < 2 and y > 0:\r\n new_state = copy.deepcopy(current_state)\r\n new_state[y][x] = new_state[y][x + 1]\r\n new_state[y][x + 1] = 0\r\n if not been_there(new_state, check_dict, check):\r\n new_index = (x + 1, y)\r\n h1 = euclidean_dist(new_state, path)\r\n new_state_tuple = (new_state, new_index, h1)\r\n state_container.append(new_state_tuple)\r\n\r\n return state_container", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width, move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width, move_oppo % self.height] = 1.0\n\n # last move indication\n square_state[2][self.last_move // self.width, self.last_move % self.height] = 1.0\n\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0\n\n return square_state[:, ::-1, :]", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.state:\n moves, players = np.array(list(zip(*self.state.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.state) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state", "def game_of_life():\n # 3x3 neighbourhood\n offsets = 
[[(y, x) for y in range(-1, 2)] for x in range(-1, 2)]\n\n # Create mappings\n mappings = {}\n for i in range(2 ** 9):\n\n # Determine the initial state (key)\n key = f\"{bin(i)[2:]:0>9}\" # As binary string\n key = tuple(k == \"1\" for k in key) # As tuple of bools\n key = tuple(key[i * 3:i * 3 + 3] for i in range(3)) # Reshape into 2D grid\n\n # Alive counts\n centre = key[1][1]\n others = sum(sum(row) for row in key) - centre\n\n # Skip if state does not evaluate to True\n if centre:\n if others not in (2, 3):\n continue\n\n else:\n if others != 3:\n continue\n\n mappings[key] = True\n\n return Mapping2DRuleset(mappings, offsets)", "def get_state(self,board):\n s = range(board.size())\n return [ board.getCell(x,y) for y in s for x in s]", "def gameOfLife(self, board: List[List[int]]) -> None:\r\n self.board = board\r\n self.l = len(board)\r\n self.w = len(board[0])\r\n status = [[0] * self.w for _ in range(self.l)]\r\n for i in range(self.l):\r\n for j in range(self.w):\r\n status[i][j] = self.statusUpdate(board[i][j], self.countLivingNeighbor([i, j]))\r\n #print(\"prev: \", i, j ,board[i][j], \" count: \", self.countLivingNeighbor([i, j]), \" after:\", status[i][j])\r\n for i in range(self.l):\r\n for j in range(self.w):\r\n board[i][j] = status[i][j]", "def solve(self, board: List[List[str]]) -> None:\n\n def expansion(i,j):\n for di,dj in {(-1,0),(1,0),(0,-1),(0,1)}:\n if -1<i+di<m and -1<j+dj<n and state[i+di][j+dj]=='O':\n return True\n return False\n\n if not board:\n return board\n\n m = len(board)\n n = len(board[0]) \n state = [['X']*n for _ in range(m)]\n\n for j in range(n):\n state[0][j] = board[0][j]\n state[m-1][j] = board[m-1][j]\n \n for i in range(m):\n state[i][0] = board[i][0]\n state[i][n-1] = board[i][n-1]\n \n flag = 1\n\n while flag:\n flag = 0\n\n for k in range(1, (1+min(m,n))//2):\n for j in range(k,n-k):\n if board[k][j]=='O' and state[k][j] == 'X' and expansion(k,j):\n state[k][j] = 'O'\n flag = 1\n \n if board[m-1-k][j]=='O' and state[m-1-k][j] == 'X' and expansion(m-1-k,j):\n state[m-1-k][j] = 'O'\n flag = 1\n \n for i in range(k,m-k):\n if board[i][k]=='O' and state[i][k] == 'X' and expansion(i,k):\n state[i][k] = 'O'\n flag = 1\n \n if board[i][n-1-k]=='O' and state[i][n-1-k] == 'X' and expansion(i,n-1-k):\n state[i][n-1-k] = 'O'\n flag = 1\n\n board[:] = state[:]", "def gameOfLife_1(self, board):\n self.template = [[[i, j] for i in (-1, 0, 1)] for j in (-1, 0, 1)]\n self.template[1][1] = [9999, 9999]\n tmp_board = [[j for j in row] for row in board]\n\n for i in range(len(tmp_board)):\n for j in range(len(tmp_board[i])):\n count = self.countLiveNeighborsCell(tmp_board, i, j)\n if tmp_board[i][j] == 0 and count == 3:\n board[i][j] = 1\n elif tmp_board[i][j] == 1 and count < 2:\n board[i][j] = 0\n # elif tmp_board[i][j] == 1 and count in [2, 3]:\n # board[i][j] = 1\n elif tmp_board[i][j] == 1 and count > 3:\n board[i][j] = 0", "def knight_tours(board: List[List[int]], curr: Tuple[int, int], count: int) -> -1:\n if count == len(board) ** 2:\n return\n\n deltas = [\n (2, 1),\n (1, 2),\n (-2, 1),\n (-1, 2),\n (2, -1),\n (1, -2),\n (-2, -1),\n (-1, -2),\n ]\n\n for delta in deltas:\n next_x, next_y = curr[0] + delta[0], curr[1] + delta[1]\n if not is_valid_coordinate((next_x, next_y), len(board)):\n continue\n\n board[next_x][next_y] = count\n knight_tours(board, (next_x, next_y), count + 1)\n board[next_x][next_y] = -1", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in 
range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def _get_current_game_state(board):\n return np.concatenate((_get_pieces_one_hot(board, color=False),\n _get_pieces_one_hot(board, color=True)),\n axis=-1)", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def gameOfLife(self, board: List[List[int]]) -> None:\r\n # 通过本例我们将学习如何搜素一共二维数组\r\n if not board or not board[0]:\r\n return\r\n row = len(board)\r\n col = len(board[0])\r\n\r\n def countCeil(x:int,y:int) -> int:\r\n count = 0\r\n for x_offset in range(-1,2):\r\n for y_offset in range(-1,2):\r\n if x_offset == y_offset == 0:\r\n continue\r\n if 0<= x + x_offset < row and 0 <= y + y_offset < col:\r\n count += board[x + x_offset][y+ y_offset] & 0x0F\r\n if count == 3 or (board[x][y] and count == 2):\r\n count = 1\r\n else:\r\n count = 0\r\n board[x][y] |= (count <<4) # |=意思是按位或\r\n\r\n for x in range(row):\r\n for y in range(col):\r\n countCeil(x, y)\r\n for x in 
range(row):\r\n for y in range(col):\r\n board[x][y] = (board[x][y] & 0xF0) >> 4", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def __iterate(self):\n\t\tnext_board = []\n\n\t\tfor y, row in enumerate(self.__board):\n\t\t\tnext_board.append([])\n\n\t\t\tfor x, cell in enumerate(row):\n\t\t\t\tneighbors = [\n\t\t\t\t\tself.__get_cell_state(y - 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y - 1, x),\n\t\t\t\t\tself.__get_cell_state(y - 1, x + 1),\n\t\t\t\t\tself.__get_cell_state(y, x - 1),\n\t\t\t\t\tself.__get_cell_state(y, x + 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x),\n\t\t\t\t\tself.__get_cell_state(y + 1, x + 1)\n\t\t\t\t]\n\t\t\t\tnum_neighbors = sum(neighbors)\n\t\t\t\tstate = get_new_state(cell, num_neighbors)\n\t\t\t\tnext_board[y].append(state)\n\n\t\tself.__board = next_board\n\t\tself.__display(self.__board)", "def __init__(self, n: int):\n        self.rows = [[n, -1] for _ in range(n)]\n        self.cols = [[n, -1] for _ in range(n)]\n        self.diag = [[n, -1], [n, -1]] # 0 for normal, 1 for anti\n        \n    def move(self, row: int, col: int, player: int) -> int:\n        r1, r2 = self.check(self.rows, row, player), self.check(self.cols, col, player)\n        r3, r4 = 0, 0\n        if(row == col):\n            r3 = self.check(self.diag, 0, player)\n        if(row + col == len(self.rows)-1):\n            r4 = self.check(self.diag, 1, player)\n        \n        return max(r1,r2,r3,r4)\n    def check(self, arr, i, player):\n        arr[i][0] -= 1\n        \n        if(arr[i][1] == -1):\n            arr[i][1] = player\n        elif(arr[i][1] != player):\n            arr[i][1] = 0\n        \n        if(arr[i][0] == 0 and arr[i][1] != 0):\n            return player\n        return 0\n        \n        \"\"\"\n       Player {player} makes a move at ({row}, {col}).\n       @param row The row of the board.\n       @param col The column of the board.\n       @param player The player, can be either 1 or 2.\n       @return The current winning condition, can be either:\n               0: No one wins.\n               1: Player 1 wins.\n               2: Player 2 wins.\n       \"\"\"\n        ", "def successorStates(self, state):\n currentState = state[1]\n successors = []\n for action in 
Directions.CARDINAL:\n x, y = state[0] # currentPosition\n print(\"State: {}\".format(state[0]))\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n # Implement a successor discovery, check if any corners are satisfied\n # and update values as they are satisfied\n if (not hitsWall):\n successorsState = []\n nextxy = (nextx, nexty)\n if nextxy == self.corners[0]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[0])\n if nextxy == self.corners[1]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[1])\n if nextxy == self.corners[2]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[2])\n if nextxy == self.corners[3]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[3])\n # Put all updated values of 4 corners to a variable\n successorPost = (successorsState[0], successorsState[1],\n successorsState[2], successorsState[3])\n # Append to go to the next move\n successors.append(((nextxy, successorPost), action, 1))\n\n self._numExpanded += 1 # Count the number of nodes expanded\n return successors", "def next_state(self, debug=False):\n\n if self.current_state == 'NoObstacle':\n # First check if any obstacle is in sight\n if self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n elif self.transitions.obstacle_in_sight():\n self.current_state = 'Obstacle'\n\n elif self.current_state == 'Obstacle':\n # First check if obstacle is still in sight\n if self.transitions.no_obstacle_in_sight() and not self.transitions.obstacle_in_sight():\n self.current_state = 'NoObstacle'\n elif self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n\n elif self.current_state == 'RoomReached':\n self.current_state = 'InspectCorners'\n\n elif self.current_state == 'InspectCorners':\n if self.transitions.all_corners_inspected():\n if not self.transitions.all_rooms_visited():\n self.current_state = 'RotateToExit'\n else:\n self.current_state = 'Finished'\n\n elif self.current_state == 'RotateToExit':\n if self.transitions.aiming_to_carrot():\n self.current_state = 'NoObstacle'\n\n\n elif self.current_state == 'Finished':\n pass\n\n # DEBUG\n if debug:\n print 'Next state: %s' % self.current_state\n\n if self.current_state is not self.old_state:\n print self.current_state\n\n self.old_state = self.current_state\n\n return self.current_state", "def vidrach_actual(board):\n board_size = len(board)\n\n # coordinates queue - list of (red_pos, blue_pos) tuples\n posqueue = queue.SimpleQueue()\n posqueue.put(((0, 0), (board_size - 1, board_size - 1)))\n\n moves = {((0, 0), (board_size - 1, board_size - 1)): 0}\n\n while not posqueue.empty():\n curr_pos = posqueue.get()\n curr_move = moves[curr_pos]\n\n red_pos = curr_pos[0]\n blue_pos = curr_pos[1]\n\n # if at the swapped position, break/return as this is the fastest\n if red_pos == (board_size - 1, board_size - 1) and blue_pos == (0, 0):\n return curr_move\n\n # check all red moves\n if red_pos != (board_size - 1, board_size - 1):\n move_dist = board[blue_pos[0]][blue_pos[1]]\n\n # up\n new_pos = (red_pos[0], red_pos[1] - move_dist)\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[1] >= 0\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # down\n new_pos = (red_pos[0], red_pos[1] + move_dist)\n if ( # if in-bounds, not 
occupied, and has not been visited before\n new_pos[1] < board_size\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # left\n new_pos = (red_pos[0] - move_dist, red_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] >= 0\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # right\n new_pos = (red_pos[0] + move_dist, red_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] < board_size\n and new_pos != blue_pos\n and (new_pos, blue_pos) not in moves\n ):\n posqueue.put((new_pos, blue_pos))\n moves[(new_pos, blue_pos)] = curr_move + 1\n\n # check all blue moves\n if blue_pos != (0, 0):\n move_dist = board[red_pos[0]][red_pos[1]]\n\n # up\n new_pos = (blue_pos[0], blue_pos[1] - move_dist)\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[1] >= 0\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # down\n new_pos = (blue_pos[0], blue_pos[1] + move_dist)\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[1] < board_size\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # left\n new_pos = (blue_pos[0] - move_dist, blue_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] >= 0\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # right\n new_pos = (blue_pos[0] + move_dist, blue_pos[1])\n if ( # if in-bounds, not occupied, and has not been visited before\n new_pos[0] < board_size\n and new_pos != red_pos\n and (red_pos, new_pos) not in moves\n ):\n posqueue.put((red_pos, new_pos))\n moves[(red_pos, new_pos)] = curr_move + 1\n\n # if queue has been exhausted and the end has not been reached\n return -1", "def next_round(self, old_round):\n new_round = np.copy(old_round) #copy of the old grid\n # for each square\n for i in range(Lx):\n for j in range(Ly):\n if old_round[i][j] == 0 : #if the cell is dead, it will live if it has 3 living neighbors.\n if self.sum_living_cell(i, j, old_round) == 3:\n new_round[i][j] = 1\n else:\n new_round[i][j] = 0\n if old_round[i][j] == 1 : #if the cell is alive, it won't dead if it has 2 or 3 living neighors.\n square_score = self.sum_living_cell(i, j, old_round)\n if square_score != 2 and square_score != 3 :\n new_round[i][j] = 0\n else:\n new_round[i][j] = 1\n return new_round", "def check_complete_board(start_pos, dim_square, board):\n change = False\n for row in range(8):\n for col in range(8):\n # Grab image on real board\n im = region_grabber((start_pos[0] + col * dim_square[0],\n start_pos[1] - (row + 1.0) * dim_square[1],\n start_pos[0] + (col + 1.0) * dim_square[0],\n start_pos[1] - row * dim_square[1]))\n\n # Check if piece corresponds with piece on board if there is a piece\n if piece_on_pos((row, col), board):\n obj = board[row][col]\n if (row + col) % 2 == 0: # Black background\n pos = imagesearcharea(obj.im_b, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n else: # White background\n pos = imagesearcharea(obj.im_w, 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n continue\n\n # Else 
--> Go through every possible image\n if (row + col) % 2 == 0: # Black background\n # Pawn\n pos = imagesearcharea(\"Images/PWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = imagesearcharea(\"Images/KBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWB.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBB.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n else: # White background\n # Pawn\n pos = imagesearcharea(\"Images/PWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PW\")\n continue\n pos = imagesearcharea(\"Images/PBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Pawn(\"PB\")\n continue\n # Tower\n pos = imagesearcharea(\"Images/TWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TW\")\n continue\n pos = imagesearcharea(\"Images/TBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Tower(\"TB\")\n continue\n # Horse\n pos = imagesearcharea(\"Images/HWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HW\")\n continue\n pos = imagesearcharea(\"Images/HBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Horse(\"HB\")\n continue\n # Bishop\n pos = imagesearcharea(\"Images/BWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BW\")\n continue\n pos = imagesearcharea(\"Images/BBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Bishop(\"BB\")\n continue\n # King\n pos = imagesearcharea(\"Images/KWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KW\")\n continue\n pos = 
imagesearcharea(\"Images/KBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = King(\"KB\")\n continue\n # Queen\n pos = imagesearcharea(\"Images/QWW.jpg\", 0, 0, 0, 0, 0.9, im) # White\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QW\")\n continue\n pos = imagesearcharea(\"Images/QBW.jpg\", 0, 0, 0, 0, 0.9, im) # Black\n if pos != [-1, -1]:\n change = True\n board[row][col] = Queen(\"QB\")\n continue\n board[row][col] = None\n\n if change:\n pyautogui.moveTo(start_pos[0] + 4 * dim_square[0],\n start_pos[1] - 4 * dim_square[1], 0.2)\n\n return change", "def player(board):\n if board == initial_state():\n return X\n\n total_x = 0\n total_o = 0\n\n for i in board:\n total_x += i.count(X)\n total_o += i.count(O)\n\n if (total_x + total_o) % 2 == 1:\n return O\n else:\n return X", "def changeCell(self, i, j):\n\t\t#If Cell is on Top row\n\t\tif(i==0):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[0][1] + self.board[1][0] + self.board[1][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[0][self.size-2] + self.board[1][self.size-2] + self.board[1][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[0][j-1] + self.board[1][j] + self.board[0][j+1] + self.board[1][j-1] + self.board[1][j+1]\n\t\t\t\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell on Bottom row\n\t\telif(i==(self.size-1)):\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[self.size-1][1] + self.board[self.size-2][0] + self.board[self.size-2][1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[self.size-1][self.size-2] + self.board[self.size-2][self.size-2] + self.board[self.size-2][self.size-1]\n\t\t\telse:\n\t\t\t\tn = self.board[self.size-1][j-1] + self.board[self.size-2][j] + self.board[self.size-1][j+1] + self.board[self.size-2][j-1] + self.board[self.size-2][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t#If Cell is in a middle row\n\t\telse:\n\t\t\tif(j==0):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\telif(j==(self.size-1)):\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1]\n\t\t\telse:\n\t\t\t\tn = self.board[i-1][j] + self.board[i+1][j] + self.board[i][j-1] + self.board[i-1][j-1] + self.board[i+1][j-1] + self.board[i][j+1] + self.board[i-1][j+1] + self.board[i+1][j+1]\n\t\t\tif((n == 2 and self.board[i][j] == 1) or n == 3):\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn 0", "def solve_board(bd):\n if is_solved(bd):\n print_board(bd)\n return\n elif len(next_valid_boards(bd)) == 0:\n return False\n else:\n for board in next_valid_boards(bd):\n solve_board(board)", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n 
self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def ai_3(board: BoardState) -> BoardState:\n cur_piece = board.cpiece\n if cur_piece is not None:\n moved = False\n for (x,y) in board.open_spots:\n move = find_win_spot(cur_piece, board)\n if move:\n board[move] = board.cpiece_id\n moved = True\n break\n if not moved:\n board[choice(list(board.open_spots))] = board.cpiece_id\n board.cpiece_id = choose_none_winable_piece(board)\n else:\n board.cpiece_id = choose_none_winable_piece(board)\n\n if (board.cpiece_id is None) and not board.is_full:\n board.cpiece_id, _ = choice(list(board.unused_game_pieces))\n return board", "def get_new_game_state(self, game_state, line, vector, current_color): \n\n #Determine if the move is parallel to the line\n parallel = False\n \n if len(line) > 1:\n if (line[0][0]+vector[0], line[0][1]+vector[1]) == line[1]:\n parallel = True\n if (line[-1][0]+vector[0], line[-1][1]+vector[1]) == line[-2]:\n parallel = True\n\n\n if parallel:\n\n #Find the rear marble in the motion\n start = line[0] if sum_tuples(line[0], vector) == line[1] else line[-1]\n end = line[-1] if start==line[0] else line[0]\n\n off_end = sum_tuples(end, vector)\n if coord_in_board(off_end) and game_state[off_end] == current_color: return None\n\n counting_others = False\n self_count = 0\n other_count = 0\n current = start\n chain = [2]\n #Put the marbles in chain until an empty space or the edge is reached\n while coord_in_board(current) and game_state[current]!=2:\n\n current_marble = game_state[current]\n if current_marble == current_color:\n if counting_others: \n return None\n else:\n self_count+=1\n else:\n other_count+=1\n counting_others=True\n \n if self_count>3 or other_count > 3 or other_count>=self_count: return None\n\n chain.append(current_marble)\n current = (current[0] + vector[0], current[1]+vector[1])\n\n #Check if ball is being pushed off\n if not counting_others and not coord_in_board(current): \n return None\n \n #Lay down the chain onto the new game state\n new_game_state = game_state.copy()\n current = start\n for marble in chain:\n x,y = current\n if ((1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4)):\n new_game_state[current] = marble\n current = current[0]+vector[0], current[1]+vector[1]\n\n return new_game_state\n\n else: #Perpendicular moves\n\n for coord in line:\n move_coord = coord[0]+vector[0], coord[1]+vector[1]\n \n x,y = move_coord\n in_board = ((1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4))\n if in_board and game_state[move_coord] != 2:\n return None\n elif not in_board:\n return None\n\n new_game_state = game_state.copy()\n for coord in line:\n new_game_state[coord] = 2\n move_coord = coord[0]+vector[0], coord[1]+vector[1]\n x,y = coord\n if (1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4):\n new_game_state[move_coord] = current_color\n\n return new_game_state", "def get_board_state(self):\n\n board_state = 
''\n for i in range(0, 3):\n board_state += ''.join([self.board['{}{}'.format(i, j)] for j in range(0, 3)])\n return board_state", "def assignState(self):\n\t\tblack = ['r', 'n', 'b','q','k','b','n','r']\n\t\twhite = ['R','N','B','Q','K','B','N','R']\n\n\t\tfor i in range(8):\n\t\t\tself.squares[8*i + 0].state = black[i]\n\t\t\tself.squares[8*i + 1].state = 'p'\n\t\t\tself.squares[8*i + 2].state = '.'\n\t\t\tself.squares[8*i + 3].state = '.'\n\t\t\tself.squares[8*i + 4].state = '.'\n\t\t\tself.squares[8*i + 5].state = '.'\n\t\t\tself.squares[8*i + 6].state = 'P'\n\t\t\tself.squares[8*i + 7].state = white[i]\n\n\t\tfor square in self.squares:\n\t\t\tself.boardMatrix.append(square.state)", "def next_move(self, board):\n \n return self.best_move(self.score_columns(board))", "def result(board, action):\n newstate = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n rows = 3\n columns = 3\n for i in range(rows):\n for j in range(columns):\n newstate[i][j] = board[i][j]\n# print(newstate)\n# print(action)\n ival = action[0]\n jval = action[1]\n if ival > 2:\n raise Exception(\"invalid i action\")\n if jval > 2:\n raise Exception(\"invalid j action\")\n if board[ival][jval] != EMPTY:\n raise Exception(\"invalid action\")\n else:\n if player(board) == X:\n newstate[ival][jval] = X\n else:\n newstate[ival][jval] = O\n return newstate\n\n #raise NotImplementedError", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move" ]
[ "0.8105419", "0.74080914", "0.7388347", "0.73284864", "0.7323322", "0.72788733", "0.7252915", "0.7215804", "0.72030765", "0.72030765", "0.7184667", "0.70937914", "0.70153326", "0.7014133", "0.7008801", "0.7003514", "0.7002293", "0.69419354", "0.693517", "0.69348997", "0.6908507", "0.69000673", "0.6897155", "0.68825406", "0.68706083", "0.68639475", "0.680844", "0.6763261", "0.6758143", "0.6748912", "0.6732388", "0.6730651", "0.67302865", "0.6730218", "0.6716199", "0.67072415", "0.6697106", "0.6686803", "0.6686069", "0.66838014", "0.6683459", "0.6668849", "0.666485", "0.66589147", "0.6657164", "0.66500616", "0.66171855", "0.6596276", "0.6589492", "0.6558643", "0.6552841", "0.65444696", "0.65268177", "0.6514629", "0.6498557", "0.64975005", "0.6497002", "0.6493691", "0.6467423", "0.6466476", "0.64604795", "0.6458959", "0.6444556", "0.64379406", "0.64342237", "0.6428214", "0.64180845", "0.6413758", "0.6405216", "0.63795304", "0.6362181", "0.6361813", "0.6353251", "0.63522816", "0.6344578", "0.6343548", "0.6340937", "0.63388085", "0.6338002", "0.63376856", "0.63376856", "0.6333187", "0.63287544", "0.6322808", "0.6281488", "0.6279367", "0.6252217", "0.6240589", "0.62278295", "0.62258077", "0.6224688", "0.62163687", "0.62117046", "0.6211699", "0.6193729", "0.6190101", "0.616511", "0.6162786", "0.6157419", "0.6154687", "0.6154634" ]
0.0
-1
receive from whichever address, echo to certain address
def run(self): super().run() echo = self.echo local = self.local remote = self.remote transport = Transceiver(local) transport.set_timeout(0.5) self.__result: list[Entry] = [] while True: try: packet = transport.recv(None) params = frame.deserialize(packet) seq = params["seq"] total = params["total"] t_master = params["t_master"] infinite = params["infinite"] payload = params["payload"] t_slave = time.time() if echo: data_send = frame.serialize(infinite, seq, total, t_master, t_slave, payload) transport.send(remote, data_send) t_ul = (t_slave - t_master) * 1000 self.add_result(Entry(seq, total, t_ul, 0)) print(f"seq = {seq}, ul = {t_ul:.2f} ms, payload: {hex_str(payload)}") if frame.is_end(params): print(f"receive last packet!") break except socket.timeout: continue except KeyboardInterrupt: break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _receive(self, what, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what)\n\n # print(\"DEBUG \" + tag_string)\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n ' --print --address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _receive cmd shlex list: ', cmd\n\n try:\n client = subprocess.Popen(cmd, shell=False,\n stdout=subprocess.PIPE)\n\n # client.communicate is blocking\n raw_out = client.communicate()\n # print('DEBUG1 ', raw_out)\n\n # value is stored as first tuple element\n # between a pair of square brackets\n\n raw_string = raw_out[0]\n # print(\"DEBUG2 \" + str(raw_string))\n raw_string = str(raw_string)\n out = raw_string[(raw_string.find('[') + 1):raw_string.find(']')]\n # print(\"DEBUG4 \" + out)\n return out\n\n except Exception as error:\n print('ERROR enip _receive: ', error)", "def _receive(self, what, address, **kwargs):\n\n print('_receive: please override me.')", "def sendto(self,msg,address):\n\n address = self.pubIPToMorse(address);\n \n if not self.validIPAndPort:\n print(\"Error: Invalid IP and port or socket has not been bound with an IP and port: message not sent!\");\n return;\n\n to_ip_addr = address[0];\n to_port = address[1];\n msg = msg.decode(\"utf-8\"); #Convert from bytearray to a string for ease of operation\n\n # Assemble UDP package\n udp_package = to_port + self.my_port + msg;\n\n # Assemble IP package\n ip_header = to_ip_addr + self.my_ip_addr + self.protocol_identifier + t.base36encode(len(udp_package));\n ip_package = ip_header + udp_package;\n\n # Assemble MAC package\n # First check to see if the MAC of the recieving IP is known, if not address message to router\n if to_ip_addr in self.macDict.keys(): mac_to = self.macDict[to_ip_addr];\n else: mac_to = self.macDict['router_mac']; # This only works if you're not the router...\n # Then assemble the remainder of the MAC package\n mac_from = self.my_mac;\n # Send the message\n print(mac_to+mac_from+ip_package)\n t.sendMessage(mac_to,mac_from,ip_package);", "def accepted(self, connection, address):\n data = connection.recv(1024)\n sys.stdout.write(\"receive from client: {}\\n\".format(data.decode()))\n # split needs to avoid \"ls -l\" execute as ONE COMMAND\n res = subprocess.check_output(data.decode().split(\" \"))\n print(\"---\")\n print(res)\n connection.send(res)\n sys.stdout.write(\"send to client: {}\\n\".format(data.decode()))", "def listener(address):\n\n\tlisten_socket = socket(AF_INET, SOCK_DGRAM)\n\tlisten_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\tlisten_socket.bind(address)\n\n\twhile True:\n\t\tmessage, client_address = listen_socket.recvfrom(512)\n\n\t\tparse_header(message[:12])\n\t\trequested_domain, requested_record, query_length = parse_body(message[12:])\n\n\t\theader, body = generate_query(requested_domain, requested_record, message[12:12 + query_length], message)\n\t\tlisten_socket.sendto(header + body, client_address)", "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients 
connected to this middlebox;\n # send the packet there.\n if packet.is_raw_data:\n if not (packet.src, packet.dest) in self.buffer:\n self.buffer[packet.src, packet.dest] = \"\"\n start = len(self.buffer[(packet.src, packet.dest)])\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)] + packet.payload\n i = max(start, 47)\n while i < len(self.buffer[(packet.src, packet.dest)]):\n i += 1\n h = utils.get_hash(self.buffer[(packet.src, packet.dest)][i-48:i])\n if utils.get_last_n_bits(h, 13) == self.GLOBAL_MATCH_BITSTRING:\n block = self.buffer[(packet.src, packet.dest)][:i]\n self.cache[utils.get_hash(block)] = block\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)][i:]\n i = 47\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, False, block), self.address_to_port[packet.dest])\n\n # remainder = self.buffer[(packet.src, packet.dest)][self.BLOCK_SIZE:]\n \n if packet.is_fin:\n block = self.buffer[(packet.src, packet.dest)]\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, True, block), self.address_to_port[packet.dest])\n self.buffer[(packet.src, packet.dest)] = \"\"\n else:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, packet.is_fin, self.cache[packet.payload]), self.address_to_port[packet.dest])\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_raw_data:\n if not (packet.src, packet.dest) in self.buffer:\n self.buffer[packet.src, packet.dest] = \"\"\n start = len(self.buffer[(packet.src, packet.dest)])\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)] + packet.payload\n i = max(start, 47)\n while i < len(self.buffer[(packet.src, packet.dest)]):\n i += 1\n h = utils.get_hash(self.buffer[(packet.src, packet.dest)][i-48:i])\n if utils.get_last_n_bits(h, 13) == self.GLOBAL_MATCH_BITSTRING:\n block = self.buffer[(packet.src, packet.dest)][:i]\n if utils.get_hash(block) in self.cache:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, False, False, utils.get_hash(block)), self.wan_port)\n else:\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, False, block), self.wan_port)\n self.buffer[(packet.src, packet.dest)] = self.buffer[(packet.src, packet.dest)][i:]\n i = 47\n\n if packet.is_fin:\n block = self.buffer[(packet.src, packet.dest)]\n if utils.get_hash(block) in self.cache:\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, False, True, utils.get_hash(block)), self.wan_port)\n else:\n self.cache[utils.get_hash(block)] = block\n self.send_block(tcp_packet.Packet(packet.src, packet.dest, True, True, block), self.wan_port)\n self.buffer[(packet.src, packet.dest)] = \"\"\n else:\n # 1/0\n self.send_block(packet, self.wan_port)", "def receive(self, message):", "async def view_receive_addresses(w):\n title = \"Proof Wallet: View Receive Addresses\"\n start = 0\n N = 10\n while True:\n external = w.deriveaddresses(start, start + N - 1, 0)\n internal = w.deriveaddresses(start, start + N - 1, 1)\n\n # display receive addreses\n addr_str = \"Derivation | Receive Address\\n\"\n for i, addr in enumerate(external):\n addr_str += f\"m/0/{str(i + start)} | \"\n addr_str += f\"{color_text(addr, GREEN_COLOR, fg)}\\n\"\n\n # display change addreses\n addr_str += f\"\\nDerivation | Change Address\\n\"\n for i, addr in enumerate(internal):\n addr_str += f\"m/1/{str(i + 
start)} | \"\n addr_str += f\"{color_text(addr, YELLOW_COLOR, fg)}\\n\"\n\n msg = f\"\"\"{title}\n\nAddresses {start} to {start + N - 1}\n\n{addr_str}\n\nControls\n'n' -- Next {N} addresses\n'p' -- Previous {N} addresses\n'x' -- Go back to wallet menu\n\"\"\"\n ch = await ux_show_story(msg, ['n', 'p', 'x'])\n if ch == 'n':\n start = start + N\n elif ch == 'p' and start > 0:\n start = start - N\n elif ch == 'x':\n return", "def receiver():\r\n global data\r\n DW1000.newReceive()\r\n DW1000.receivePermanently()\r\n DW1000.startReceive()", "def receive():\n pass", "def applicationA(data=None):\n\tsend(send_to_B['ip'], send_to_B['port'], data=data)\n\n\tprint receive(send_to_A['ip'], send_to_A['port'])", "def read_and_response(self, vsr, address_h, address_l):\n # time.sleep(0.2)\n self.send_cmd([vsr, 0x41, address_h, address_l])\n # time.sleep(0.2)\n resp = self.read_response() # ie resp = [42, 144, 48, 49, 13]\n reply = resp[2:-1] # Omit start char, vsr address and end char\n reply = \"{}\".format(''.join([chr(x) for x in reply])) # Turn list of integers into ASCII string\n # print(\" RR. reply: {} (resp: {})\".format(reply, resp)) # ie reply = '01'\n return resp, reply", "def receive(self, packet):\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n # if packet.is_fin:\n # print(\"2nd wan sees a fin\")\n\n if packet.is_fin and len(packet.payload) == 0:\n # print(\"empty fin, foward fin\")\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash not in self.hash_to_raw_data.keys():\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = False)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # reset buffer\n self.send(packet, self.address_to_port[packet.dest]) # forward empty fin\n return\n \n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n \n if packet.is_raw_data:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n pack_buff += packet.payload\n\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n # print(\"sending1\")\n if block_hash in self.hash_to_raw_data.keys():\n # send extract data from hash in packet\n block_to_send = self.hash_to_raw_data[block_hash]\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n\n if remaining_buff:\n # print(\"wan to client remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n block_hash = get_hash(remaining_buff)\n block_to_send = remaining_buff\n # print(\"sending2\")\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n # print(\"sending fin1\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n 
pack_buff = \"\"\n if packet.is_fin:\n # print(\"sending fin2\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff\n else:\n block_hash = packet.payload\n block_to_send = self.hash_to_raw_data[block_hash]\n # print(\"sending3\")\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = False)\n if packet.is_fin:\n # print(\"sending fin3\")\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.address_to_port[packet.dest])\n # self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\" # TESTING\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n if packet.is_fin and len(packet.payload) == 0:\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n block_hash = get_hash(pack_buff)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = pack_buff\n self.send_data_in_packets(packet.src, packet.dest, True, False, pack_buff, is_wan_port = True)\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n self.send(packet, self.wan_port)\n return\n\n if (packet.src, packet.dest) not in self.srcdest_to_buffer.keys():\n self.srcdest_to_buffer[(packet.src, packet.dest)] = \"\"\n pack_buff = self.srcdest_to_buffer[(packet.src, packet.dest)]\n\n pack_buff += packet.payload\n block_list, remaining_buff = self.break_data_into_blocks(pack_buff)\n\n # send off all completed blocks\n for block_to_send in block_list:\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n\n if remaining_buff:\n # print(\"wan to wan remaining_buff: \" + remaining_buff)\n if packet.is_fin:\n # print(\"finfin\")\n block_to_send = remaining_buff\n block_hash = get_hash(block_to_send)\n if block_hash in self.hash_to_raw_data.keys():\n # send hash in packet\n pack = Packet(packet.src, packet.dest, False, False, block_hash)\n self.send(pack, self.wan_port)\n else:\n self.hash_to_raw_data[block_hash] = block_to_send\n self.send_data_in_packets(packet.src, packet.dest, True, False, block_to_send, is_wan_port = True)\n fin_pack = Packet(packet.src, packet.dest, True, True, \"\")\n self.send(fin_pack, self.wan_port)\n pack_buff = \"\"\n else:\n pack_buff = remaining_buff\n else:\n pack_buff = \"\"\n self.srcdest_to_buffer[(packet.src, packet.dest)] = pack_buff", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n 
self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def answer(self, address, sock):\n self.headers[\"QR\"] = 1\n header = headerdict2bytes(self.headers)\n body = datadict2bytes(self.data)\n sock.sendto(header + body, address)", "def receive_message(datagram, connection):", "def send(msg, dest=None):", "def do_ospfd(self, line):\n try:\n self.fibbing[line].call('telnet', 'localhost', '2604')\n except KeyError:\n log.error('Unknown node %s', line)", "def _receive(self, what, address='localhost:502', **kwargs):\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('r')\n TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: kwargs\n if 'count' in kwargs:\n count = kwargs['count']\n COUNT = '--count {} '.format(kwargs['count'])\n else:\n count = 1\n COUNT = '--count {} '.format(1)\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT\n )\n # print 'DEBUG modbus_receive cmd shlex list: ', cmd\n\n try:\n client = subprocess.Popen(cmd, shell=False,\n stdout=subprocess.PIPE)\n\n # client.communicate is blocking\n raw_out = client.communicate()\n # print 'DEBUG modbus _receive raw_out: ', raw_out\n\n # value is stored as first tuple element\n # between a pair of square brackets\n raw_string = raw_out[0].strip()\n\n # NOTE: registers store int\n if what[0] == 'HR' or what[0] == 'IR':\n\n # NOTE: single read returns an int\n if count == 1:\n out = int(raw_string[1:-1])\n\n # NOTE: multiple reads returns a list of ints\n else:\n out = []\n hrs = raw_string[1:-1].split(',')\n for hr in hrs:\n out.append(int(hr))\n if len(out) != count:\n raise ValueError('Wrong number of values in the response.')\n\n # NOTE: coils and discrete inputs store 8 bools\n elif what[0] == 'CO' or what[0] == 'DI':\n # print 'DEBUG modbus _receive bools: ', bools\n\n # NOTE: pymodbus always returns at least a list of 8 bools\n bools = raw_string[1:-1].split(',')\n\n # NOTE: single read returns a bool\n if count == 1:\n if bools[0] == 'False':\n out = False\n elif bools[0] == 'True':\n out = True\n else:\n raise TypeError('CO or DI values must be bool.')\n\n # NOTE: multiple reads returns a list of bools\n else:\n out = []\n i = 0\n for b in bools:\n if i >= count:\n break\n elif b.strip() == 'False':\n out.append(False)\n elif b.strip() == 'True':\n out.append(True)\n else:\n raise TypeError('CO or DI values must be bool.')\n i += 1\n\n return out\n\n except Exception as error:\n print('ERROR modbus _receive: ', error)", "def send_and_receive(printer_address, code):\r\n\r\n message_data = '~{0}\\r\\n'.format(code)\r\n\r\n printer_socket = socket.socket()\r\n printer_socket.settimeout(TIMEOUT_SECONDS)\r\n printer_socket.connect((printer_address['ip'], printer_address['port']))\r\n printer_socket.send(message_data.encode())\r\n data = printer_socket.recv(BUFFER_SIZE)\r\n printer_socket.close()\r\n\r\n return data.decode()", "def receiver(): \n global data\n DW1000.newReceive()\n DW1000.receivePermanently()\n DW1000.startReceive()", "async def route(self, subnet):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"route\", \"for\", str(subnet)])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))", "def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return 
send_and_receive(addr, msg, 30) # manual timeout !!!!! fix it!", "def echo_client(host, port):\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Connect the socket to the server\n server_address = (host, port)\n print \"Connecting to %s port %s\" % server_address\n sock.connect(server_address)\n\n while True:\n # Send data\n try:\n # Send data\n # message = raw_input()\n # print \"Sending %s\" % message\n # sock.sendall(message)\n # # Look for the response\n message = raw_input('\\ncontinue receive data? [Y / N]')\n sendata = raw_input('send something?\\n')\n if sendata is not None:\n sock.sendall(sendata)\n if message == 'N' or message == 'n':\n break\n \n data = sock.recv(1024)\n sock.sendall('reply:'+data)\n print(\"Received: %s\" ) % data\n except socket.errno, e:\n print \"Socket error: %s\" %str(e)\n except Exception, e:\n print \"Other exception: %s\" %str(e)\n # finally:\n # print \"Closing connection to the server\"\n # # sock.close()\n print('end connection\\n')\n sock.close()", "def recv(self):\n while True:\n data, useless = self.sok.recvfrom(1024)\n lines = data.split('\\n')\n neighbor_name = lines[1]\n if neighbor_name in self.neighbors:\n if lines[0] == 'ROUTE UPDATE':\n self.route_update(self.neighbors[neighbor_name], lines[2:])\n elif lines[0] == 'LINK UP':\n self.link_up_respond(self.neighbors[neighbor_name])\n elif lines[0] == 'LINK DOWN':\n self.link_down_respond(self.neighbors[neighbor_name])", "def cmd_here(command: IncomingCommand, replies: Replies) -> None:\n if command.payload:\n address = command.payload\n else:\n address = \"Berlin, Germany\"\n # print(address)\n replies.add(**get_maptile(address))", "def getreceivedbyaddress(self, vergeaddress, minconf=1):\n return self.proxy.getreceivedbyaddress(vergeaddress, minconf)", "def receive(self, packet):\n packet_key = (packet.src, packet.dest)\n # receiving wan\n if packet.dest in self.address_to_port:\n # The packet is destined to one of the clients connected to this middlebox;\n # send the packet there.\n\n if not packet.is_raw_data:\n block = self.hash_payloads[packet.payload]\n self.send_block(block, packet_key, packet.is_fin, False)\n else:\n self.add_packet_to_buffer(packet_key, packet)\n curr_block = self.buffers[packet_key]\n block_length = len(curr_block)\n left = 0\n right = 48 if block_length >= 48 else block_length\n while right <= block_length:\n block = curr_block[left:right]\n block_hash = utils.get_hash(block)\n is_delimiter = utils.get_last_n_bits(block_hash, 13) == self.GLOBAL_MATCH_BITSTRING\n if is_delimiter:\n send_block = curr_block[:right]\n curr_block = curr_block[right:]\n self.buffers[packet_key] = curr_block\n self.determine_if_hashed(send_block, packet_key, packet.is_fin, False)\n left = right\n if block_length - right >= 48:\n right = right + 48\n else:\n right = block_length\n if packet.is_fin:\n send_block = curr_block[left: block_length]\n self.determine_if_hashed(send_block, packet_key, True, False)\n self.buffers.pop(packet_key, None)\n break\n else:\n left = left + 1\n right = right + 1\n if packet_key in self.buffers:\n if packet.is_fin:\n send_block = self.buffers[packet_key]\n self.determine_if_hashed(send_block, packet_key, True, False)\n self.buffers.pop(packet_key, None)\n else:\n self.buffers.pop(packet_key, None)\n # sending wan\n else:\n # The packet must be destined to a host connected to the other middlebox\n # so send it across the WAN.\n\n self.add_packet_to_buffer(packet_key, packet)\n curr_block = self.buffers[packet_key]\n 
block_length = len(curr_block)\n left = 0\n right = 48 if block_length >= 48 else block_length\n while right <= block_length:\n block = curr_block[left:right]\n block_hash = utils.get_hash(block)\n is_delimiter = utils.get_last_n_bits(block_hash, 13) == self.GLOBAL_MATCH_BITSTRING\n if is_delimiter:\n send_block = curr_block[:right]\n curr_block = curr_block[right:]\n self.buffers[packet_key] = curr_block\n self.determine_if_hashed(send_block, packet_key, packet.is_fin, True)\n left = right\n if block_length - right >= 48:\n right = right + 48\n else:\n right = block_length\n if packet.is_fin:\n send_block = curr_block[left: block_length]\n self.determine_if_hashed(send_block, packet_key, True, True)\n self.buffers.pop(packet_key, None)\n break\n else:\n left = left + 1\n right = right + 1\n if packet_key in self.buffers:\n if packet.is_fin:\n send_block = self.buffers[packet_key]\n self.determine_if_hashed(send_block, packet_key, True, True)\n self.buffers.pop(packet_key, None)\n else:\n self.buffers.pop(packet_key, None)", "def poll_data(self):\n with s.socket(s.AF_INET, s.SOCK_DGRAM) as sock:\n sock.bind(('', self.__port))\n while True:\n message, address = sock.recvfrom(1024)\n self.__address = address\n logging.debug('Received: {}'.format(message))\n self.process_data(message)", "def main(args):\n client = MulticastCANClient((args.mcast_address, args.mcast_port), None)\n while True:\n client.recvMsg()", "def add_message_to_out_buff(self, address, message):\n\t\ttry:\n\t\t\tself.nodes[address].add_message_to_out_buff(message)\n\t\t\tprint(\n\t\t\t\tf'Add message with type = {message.type} from {message.get_source_server_address()} to {address} out buffer.')\n\t\texcept Exception as e:\n\t\t\t# desired_trace = traceback.format_exc(sys.exc_info())\n\t\t\tprint('Problem with sending message!' 
+ message.body)", "def goto_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the taken off message\n\tprint a2, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init2\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1", "def receive(self, msg):\n pass", "def change_ip(sender_socket, ip, port):\n sender_socket.sendto(bytes(\"change ip\", \"UTF-8\"), (ip, port))\n new_ip_str = input(\"New Host IP Address: \")\n sender_socket.sendto(bytes(new_ip_str, \"UTF-8\"), (ip, port))\n sleep(0.5)\n status = sender_socket.recv(BUFFER_SIZE)\n status_message = status.decode(\"UTF-8\")\n if \"IP Address Successfully Changed\" in status_message:\n print(status_message)\n return True\n else:\n print(status_message)\n return False", "def recieve_can(offset):\n panda = Panda()\n while True:\n data = panda.can_recv()\n if data != []: \n for x in data:\n if x[0] == offset: \n if x[3] == 0: \n mes = f'{x[0]}, 0x{x[2].hex()}, {x[3]}'\n print(mes)", "def process_command_echo(command, replies):\n message = command.message\n contact = message.get_sender_contact()\n sender = 'From: {} <{}>'.format(contact.display_name, contact.addr)\n replies.add(text=\"{}\\n{!r}\".format(sender, command.payload))", "def client(msg):\n # address changed to 10000 for server2.py\n infos = socket.getaddrinfo('127.0.0.1', 10000)\n stream_info = [idx for idx in infos if idx[1] == socket.SOCK_STREAM][0]\n client_msg = socket.socket(*stream_info[:3])\n client_msg.connect(stream_info[-1])\n client_msg.sendall(msg.encode('utf8'))\n buffer_length = 80\n msg_complete = False\n msg = u''\n while not msg_complete:\n part = client_msg.recv(buffer_length)\n msg += part.decode('utf8')\n if len(part) < buffer_length:\n msg_complete = True\n client_msg.close()\n return msg\n print(msg)", "def applicationB(data=None):\n\tdata = receive(send_to_B['ip'], send_to_B['port'])\n\n\tdata = handle(data)\n\n\ttime.sleep(2)\n\tsend(send_to_A['ip'], send_to_A['port'], data=data)", "def route(self, connection, data):\n sender = self.connections[connection]\n try:\n splited_data = re.search('@(.*?)[\\s,](.*)', data)\n user, message = splited_data.group(1), splited_data.group(2)\n if user not in self.connections.values():\n response = ('server', f'No such user \"{user}\" active',)\n self.send(connection, response)\n for client in self.connections:\n if self.connections[client] == user and message:\n logging.info(f'{sender} sending to {user}')\n self.send(client, (sender, message.rstrip(),))\n except AttributeError:\n response = ('server', 'Please, enter some message',)\n self.send(connection, response)", "def receive_pickle(self,message,address):\n\t\tcomplete=False\n\t\tdata=message\n\t\tprint(\"Address of sender:\",address)\n\t\tpnum=self.pladdr.index(address)+1\n\t\twhile not complete:\n\t\t\ttry:\n\t\t\t\tprint(\"Server trying to 'loads' data\")\n\t\t\t\tpickle.loads(data)\n\t\t\t\tcomplete=True\n\t\t\t\tself.send('complete',pnum,'pack')\n\t\t\texcept:\t\t\t\t\t\t\t\t\t\t\n\t\t\t\tprint(\"Unable to 'loads', waiting for the next packet\")\n\t\t\t\tself.send('next_datagram',pnum,'pack')\t# self.receive can't be called \n\t\t\t\tmsg,addr=self.listener.recvfrom(512)\t# here because this very function\n\t\t\t\tdata+=msg\t\t\t\t\t\t\t\t# was called from it.\n\t\treturn data", "def incomingMsg(self, ip):\n #print(\"[ACTM] Receiving data for IP\", ip)\n if ip in self.ip:\n idx = self.ip.index(ip)\n 
sID = idx + 1 #karena index mulai dari 0\n self.actChanged.emit(sID, True) #lalu notifikasi dashboard\n self.timer[idx].start() #lalu jalankan timernya", "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def datagram_received(self, data, addr):\n self.node.notify(Task(TaskType.PEER_MSG, (data, addr)))", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(2048)\r\n print(\"Response \", self.response)\r\n except socket.error as exc:\r\n print (\"Receive Thread caught exception socket.error : %s\" % exc)", "def receive(ip_addr: str, port: int, verbose: bool):\n # Set the socket up to run across internet and to use UDP\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Bind our socket to the passed in IP and port num\n sock.bind((ip_addr, port))\n\n # Recieve the data as well as the address that sent it\n data_received, addr = sock.recvfrom(BUFF_SIZE)\n\n # data_received = data.decode(\"utf-8\") # Decode the bytes string\n if verbose:\n print(\"Received message: {} from: {}\".format(data_received, addr))\n\n message = dissect_message(data_received, verbose) # Dissect the message\n return message", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def friend(tcp, udp, userId, data):\n\n # from server get address of potential friend\n tcp.sendMessage('SEARCH ' + data[0])\n address = tcp.receiveMessage().split()[-2:]\n address = (address[0], int(address[1]))\n\n # send friend request\n if address:\n udp.sendto('FRIEND ' + userId, address)\n print 'Sent friend request to ' + data[0]\n else: print 'Could not send friend request to ' + data[0]", "def receive_message(self, message):", "def privmsg(self):\n if(self.data[0]==\"orcbot\"):\n print \"target is orcbot\"\n self.target = self.orcbot_socket\n self.message = \":\" + SOCKET_TO_USERID[self.source] + \"!~@localhost \"+ self.message\n if(self.source == self.orcbot_socket):\n print \"source is orcbot\"\n self.target = USERID_TO_SOCKET[self.data[0]]\n self.message = \":orcbot!~@localhost \" + self.message\n self.send()", "def _receive_multiple(self, what, address, **kwargs):\n\n print('_receive_multiple: please override me.')", "def chatWith(self, item):\n self.c.requestUdpConnection(item.text())", "def set_receiver(self, to: bool):\n self.send_message(b'\\x02' + int(to).to_bytes(1, 'big'))", "def echo(s_socket):\r\n value = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(value) + 5, 4, 'little')\r\n s_socket.send('d' + bytes_value + value)\r\n print(s_socket.recv(64))", "def send(event=None): #event is passed by binders.\n try:\n msg = my_msg.get()\n my_msg.set(\" \") #Clears input field.\n client_socket.send(bytes(msg, \"utf8\"))\n \n except:\n \n HOST = '10.0.0.8'\n PORT = 8081\n ADDR = (HOST, PORT)\n \n s = socket(AF_INET, SOCK_STREAM)\n client_socket.bind(ADDR)\n s.connect((HOST, PORT))\n s.send(msg)\n \n if msg == \"{quit}\":\n client_socket.close()\n top.destroy()", "def connectionMade(self):\n print \"connection received from\", self.addr", "def transfer(self, address, direction, repeats):\n if direction == \"in\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"inlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"outlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + 
\"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n elif direction == \"out\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"outlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"inlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n else:\n pass # return error", "def lineReceived(self, line):\n self.sendLine('reply '+line)", "def goto_first():\n\tglobal c1\n\tglobal a1\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c1.recv(BUF_SIZE) # wait for taken off message\n\tprint a1, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init1\n\t\tc1.send(json.dumps(new_msg))\n\t\tstate += 1", "def datagram_received(self, data, addr):\n self.decode_msg(data, self.state)", "def run_command(command, sender_socket, ip, port):\n command_bytes = bytes(command, \"UTF-8\")\n sender_socket.sendto(command_bytes, (ip, port))\n has_data = True\n while has_data:\n try:\n byte_reply = sender_socket.recv(BUFFER_SIZE)\n str_reply = byte_reply.decode(\"UTF-8\")\n print(str_reply)\n if \"|-- Transfer \" in str_reply:\n handle_keylog_transfer(str_reply, sender_socket)\n except socket.timeout:\n has_data = False", "def bind(self, address, recv=True):\n\n address = self.pubIPToMorse(address);\n\n # Returns with error if inputs are invalid\n if not self.validFamilyAndProtocol:\n print(\"Error: Invalid family and protocol or family and protocol are not initialized: socket not bound!\");\n return;\n\n self.my_ip_addr = address[0];\n self.my_port = address[1];\n self.validIPAndPort = True;\n\n #Starts a monitor function on a new thread that queues messages as they are recieved\n if recv:\n self.qt = threading.Thread(target=r.monitor);\n self.qt.start(); #This may cause a memory leak - unsure.\n \n if self.verbose:\n print(\"Socket bound. Your IP is \" + self.my_ip_addr + \". Your port is \" + self.my_port);", "def _send(self, what, value, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_string: ', tag_string\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send: ', error)", "def _receive_thread(self):\r\n while True:\r\n try:\r\n self.response, ip = self.socket.recvfrom(3000)\r\n except socket.error as exc:\r\n print (f\"Caught exception socket.error: {exc}\")", "def receive_msg(self):\n while True:\n try:\n msg = self.srvsock.recv(100)\n with self.t.location(0, self.k):\n print(msg.decode())\n self.k = self.k + 1\n except BaseException as e:\n print('Server Error! 
Connection shut down.')\n raise e", "def sendto(self, msg, addr, family):\n sock = socket.socket(family, socket.SOCK_DGRAM)\n sock.sendto(msg, addr)\n time.sleep(0.1)", "def forward(self, data):\n host = self._CONFIG.read('syslog', 'host')\n port = self._CONFIG.read('syslog', 'port')\n self._SOC.sendto(bytes(data, encoding='utf-8'), (host, int(port)))", "def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))", "def discover(self, srv_port):\n addr = None\n answ = None\n\n # Creates a new datagram socket to broadcast\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n s.settimeout(self.timeout)\n s.sendto(REQ_HELLO, ('255.255.255.255', srv_port))\n\n # Wait for a server answer\n try:\n answ = s.recvfrom(1024)\n except socket.timeout:\n print 'Timeout exceeded...'\n\n # Close the diagram socket.\n s.close()\n\n if answ is not None and answ[0] == ANS_HELLO:\n # Saves the address if the server answer was correct.\n addr = answ[1]\n return addr", "def receiver(port_receiv):\n sock_receiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #cambiar a udp\n server_address = ('localhost', int(port_receiv)) # usar localhost para pruebas en el mismo portatil(?)\n sock_receiver.bind(server_address)\n\n print(1)\n #bucle para recibir audio\n while True:\n data, addr = sock_receiver.recvfrom(1024) # tamanhio de buffer no definitivo\n # recibir datos, descomprimirlos y etc. luego reproducir\n print (\"received message:\", data.decode('utf-8'))", "async def handle_echo(reader, writer):\r\n addr = writer.get_extra_info('peername')\r\n message = f\"{addr} is connected !!!!\"\r\n CLIENT_DICTIONARY[addr[1]] = Server()\r\n print(message)\r\n while True:\r\n data = await reader.read(10000)\r\n message = data.decode().strip()\r\n if message == 'quit':\r\n CLIENT_DICTIONARY[addr[1]].removelog()\r\n break\r\n print(f\"Received {message} from {addr}\")\r\n reply = CLIENT_DICTIONARY[addr[1]].split(message)\r\n print(f\"Send: {reply}\")\r\n #hello = 'successful'\r\n if reply != '' or reply != 'None':\r\n writer.write(reply.encode())\r\n else:\r\n reply = '.'\r\n writer.write(reply.encode())\r\n await writer.drain()\r\n print(\"Close the connection\")\r\n writer.close()", "def director_address():\n while True:\n #addr = etcd.watch(\"director_publish_addr\")\n #director_address = addr.value\n break", "def relay(self, line):\n if not self.recv:\n self.recv = True\n self.sendingto = list(self.factory.getClients())\n for client in self.sendingto:\n if client != self:\n client.sendString(line)", "def test_address(self):\n result = irc.dccDescribe(\"CHAT arg 3232235522 6666\")\n self.assertEqual(result, \"CHAT for host 192.168.0.2, port 6666\")", "def receive(self):\n pass", "def receive(ip=None, port=0):\n\tsock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n\n\tsock.bind((ip, port))\n\n\twhile True:\n\t\tdata, addr = sock.recvfrom(1024) # buffer size is 1024 bytes\n\t\tif len(data) > 0:\n\t\t\tbreak\n\n\treturn data", "def reply_to(self):\n return self.receiver.remote_source.address", "async def send_to_user(self, user: User, msg: Msg, address: str = None):\n if address is None:\n address = user.current_address\n\n await self.send(msg, address)", "def _speak_address(which, user_data):\n addr = user_data.get(which)\n if not addr:\n return \"I don't know 
your %s address.\" % which\n else:\n return (\"Your %s address is set to %s.\" %\n (which, location.text_to_speech(addr['address'])))", "async def send(self, msg: Msg, address: str):\n ident, interface = address.split(address_split_char)\n\n try:\n inbox = self.plugin_inboxes[interface]\n except AttributeError:\n raise AzuraBotError(f\"There is no inbox registered for \"\n f\"'{interface} (address {address})\")\n\n print(f\"[bot] Private message: AzuraBot->{address}: {msg.text}\")\n await inbox.put(msg)", "def func_to(self, data, get_recv, get_data):\n checking = bytes(data).decode().encode('ascii', 'ignore').decode()\n if not get_data:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[0]\n if not get_recv:\n checking = bytes(data).decode().encode('ascii', 'ignore').decode().splitlines()[1]\n try:\n data_list = checking.split(':')\n remove_bracket = str(data_list[1])\n remove_bracket = remove_bracket[2:-1]\n data_list[1] = remove_bracket\n check = data_list[0].lower().rstrip()\n if check == 'rcpt to':\n message = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message)\n return True\n except:\n return False", "def _on_stdin_read(self, data):\n if not self.opts[\"udp\"]:\n self.fire(write(data))\n else:\n self.fire(write((self.host, self.port), data))", "def read_versa5(self,addr,fullrepsonse=False):\n time.sleep(0.002)\n addr = addr & 0xff\n cmd = bytes([0x07,0xea,addr,0x00])\n res = self.command(0x3c,cmd)\n if fullresponse:\n return res\n else:\n return res.response_data & 0x0ff", "def sendto(self, data, addr):\n asyncio.ensure_future(self.__inner_protocol.send_data(data, addr))", "def client(ip, port, message): \n # Conectado con el servidor\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n try:\n sock.sendall(bytes(message, 'utf-8'))\n response = sock.recv(BUF_SIZE)\n print (\"Recibido por el cliente: %s\" %response)\n finally:\n sock.close()", "def server_client():\r\n MESSAGE = input(\"Mesaj vanzator:\")\r\n\r\n tcpClientA = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n tcpClientA.connect((host, bank_port))\r\n\r\n while MESSAGE != 'stop':\r\n tcpClientA.send(MESSAGE.encode())\r\n data = tcpClientA.recv(BUFFER_SIZE)\r\n print(\"Vanzator a primit:\", data)\r\n MESSAGE = input(\"Mesaj vanzator:\")\r\n\r\n tcpClientA.close()", "def send(event=None): # event is passed by binders.\n msg = my_msg.get()\n print(\"This is send: \",type(msg))\n if msg == \"{quit}\":\n root.quit()\n if msg == \"Type your messages here.\" or msg == \"\" :\n pass\n else:\n final_msg = \"You: \" + msg\n msg_list.insert(END, final_msg)\n receive_msg = receive(msg.lower())\n rec_msg = \"Genie: \" + receive_msg\n msg_list.insert(END, rec_msg)\n my_msg.set(\"\")", "def getAddress(user):", "def get_user_input(self):\n while not self.suspended:\n input = raw_input()\n input = input.split('|')\n if input[0] in ['exit', 'quit', 'kill']:\n self.broadcast('kill')\n self.suspended = True\n for client in self.clients.values():\n client.socket.close()\n self.s.close() # Have to connect to socket to exit server.\n sock = socket(AF_INET, SOCK_STREAM)\n port = bind_to_random(sock)\n sock.connect((str(self.ip), self.port))\n elif len(input) > 1:\n msg = '|'.join(['#server']+input[1:])\n if input[0][:1] == '@':\n destination = input[0][1:].lower()\n if destination == 'server':\n print msg\n elif destination == 'all':\n self.broadcast(msg)\n else:\n client = self.clients.get(destination, None)\n if client:\n 
client_send(client.socket, msg)\n else:\n print 'Destination not active'\n else:\n print msg", "def handle_connection(conn, addr, create):\r\n print 'connection recieved from:', addr\r\n server = create()\r\n server.startup()\r\n while True:\r\n data = conn.recv(1024) # read data from the connection / raw input\r\n if not data:\r\n break\r\n print 'from client:', data\r\n response = server.process(data)\r\n conn.send(response)\r\n server.finish()\r\n conn.close()", "def set_unicast_address(self,serial,address):\n self.set_TX_address(0xff)\n if type(serial) is not str: serial=struct.pack('<BBBHBB',*serial)\n if type(address) is not str: address=chr(address)\n self.send_multicast('\\xb2'+serial+address)", "def receive_and_store(self, socket, addr):\n\n # Create the incoming connection\n conn = IncomingConnection(addr, socket)\n # Receive the data from the connection\n data = conn.recv_data()\n if not data:\n logger.warning(\"Invalid data received\")\n return\n\n # Get the type of data\n transfer_type = data[0]\n\n # Do an appropriate action based on the type of data\n if transfer_type == COMMAND_TRANSFER:\n command_string = data[1]\n command = data[1].split(\" \")[0]\n command_object = Command(command_string, \"00:00:00\", self.command_targets[command])\n self.command_queue.put(command_object)\n logger.debug(\"LightCommandInput: Command added to queue -> \" + data[1])\n\n elif transfer_type == FILE_TRANSFER:\n logger.warning(\"File transfer started\")\n pass", "def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)", "def arm_second():\n\tglobal c1, c2\n\tglobal a1, a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg1 = c1.recv(BUF_SIZE) # wait for the arrival message from first copter\n\tprint a1, ' >> ', msg1\n\tif msg1 != 'Arrived':\n\t\terror(msg1)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = 'ARM'\n\t\tc2.send(new_msg)\n\t\tstate += 1", "def recvfrom(self, buflen):\n\n data = baseRecv(buflen);\n if data is not None:\n message = data[0];\n mac_header = data[1];\n ip_header = data[2];\n udp_header = data[3];\n\n udp_to = udp_header[0];\n mac_from = mac_header[1];\n ip_from = ip_header[1];\n udp_from = udp_header[1];\n\n\n # Add the MAC to the MAC dictionary if it is not already recorded.\n if ip_from in self.macDict: self.macDict[ip_from] = mac_from;\n\n # If the message is not addressed to this computer's IP, discard the message (should be redudant with MAC)\n if ip_to != self.my_ip_addr: return None;\n\n # If the message is not addressed to this application's port, discard the message\n if udp_to != self.my_port: return None;\n\n return message, pubIPToMorse(ip_from,udp_from); \n else: return None;", "def readfrom_into(self, addr: int, buf: bytes, stop: bool = True, /) -> None:", "def readfrom_into(self, addr: int, buf: bytes, stop: bool = True, /) -> None:", "def handleClient(self, connection, address):\r\n # time.sleep(5) #server Action\r\n while True:\r\n try:\r\n data = connection.recv(1024).decode(\"utf-8\")\r\n except:\r\n print('client disconnect: ', address, 'at', self.now())\r\n data = \"\"\r\n\r\n if not data: break\r\n\r\n data = self.change_host(data, address)\r\n result = self.manag_bd.dispatcher(data)\r\n\r\n mutex = thread.allocate_lock()\r\n\r\n\r\n if type(result)==type(list()):\r\n mutex.acquire() #Lock interrupt\r\n l = len(result)\r\n reply = str(l)\r\n connection.send(reply.encode(\"utf-8\"))\r\n for line in result:\r\n time.sleep(0.0025)\r\n reply = line\r\n connection.send(reply.encode(\"utf-8\"))\r\n mutex.release()# 
permission to interrupt\r\n else:\r\n reply = str(self.now())\r\n connection.send(reply.encode(\"utf-8\"))\r\n\r\n\r\n\r\n connection.close()", "def moteAddressChanged(self, mote):\n if self._printSWAP == True:\n print \"Mote changed address to \" + str(mote.address)", "def address(self):\n ..." ]
[ "0.6488217", "0.627662", "0.61385673", "0.6084831", "0.6061891", "0.6032116", "0.59276605", "0.587819", "0.58575743", "0.58568585", "0.57779133", "0.57538867", "0.57408154", "0.57389486", "0.5735249", "0.5732271", "0.572568", "0.571604", "0.5709182", "0.5708799", "0.56972384", "0.56543833", "0.56199306", "0.56033516", "0.5600198", "0.5594354", "0.55708617", "0.55698985", "0.5562733", "0.5559248", "0.5554366", "0.5537933", "0.5516512", "0.55146354", "0.5504447", "0.54904324", "0.5481497", "0.5478123", "0.54627156", "0.54538316", "0.5443129", "0.54385144", "0.54345596", "0.54265267", "0.5423144", "0.54153496", "0.54142463", "0.5409138", "0.5408739", "0.5407914", "0.5405751", "0.5402829", "0.5394726", "0.5391486", "0.53848356", "0.537201", "0.5362062", "0.536073", "0.53488314", "0.53453785", "0.5344929", "0.5344384", "0.5344132", "0.5343901", "0.5341432", "0.53395325", "0.5338241", "0.533628", "0.533622", "0.53361964", "0.5325847", "0.53167087", "0.5316186", "0.52929145", "0.5279961", "0.52716976", "0.52667004", "0.52656895", "0.5257848", "0.5256158", "0.52513975", "0.5233732", "0.5233054", "0.5231411", "0.5228341", "0.5227746", "0.5225398", "0.52185506", "0.5216115", "0.5211479", "0.5210387", "0.520763", "0.52051693", "0.52017534", "0.5200364", "0.519685", "0.5195227", "0.5195227", "0.51938033", "0.51908034", "0.5184525" ]
0.0
-1
Runs the unit tests without test coverage.
def test():
    tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def main():\n import coverage\n import nose\n import os\n from shutil import rmtree\n rmtree('./covhtml', ignore_errors=True)\n try:\n os.remove('./.coverage')\n except Exception,e:\n pass\n\n # run nose in its own process because the .coverage file isn't written\n # until the process terminates and we need to read it\n nose.run()", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def runTests(self):\n \n pass", "def RunUnitTests():\n if FLAGS.test_targets:\n tests = FLAGS.test_targets\n else:\n tests = shell_interfaces.GetStdout(\n 'bazel query kind(\"cc_test\", ...)').split()\n\n # Run coverage, joining all data into one file.\n subprocess.check_call(['bazel', 'coverage', '--instrument_test_targets',\n '--experimental_cc_coverage',\n '--combined_report=lcov',\n ('--coverage_report_generator=@bazel_tools//tools/tes'\n 't/CoverageOutputGenerator/java/com/google/devtools/'\n 'coverageoutputgenerator:Main')] + tests)", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def main():\n run_test_all()", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def run(self):\n cmd = 'coverage run setup.py test && coverage report -m'\n check_call(cmd, shell=True)", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in [t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def __main() :\n launchTests()", "def test_dummy_test():\n pass", "def unitary_test():", "def test_single_test_case():\n pass", "def tests():\n api.local('nosetests')", "def run_tests(self):\n raise NotImplementedError", "def runtests(ctx):\n run('pytest -s tests', pty=pty_available)\n run('flake8 --ignore E265,E266,E501 --exclude src, lib', pty=pty_available)", "def test():\n nose.run()", "def spec_tests():\n pass", "def runalltests():\n doctest.testmod()", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def main():\n fix_sys_path()\n result = unittest.TextTestRunner(verbosity=2).run(createTestSuite())\n\n if result.testsRun != EXPECTED_TEST_COUNT:\n raise Exception(\n 'Expected %s tests to be run, not %s.' 
% (EXPECTED_TEST_COUNT, result.testsRun))\n\n if len(result.errors) != 0 or len(result.failures) != 0:\n raise Exception(\n \"Functional test suite failed: %s errors, %s failures of %s tests run.\" % (\n len(result.errors), len(result.failures), result.testsRun))", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def _run_ci_integration_test():\n _run_install(False)\n _run_integration_tests_on_github(False)", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests(self):\n import nose\n from pocketlint.formatcheck import main as pocket_main\n\n nose_args = ['nosetests']\n if self.verbose:\n nose_args.append('-v')\n else:\n nose_args.append('-q')\n\n module = self.test_suite\n if self.test_module:\n module = self.test_module\n\n nose_args.extend([\n '--with-coverage',\n '--cover-package=' + module,\n '--cover-erase',\n '--cover-test',\n module.replace('.', '/'),\n ])\n\n pocket_args = [\n 'README.rst',\n 'release-notes.rst',\n 'setup.py',\n ]\n for root, dirs, files in os.walk('chevah/keycert', topdown=False):\n for name in files:\n pocket_args.append(os.path.join(root, name))\n\n nose_code = nose.run(argv=nose_args)\n if nose_code:\n nose_code = 0\n else:\n nose_code = 1\n\n pocket_code = pocket_main(pocket_args)\n if not pocket_code:\n print('Linter OK')\n\n coverage_args = [\n 'report',\n '--include=chevah/keycert/tests/*',\n '--fail-under=100',\n ]\n covergate_code = load_entry_point(\n 'coverage', 'console_scripts', 'coverage')(argv=coverage_args)\n if not covergate_code:\n print('Tests coverage OK')\n\n sys.exit(pocket_code or nose_code or covergate_code)", "def tests():", "def test(coverage):\n print('success')\n pass", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests(self):\n\n # log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n # test methods start here\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n\n # dummy_method\n self.dummy_method()\n\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # test methods end here\n\n # log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def test():\r\n import unittest\r\n tests=unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests(virtual_env):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n args = [\n 'python',\n 'setup.py',\n 'nosetests',\n '--with-coverage',\n '--with-xunit',\n ]\n subprocess.call(args, cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n subprocess.call(['coverage', 'xml'], 
cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n subprocess.call(args, cwd=plugin_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n for addon_dir in open(join(HOLLAND_ROOT, 'addons', 'ACTIVE')):\n addon_dir = addon_dir.rstrip()\n addon_path = join(HOLLAND_ROOT, 'addons', addon_dir)\n subprocess.call(args, cwd=addon_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n #return subprocess.call(args, env=virtual_env)", "def test():\n import unittest\n tests = unittest \n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runTest(self):\n self.setUp()\n self.test_ProstateReporting1()", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runtest(self):", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def 
test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def coverage():\n print(\"Coverage tests always re-run\")\n with safe_cd(SRC):\n my_env = config_pythonpath()\n # You will need something like this in pytest.ini\n # By default, pytest is VERY restrictive in the file names it will match.\n #\n # [pytest]\n # DJANGO_SETTINGS_MODULE = core.settings\n # python_files = tests.py test_*.py *_tests.py test*_*.py *_test*.py\n if not os.path.exists(\"pytest.ini\") and IS_DJANGO:\n print(\n \"pytest.ini MUST exist for Django test detection or too few tests are found.\"\n )\n exit(-1)\n return\n\n my_env = config_pythonpath()\n command = \"{0} py.test {1} --cov={2} --cov-report html:coverage --cov-fail-under 55 --verbose\".format(\n PIPENV, \"test\", PROJECT_NAME\n )\n execute_with_environment(command, my_env)", "def runTest(self):\r\n self.setUp()", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_test_coverage(self, build=False):\n\n # print('Running unit tests for package %s' % package)\n if self.is_metapackage:\n self.out = 'This is a metapackage'\n return 0\n\n if not self.has_test:\n self.out = 'No tests defined on CMakeLists.txt'\n return 0\n\n if build:\n self.build_for_coverage() \n\n # Capture initial zero coverage data\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --directory build --zerocounters')\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --capture --initial --directory build/' + self.name + ' --output-file build/lcov.base')\n\n # Run tests with coverage flags\n extra_parms = '--no-deps --cmake-args -DCMAKE_CXX_FLAGS=\"-g -O0 -Wall -fprofile-arcs -ftest-coverage\" -DCMAKE_EXE_LINKER_FLAGS=\"-fprofile-arcs -ftest-coverage\"'\n cmd = ['catkin', 'run_tests', self.name]\n cmd.extend(shlex.split(extra_parms))\n\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE,\n universal_newlines=True)\n\n self.out , self.err = process.communicate()\n\n self.setSummary(self.get_test_summary())\n self.setExecutionStatus(process.returncode)\n\n if process.returncode != 0:\n return process.returncode\n\n # Capture coverage data after running tests\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --no-checksum --directory build/' + self.name + ' --capture --output-file build/lcov.info')\n\n # Add baseline counters\n out, err = self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --add-tracefile build/lcov.base --add-tracefile build/lcov.info --output-file build/lcov.total')\n\n # Remove coverage 
data for a particular set of files from the tracefile\n out, err = self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --remove build/lcov.total /usr* /opt* */test/* */CMakeFiles/* */build/* --output-file build/lcov.total.cleaned')\n \n # Extract line coverage from output\n if 'lines......:' in out:\n self.coverage = float(out.split('lines......: ')[1].split('%')[0])\n else:\n self.coverage = 0\n\n return 0", "def test_nothing(self):", "def test_dummy():", "def unittest():\n from a6test import test_all\n test_all()", "def _run_local_tests(self, *args, **kwargs):\n pass", "def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)", "def startTestRun(self, event):\n self.prof = cProfile.Profile()\n event.executeTests = self.prof.runcall", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def test(coverage=False):\n COV = True\n if COV:\n import coverage\n cov = coverage.coverage(branch=True, include='app/*')\n cov.start()\n\n import unittest\n # tests = unittest.TestLoader().discover('testing_tests')\n tests = unittest.TestLoader().discover('tests')\n\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def test(cline):\n print(\"Running unit tests.\")\n cline.run(\"TF_CPP_MIN_LOG_LEVEL=3 python3 -m unittest\")", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def test():\n import unittest\n import tests\n tests = unittest.TestLoader().discover('tests', pattern='*tests.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def coverage(ctx):\n ctx.run(\"coverage run --source {PROJECT_NAME} -m pytest\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"coverage report -m\")\n ctx.run(\"coverage html\")", "def collectTests(self, global_ctx):\n pass", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! 
You passed all the game test cases.\")", "def main():\n try:\n unittest.main(testLoader=BetterLoader(), defaultTest='suite')\n except Exception:\n import sys\n import traceback\n traceback.print_exc()\n sys.exit(1)", "def task_coverage():\n return {\n 'actions': ['py.test --cov nikola --cov-report term-missing tests/'],\n 'verbosity': 2,\n }", "def actionRunUnitTests():\n UnitTestRunner.init()\n \n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_BUILD, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Running unit tests for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration, ColoredFormatter.YELLOW)\n result = UnitTestRunner.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_RUN_UNITTESTS, target, platform, cpu, configuration, result, UnitTestRunner.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed to execute unit tests!')\n else:\n Logger.printEndActionMessage('Executed all unit tests')", "def run_test(self):\n raise NotImplementedError", "def main():\n unittest.main(exit=False, verbosity=2)\n return 0", "def runTest(self):\n self.setUp()\n self.test_Analytics1()", "def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n COV.html_report()\n COV.erase()\n return 0\n return 1", "def runTest(self):\n self.setUp()\n self.test_FiducialTransform1()", "def cov():\n cov = coverage.coverage(branch=True, include='project/*')\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def test_basic_execution(self):", "def cov():\n cov = coverage.coverage(\n branch=True,\n include='project/*',\n omit=\"*/__init__.py\"\n )\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print 'Coverage Summary:'\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def runTest(self):\n self.setUp()\n self.test_SegmentDicom1()", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def test_script(self) -> None:\n main()", "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n sys.exit(result)", "def test_all_no_class(self):" ]
[ "0.7740198", "0.7395665", "0.73899317", "0.73077023", "0.7184236", "0.71184987", "0.7116021", "0.7049126", "0.695683", "0.6951264", "0.6858743", "0.682145", "0.6820713", "0.6819008", "0.6809791", "0.67945355", "0.6780462", "0.6756857", "0.6749489", "0.671787", "0.6669164", "0.66648895", "0.66419506", "0.663322", "0.6627821", "0.6617628", "0.66106844", "0.66097254", "0.6583377", "0.65735316", "0.65563804", "0.6552433", "0.6549086", "0.65327287", "0.652728", "0.651633", "0.650739", "0.6502367", "0.6502367", "0.64956135", "0.6489306", "0.6489306", "0.6489306", "0.6489248", "0.64619476", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.64542", "0.6454192", "0.6453797", "0.6445205", "0.6442094", "0.64406633", "0.64386016", "0.6429521", "0.6421117", "0.6417108", "0.6409513", "0.6398091", "0.6391008", "0.6389989", "0.63870597", "0.63816077", "0.6380179", "0.63757944", "0.6371951", "0.63498116", "0.63433075", "0.6342515", "0.634239", "0.63381493", "0.63336605", "0.6323768", "0.63147837", "0.63058037", "0.6304162", "0.630137", "0.6294818", "0.62875044", "0.62819076", "0.62689775", "0.62682", "0.62675077", "0.6246566", "0.6244128", "0.6243914", "0.6243798", "0.62375784", "0.6233122", "0.6194239" ]
0.0
-1
Runs the unit tests with coverage.
def cov():
    tests = unittest.TestLoader().discover('project/tests')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
        return 0
    return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n cmd = 'coverage run setup.py test && coverage report -m'\n check_call(cmd, shell=True)", "def coverage(ctx):\n ctx.run(\"coverage run --source {PROJECT_NAME} -m pytest\".format(PROJECT_NAME=PROJECT_NAME))\n ctx.run(\"coverage report -m\")\n ctx.run(\"coverage html\")", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def coverage():\n print(\"Coverage tests always re-run\")\n with safe_cd(SRC):\n my_env = config_pythonpath()\n # You will need something like this in pytest.ini\n # By default, pytest is VERY restrictive in the file names it will match.\n #\n # [pytest]\n # DJANGO_SETTINGS_MODULE = core.settings\n # python_files = tests.py test_*.py *_tests.py test*_*.py *_test*.py\n if not os.path.exists(\"pytest.ini\") and IS_DJANGO:\n print(\n \"pytest.ini MUST exist for Django test detection or too few tests are found.\"\n )\n exit(-1)\n return\n\n my_env = config_pythonpath()\n command = \"{0} py.test {1} --cov={2} --cov-report html:coverage --cov-fail-under 55 --verbose\".format(\n PIPENV, \"test\", PROJECT_NAME\n )\n execute_with_environment(command, my_env)", "def main():\n import coverage\n import nose\n import os\n from shutil import rmtree\n rmtree('./covhtml', ignore_errors=True)\n try:\n os.remove('./.coverage')\n except Exception,e:\n pass\n\n # run nose in its own process because the .coverage file isn't written\n # until the process terminates and we need to read it\n nose.run()", "def test(coverage):\n print('success')\n pass", "def coverage(session):\n session.install(\"coverage[toml]\", \"codecov\")\n session.run(\"coverage\", \"xml\", \"--fail-under=0\")\n session.run(\"codecov\", *session.posargs)", "def coverage(context):\n context.run(\" \".join([\n \"python -m pytest\",\n \"--cov=%s\" % PACKAGE_NAME,\n \"--cov-report html\",\n \"--cov-branch\",\n \"--cov-fail-under=75\"\n ]))", "def cov():\n cov = coverage.coverage(branch=True, include='project/*')\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def task_coverage():\n return {\n 'actions': ['py.test --cov nikola --cov-report term-missing tests/'],\n 'verbosity': 2,\n }", "def cov():\n cov = coverage.coverage(\n branch=True,\n include='project/*',\n omit=\"*/__init__.py\"\n )\n cov.start()\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n cov.stop()\n cov.save()\n print 'Coverage Summary:'\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def cov():\n tests = unittest.TestLoader().discover('project/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n COV.html_report()\n COV.erase()\n return 0\n return 1", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += 
(self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def coverage(session) -> None:\n session.install(\".[test]\", \"pytest-cov\")\n session.run(\n \"pytest\", \"-n\", \"auto\", \"--cov=./\", \"--cov-report=xml\", *session.posargs\n )", "def run_coverage(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n session.run_always(\"pip\", \"install\", \"coverage\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[all]\")\n if sys.platform != \"win32\":\n session.run_always(\"pip\", \"install\", \"-e\", \".[odes]\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[jax]\")\n session.run(\"coverage\", \"run\", \"--rcfile=.coveragerc\", \"run-tests.py\", \"--nosub\")\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"xml\")", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def cov():\n tests = unittest.TestLoader().discover('tests')\n result = unittest.TextTestRunner(verbosity=1).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def RunUnitTests():\n if FLAGS.test_targets:\n tests = FLAGS.test_targets\n else:\n tests = shell_interfaces.GetStdout(\n 'bazel query kind(\"cc_test\", ...)').split()\n\n # Run coverage, joining all data into one file.\n subprocess.check_call(['bazel', 'coverage', '--instrument_test_targets',\n '--experimental_cc_coverage',\n '--combined_report=lcov',\n ('--coverage_report_generator=@bazel_tools//tools/tes'\n 't/CoverageOutputGenerator/java/com/google/devtools/'\n 'coverageoutputgenerator:Main')] + tests)", "def test(coverage=False):\n COV = True\n if COV:\n import coverage\n cov = coverage.coverage(branch=True, include='app/*')\n cov.start()\n\n import unittest\n # tests = unittest.TestLoader().discover('testing_tests')\n tests = unittest.TestLoader().discover('tests')\n\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n cov.stop()\n cov.save()\n print('Coverage Summary:')\n cov.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n cov.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n cov.erase()", "def run_test_coverage(self, build=False):\n\n # print('Running unit tests for package %s' % package)\n if self.is_metapackage:\n self.out = 'This is a metapackage'\n return 0\n\n if not self.has_test:\n self.out = 'No tests defined on CMakeLists.txt'\n return 0\n\n if build:\n self.build_for_coverage() \n\n # Capture initial zero coverage data\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --directory build --zerocounters')\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --capture --initial --directory build/' + self.name + ' --output-file build/lcov.base')\n\n # Run tests with coverage flags\n extra_parms = '--no-deps --cmake-args -DCMAKE_CXX_FLAGS=\"-g -O0 -Wall -fprofile-arcs -ftest-coverage\" -DCMAKE_EXE_LINKER_FLAGS=\"-fprofile-arcs -ftest-coverage\"'\n cmd = ['catkin', 'run_tests', self.name]\n cmd.extend(shlex.split(extra_parms))\n\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE,\n universal_newlines=True)\n\n self.out , self.err = 
process.communicate()\n\n self.setSummary(self.get_test_summary())\n self.setExecutionStatus(process.returncode)\n\n if process.returncode != 0:\n return process.returncode\n\n # Capture coverage data after running tests\n self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --no-checksum --directory build/' + self.name + ' --capture --output-file build/lcov.info')\n\n # Add baseline counters\n out, err = self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --add-tracefile build/lcov.base --add-tracefile build/lcov.info --output-file build/lcov.total')\n\n # Remove coverage data for a particular set of files from the tracefile\n out, err = self.run_lcov_cmd('--rc lcov_branch_coverage=' + self.use_branch_coverage + ' --remove build/lcov.total /usr* /opt* */test/* */CMakeFiles/* */build/* --output-file build/lcov.total.cleaned')\n \n # Extract line coverage from output\n if 'lines......:' in out:\n self.coverage = float(out.split('lines......: ')[1].split('%')[0])\n else:\n self.coverage = 0\n\n return 0", "def test_run_coverage(self):\n cmd = GreenTestCommand(Distribution())\n cmd.coverage = True\n cmd.ensure_finalized()\n cmd.run()\n self.assertThat(_subprocess_call_args(), Contains(\"-r\"))", "def test(coverage, test_names):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import subprocess\n os.environ['FLASK_COVERAGE'] = '1'\n sys.exit(subprocess.call(sys.argv))\n\n import unittest\n if test_names:\n tests = unittest.TestLoader().loadTestsFromNames(test_names)\n else:\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "def cov(test_class):\n if test_class == 'all':\n tests = unittest.TestLoader().discover('project/tests')\n else:\n # note, test module must be imported above, doing lazily for now\n test_module = globals()[test_class]\n tests = unittest.TestLoader().loadTestsFromTestCase(test_module)\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n return 0\n return 1", "def test(coverage=False):\n # 如果没有设置环境变量,则先设定环境变量FLASK_COVERAGE,之后,脚本会重启\n # 再次运行时,脚本顶端的代码发现已经设定了环境变量,于是立即启动覆盖检测。\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n # 执行测试\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n # 输出覆盖报告\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "def run_tests(self):\n import nose\n from pocketlint.formatcheck import main as pocket_main\n\n nose_args = ['nosetests']\n if self.verbose:\n nose_args.append('-v')\n else:\n nose_args.append('-q')\n\n module = 
self.test_suite\n if self.test_module:\n module = self.test_module\n\n nose_args.extend([\n '--with-coverage',\n '--cover-package=' + module,\n '--cover-erase',\n '--cover-test',\n module.replace('.', '/'),\n ])\n\n pocket_args = [\n 'README.rst',\n 'release-notes.rst',\n 'setup.py',\n ]\n for root, dirs, files in os.walk('chevah/keycert', topdown=False):\n for name in files:\n pocket_args.append(os.path.join(root, name))\n\n nose_code = nose.run(argv=nose_args)\n if nose_code:\n nose_code = 0\n else:\n nose_code = 1\n\n pocket_code = pocket_main(pocket_args)\n if not pocket_code:\n print('Linter OK')\n\n coverage_args = [\n 'report',\n '--include=chevah/keycert/tests/*',\n '--fail-under=100',\n ]\n covergate_code = load_entry_point(\n 'coverage', 'console_scripts', 'coverage')(argv=coverage_args)\n if not covergate_code:\n print('Tests coverage OK')\n\n sys.exit(pocket_code or nose_code or covergate_code)", "def test(coverage=False):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "def test(coverage=False):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable]+sys.argv)\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "def test(coverage=False):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n\n import pytest\n pytest.main('-v')\n\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "def cover(ctx, html=False):\n header(cover.__doc__)\n extra = \"--cov-report html\" if html else \"\"\n with ctx.cd(ROOT):\n ctx.run(\n \"pytest --benchmark-skip --cov flask_restx --cov-report term --cov-report xml {0}\".format(\n extra\n ),\n pty=True,\n )", "def runTests(self):\n \n pass", "def run_tests(virtual_env):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n args = [\n 'python',\n 'setup.py',\n 'nosetests',\n '--with-coverage',\n '--with-xunit',\n ]\n subprocess.call(args, cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n subprocess.call(args, cwd=plugin_path, env=virtual_env)\n subprocess.call(['coverage', 
'xml'], cwd=plugin_path, env=virtual_env)\n for addon_dir in open(join(HOLLAND_ROOT, 'addons', 'ACTIVE')):\n addon_dir = addon_dir.rstrip()\n addon_path = join(HOLLAND_ROOT, 'addons', addon_dir)\n subprocess.call(args, cwd=addon_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n #return subprocess.call(args, env=virtual_env)", "def test(coverage=False, test_name=None):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n\n import unittest\n if test_name is None:\n tests = unittest.TestLoader().discover('tests')\n else:\n tests = unittest.TestLoader().loadTestsFromName('tests.' + test_name)\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def covwatch(context):\n context.run(\" \".join([\n \"python -m pytest\",\n \"--cov=%s\" % PACKAGE_NAME,\n \"--cov-branch\",\n \"--cov-fail-under=75\"\n ]))", "def test_coverage_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-2.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_1(inst2)", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def html_it():\n import coverage\n cov = coverage.coverage()\n cov.start()\n import here # pragma: nested\n cov.stop() # pragma: nested\n cov.html_report(directory=\"../html_other\")", "def test_coverage_2(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example-selfpay.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_2(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_2(inst2)", "def commands_coverage_server():\n try:\n coverage()\n coverage_server()\n except KeyboardInterrupt:\n 
logger.info(\"Command canceled\")", "def task_test():\n return {\n \"actions\": [[\"pytest\", \"--cov=mad_gui\", \"--cov-config=.coveragerc\", \"-vv\"]],\n \"verbosity\": 2,\n }", "def __main() :\n launchTests()", "def main():\n run_test_all()", "def build_for_coverage(self):\n\n extra_parms = '--cmake-args -DCMAKE_CXX_FLAGS=\"-g -O0 -Wall -fprofile-arcs -ftest-coverage\" -DCMAKE_EXE_LINKER_FLAGS=\"-fprofile-arcs -ftest-coverage\"'\n cmd = ['catkin', 'build', self.name ]\n cmd.extend(shlex.split(extra_parms))\n\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE,\n universal_newlines=True)\n\n process.communicate()\n\n return process.returncode", "def run_tests(self, cov, functionsToRun): # pragma: nested\n print(\"runed cases\")\n for context in functionsToRun:\n #print(context)\n info = context.split(\".\")\n suite_name =info[0]\n #print(suite_name)\n className = info[1]\n caseName = info[2]\n cov.start()\n suite = import_local_file(suite_name)\n #print(dir(suite))\n try:\n # Call all functions in this module\n for name in dir(suite):\n variable = getattr(suite, name)\n #print(\"variable.__name__\")\n #print(variable.__name__)\n if inspect.isclass(variable) and variable.__name__== className:\n obj = variable()\n \n memberNames = inspect.getmembers(variable,inspect.isfunction)\n \n for member in memberNames:\n if member[0].startswith('test_') and member[0] == caseName:\n \n print(context)\n getattr(obj, member[0])()\n #if inspect.isfunction(variable):\n # variable()\n finally:\n cov.stop()", "def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def main():\n parser = argparse.ArgumentParser(description=\"Analyze requirement coverage\")\n parser.add_argument(\n \"project_info_path\",\n help=\"JSON file containing project information\",\n type=Path,\n )\n parser.add_argument(\n \"test_result_path\",\n help=\"XML file containing test result\",\n type=Path,\n )\n parser.add_argument(\n \"requirements_path\",\n help=\"CSV file containing requirements\",\n type=Path,\n )\n\n args = parser.parse_args()\n\n ok = analyze(args.project_info_path, args.test_result_path, args.requirements_path)\n if not ok:\n exit(1)\n else:\n exit(0)", "def main():\n coverage = calculate_code_coverage()\n platform = os.uname()[0]\n if coverage < CODE_COVERAGE_GOAL[platform]:\n data = {\n 'expected': CODE_COVERAGE_GOAL[platform],\n 'observed': coverage,\n }\n print '\\033[91mFAIL: %(observed).2f%% does not meet goal of %(expected).2f%%\\033[0m' % data\n sys.exit(1)", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def run_tests(self):\n\n # log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n # test methods start here\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n\n # dummy_method\n self.dummy_method()\n\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # test methods end here\n\n # log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def run_tests(self):\n raise 
NotImplementedError", "def test(coverage=False):\n # enable test coverage\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n\n print(\"**************Testing Started**********\")\n # run the app in tesing configration\n app.config.from_object(config['testing'])\n config['testing'].init_app(app)\n # Remove the sqlite database files if exist\n for fl in glob.glob('data-test.sqlite'):\n os.remove(fl)\n print('old test sqlite database removed')\n\n deploy() # redeploy the database\n fakedata() # generate the fakedata\n\n import unittest\n tests = unittest.TestLoader().discover('tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests).wasSuccessful()\n\n # generate test coverage report\n if COV:\n COV.stop()\n COV.save()\n print('Test Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()\n\n # the exit code is used for CircleCI\n import sys\n\n if result: # tests passed\n sys.exit(0)\n else: # tests failed\n sys.exit(1)", "def main(argv=None, directory=None):\n argv = argv or sys.argv\n arg_dict = parse_coverage_args(argv[1:])\n\n quiet = arg_dict[\"quiet\"]\n level = logging.ERROR if quiet else logging.WARNING\n logging.basicConfig(format=\"%(message)s\", level=level)\n\n GitPathTool.set_cwd(directory)\n fail_under = arg_dict.get(\"fail_under\")\n percent_covered = generate_coverage_report(\n arg_dict[\"coverage_file\"],\n arg_dict[\"compare_branch\"],\n html_report=arg_dict[\"html_report\"],\n json_report=arg_dict[\"json_report\"],\n markdown_report=arg_dict[\"markdown_report\"],\n css_file=arg_dict[\"external_css_file\"],\n ignore_staged=arg_dict[\"ignore_staged\"],\n ignore_unstaged=arg_dict[\"ignore_unstaged\"],\n include_untracked=arg_dict[\"include_untracked\"],\n exclude=arg_dict[\"exclude\"],\n include=arg_dict[\"include\"],\n src_roots=arg_dict[\"src_roots\"],\n diff_range_notation=arg_dict[\"diff_range_notation\"],\n ignore_whitespace=arg_dict[\"ignore_whitespace\"],\n quiet=quiet,\n show_uncovered=arg_dict[\"show_uncovered\"],\n )\n\n if percent_covered >= fail_under:\n return 0\n LOGGER.error(\"Failure. 
Coverage is below %i%%.\", fail_under)\n return 1", "def run_gcov(filename, coverage, args):\n if args.verbose:\n warn(\"calling:\", 'gcov', '-i', filename)\n stdout = None\n else:\n # gcov is noisy and don't have quit flag so redirect stdout to /dev/null\n stdout = subprocess.DEVNULL\n\n subprocess.check_call(['gcov', '-i', filename], stdout=stdout)\n\n for gcov_file in glob('*.gcov'):\n if args.verbose:\n warn('parsing', gcov_file)\n src_file, count = parse_gcov_file(gcov_file)\n os.remove(gcov_file)\n\n if src_file not in coverage:\n coverage[src_file] = defaultdict(int, count)\n else:\n # sum execution counts\n for line, exe_cnt in count.items():\n coverage[src_file][line] += exe_cnt", "def tests(session):\n args = session.posargs or [\"--cov\"]\n session.run(\"poetry\", \"install\", external=True)\n session.run(\"pytest\", *args)", "def unittest(core):\n\n # local import because nose uses pkg_resources that is very slow to import\n import nose.core\n\n config = core.config\n\n application_name = config.get(APPLICATION_NAME)\n application_package = config.get(APPLICATION_ROOT)\n\n extension_names = config.get(EXTENSION_NAMES)\n tests = config.get(TESTS)\n report_enabled = config.get(REPORT_ENABLED)\n report_file = config.get(REPORT_FILE)\n coverage_enabled = core.config.get(COVERAGE_ENABLED)\n verbose = core.config.get(DETAILS)\n exclude_systests = core.config.get(EXCLUDE_SYSTESTS)\n\n matching_packages = packages.find_packages(\n extension_names, core.extension_manager, application_name, application_package)\n command = create_nose_command(\n matching_packages, verbose, tests, report_enabled, report_file, exclude_systests)\n\n if coverage_enabled:\n coverage_file = core.config.get(COVERAGE_FILE)\n xml_enabled = core.config.get(COVERAGE_XML_ENABLED)\n xml_file = core.config.get(COVERAGE_XML_FILE)\n with coverage.coverage(matching_packages, sys.stdout, coverage_file, xml_enabled, xml_file):\n result = nose.core.run(argv=command)\n else:\n result = nose.core.run(argv=command)\n\n return 0 if result else 1", "def test(cline):\n print(\"Running unit tests.\")\n cline.run(\"TF_CPP_MIN_LOG_LEVEL=3 python3 -m unittest\")", "def runtests(ctx):\n run('pytest -s tests', pty=pty_available)\n run('flake8 --ignore E265,E266,E501 --exclude src, lib', pty=pty_available)", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def cuv(ctx, coverage_fname, exclude, branch):\n if coverage_fname is None:\n coverage_fname = find_coverage_data('.')\n # coverage_fname still could be None\n\n cfg = Config()\n ctx.obj = cfg\n\n cfg.nice_width = min(80, shutil.get_terminal_size()[0])\n cfg.exclude = exclude\n\n cfg.branch = branch\n if coverage_fname is not None:\n cfg.data = coverage.Coverage(data_file=coverage_fname)\n cfg.data.load()\n else:\n raise click.UsageError(\n \"No coverage data. 
Do you have a .coverage file?\"\n )", "def _cmd_coverage(args):\n pset = coverage.do_coverage(\n args.interval,\n args.bam_file,\n args.count,\n args.min_mapq,\n args.processes,\n args.fasta,\n )\n if not args.output:\n # Create an informative but unique name for the coverage output file\n bambase = core.fbase(args.bam_file)\n bedbase = core.fbase(args.interval)\n tgtbase = (\n \"antitargetcoverage\" if \"anti\" in bedbase.lower() else \"targetcoverage\"\n )\n args.output = f\"{bambase}.{tgtbase}.cnn\"\n if os.path.exists(args.output):\n args.output = f\"{bambase}.{bedbase}.cnn\"\n core.ensure_path(args.output)\n tabio.write(pset, args.output)", "def test():\n import unittest\n tests = unittest \n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def tests():", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def test_coverage_report(mocker):\n mocked_function = mocker.patch(\"os.system\")\n coverage_report()\n mocked_function.assert_called_once_with(\"coverage run -m py.test\")", "def test():\r\n import unittest\r\n tests=unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_coverage_4(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"coverage-example.json\"\n inst = coverage.Coverage.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Coverage\" == inst.resource_type\n\n impl_coverage_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Coverage\" == data[\"resourceType\"]\n\n inst2 = coverage.Coverage(**data)\n impl_coverage_4(inst2)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def derive_project_coverage(self) -> None:\n self.get_project_column_description_coverage()\n self.get_project_test_coverage()", "def commands_all():\n lint()\n complexity()\n coverage()", "def cc():\n load_env_vars('dev')\n from tools.static_code_analysis import CyclomaticComplexity\n radon_cc = CyclomaticComplexity()\n score = radon_cc.run_test()\n radon_cc.create_badge(score)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n 
unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_single_test_case():\n pass", "def run(self, pkgs=(), testopts=\"\", covermode=\"atomic\", debug=False):\n pkgs = pkgs or self.test_imports.keys()\n pkgs.sort()\n\n cmds = []\n\n if debug:\n print(\"`go test` commands that will be run:\\n\")\n\n for pkg in pkgs:\n pkg_deps = self.get_pkg_recursive_deps(pkg)\n cmd = [\"go\", \"test\"] + list(self.tag_args)\n if pkg in self.test_imports:\n cmd += testopts.split() + [\n \"-covermode\", covermode,\n \"-coverprofile\", self.get_coverprofile_filename(pkg),\n \"-coverpkg\", \",\".join(pkg_deps)]\n cmd += [pkg]\n if debug:\n print(\"\\t\" + \" \".join(cmd))\n cmds.append((cmd, pkg_deps))\n\n if debug:\n print(\"\\nResults:\\n\")\n\n for cmd, pkg_deps in cmds:\n app = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in app.stdout:\n # we are trying to generate coverage reports for everything in the base package, and some may not be\n # actually exercised in the test. So ignore this particular warning.\n if not line.startswith(\"warning: no packages being tested depend on {0}\".format(self.base_pkg)):\n line = line.replace(\"statements in \" + \", \".join(pkg_deps),\n \"statements in <{0} dependencies>\".format(len(pkg_deps)))\n sys.stdout.write(line)\n\n app.wait()\n if app.returncode != 0:\n print(\"\\n\\nTests failed.\\n\")\n sys.exit(app.returncode)", "def collectTests(self, global_ctx):\n pass", "def test(conanfile):\n with conanfile_exception_formatter(conanfile, \"test\"):\n with chdir(conanfile.build_folder):\n conanfile.test()", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)" ]
[ "0.8225887", "0.8197109", "0.8087866", "0.8035677", "0.79417413", "0.78974116", "0.77752006", "0.7715321", "0.7702348", "0.7699923", "0.7665237", "0.7642242", "0.7576189", "0.75723445", "0.7559191", "0.75513554", "0.7547175", "0.7532325", "0.75018466", "0.74963635", "0.7467095", "0.73310155", "0.7262438", "0.7219002", "0.721661", "0.7206347", "0.71984947", "0.70016724", "0.6928386", "0.69281864", "0.69234115", "0.68444014", "0.677081", "0.6610271", "0.6603396", "0.656456", "0.65624344", "0.6544588", "0.6534733", "0.6531103", "0.6524771", "0.651744", "0.65147984", "0.6512509", "0.64980346", "0.64729345", "0.64540994", "0.64255476", "0.64090043", "0.63722426", "0.63320106", "0.6330993", "0.6329886", "0.6325395", "0.63180053", "0.6283057", "0.62712634", "0.62630683", "0.62604356", "0.625619", "0.62497604", "0.62426144", "0.623554", "0.6229008", "0.6228544", "0.6224282", "0.6202391", "0.6198187", "0.61900735", "0.618619", "0.6185681", "0.6178685", "0.6174834", "0.6174834", "0.6172738", "0.6172279", "0.6169615", "0.61571795", "0.6154444", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61539835", "0.61435694", "0.6142807", "0.61361873", "0.6135887", "0.61345744", "0.61335415", "0.61335415" ]
0.76248956
12
Creates the db tables.
def create_db():
    database.db.create_all()
    get_ulm()
    for fixture_file in glob.glob(config.DevelopmentConfig.FIXTURES_DIRS + '/*.json'):
        fixtures = JSONLoader().load(fixture_file)
        load_fixtures(database.db, fixtures)
    MigrationManager().stamp_db()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def create_db_tables():\n\n try:\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()\n except Exception as e:\n # TODO: melhorar o informe do erro\n raise e", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def create_tables() -> None:\n print(\"Creating database tables using SQLAlchemy ORM\")\n Base.metadata.create_all(engine)\n print(\"Done creating tables\")", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def create_tables(self):\n con = self.connect()\n cursor = con.cursor()\n queries = self.tables()\n for query in queries:\n cursor.execute(query)\n cursor.close()\n con.commit()\n con.close()", "def create_all_tables():\n\tcommon_db.create_all_tables()", "def create_tables():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=DBNAME, user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=DBNAME)\n\n create_train_table(conn)\n create_questions_table(conn)\n create_lectures_table(conn)\n create_example_test_table(conn)\n create_example_test_table(conn)\n\n conn.close()", "def create_all_tables(self):\n pass", "def create_tables(self):\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n Base.metadata.create_all(self.engine)", "def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def create_db_tables(self):\n self.releases = Table(\n RELEASES,\n self.metadata,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"repo\", String),\n Column(\"tag_name\", String),\n Column(\"release_name\", String),\n Column(\"created\", DateTime, default=datetime.datetime.now()),\n extend_existing=True,\n )\n self.commits = Table(\n COMMITS,\n self.metadata,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"repo\", String),\n Column(\"branch\", String),\n Column(\"sha\", String),\n Column(\"created\", DateTime, default=datetime.datetime.now()),\n extend_existing=True,\n )\n self.tags = Table(\n TAGS,\n self.metadata,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"repo\", String),\n Column(\"tag_name\", String),\n Column(\"sha\", String),\n Column(\"created\", DateTime, default=datetime.datetime.now()),\n extend_existing=True,\n )\n try:\n self.metadata.create_all()\n log.info(\"Tables created\")\n except Exception as e:\n log.error(\"Error occurred during Table creation!\")\n log.exception(e)", "def createTables():\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table, query in tables.items():\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to create tables:\" )\n print(ex)\n sys.exit(1)", "def createTables(self):\n metadata = Base.metadata\n metadata.create_all(self._engine)\n return", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def create_tables(self):\n users_tb = \"\"\"CREATE TABLE IF NOT EXISTS Users(\n user_id serial PRIMARY KEY,\n email varchar(30) NOT NULL,\n username varchar(15) NOT NULL,\n password varchar(250) NOT NULL,\n role varchar(10) DEFAULT 'User'\n );\"\"\"\n\n meals_tb = \"\"\"CREATE TABLE IF NOT EXISTS 
Meals(\n meal_id serial PRIMARY KEY,\n meal_name varchar(25) NOT NULL,\n image varchar(150) NOT NULL,\n description varchar(250) NOT NULL,\n unit_price decimal(5,2) NOT NULL\n );\"\"\"\n\n orders_tb = \"\"\"CREATE TABLE IF NOT EXISTS Orders(\n order_id serial PRIMARY KEY,\n user_id integer NOT NULL,\n meal_id integer NOT NULL,\n address varchar(50),\n quantity int NOT NULL,\n order_date timestamp NOT NULL,\n status varchar(15),\n FOREIGN KEY (user_id) REFERENCES users(user_id)\n ON DELETE CASCADE ON UPDATE CASCADE,\n FOREIGN KEY (meal_id) REFERENCES meals(meal_id)\n ON DELETE CASCADE ON UPDATE CASCADE\n );\"\"\"\n return [users_tb, meals_tb, orders_tb]", "def create_database():\n with connection:\n connection.execute(CREATE_MOVIE_TABLE)\n connection.execute(CREATE_USER_TABLE)\n connection.execute(CREATE_WATCHED_TABLE)", "def create_tables():\r\n db = connect_database()\r\n table_wait = \"waiting\"\r\n table_helped = \"helped\"\r\n table_help = \"help\"\r\n param_name = ['cus_num', 'name', 'username', 'ru_id', 'os_platform', 'description']\r\n param_type1 = ['INTEGER PRIMARY KEY AUTOINCREMENT', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n param_type2 = ['INTEGER PRIMARY KEY', 'TEXT', 'TEXT', 'TEXT', 'TEXT', 'TEXT']\r\n with db:\r\n create_table(db, table_wait, param_name, param_type1)\r\n create_table(db, table_helped, param_name, param_type2)\r\n create_table(db, table_help, param_name, param_type2)\r\n db.close()", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def create_tables(args):\n\n from bob.db.base.utils import create_engine_try_nolock\n\n engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose >= 2));\n Client.metadata.create_all(engine)\n File.metadata.create_all(engine) \n Annotation.metadata.create_all(engine)\n #Protocol_File_Association.metadata.create_all(engine)", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()\n \n print('Tables created.')", "def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()", "def create_tables(cur, conn) -> None:\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables 
(self):\n cursor = self.cur()\n cursor.execute('DROP TABLE IF EXISTS person')\n cursor.execute('DROP TABLE IF EXISTS room')\n cursor.execute('DROP TABLE IF EXISTS allocation')\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS person (person_id INTEGER PRIMARY KEY, name TEXT, role TEXT)WITHOUT ROWID')\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS room (name TEXT , no_of_members INTEGER, room_type TEXT)')\n cursor.execute(\n 'CREATE TABLE IF NOT EXISTS allocation (room_name TEXT , person_id INTEGER)')", "def create_table(self):\n pass", "async def create_tables():\n text_create_citizens_table = queries.CREATE_CITIZENS\n text_create_relative_table = queries.CREATE_RELATIVES\n text_create_consts_table = queries.CREATE_CONSTS\n text_insert_base_import_id = queries.INSERT_IMPORT_ID_0\n text_create_index_citizens = queries.CREATE_INDEX_CITIZENS\n text_create_index_relatives = queries.CREATE_INDEX_RELATIVES\n\n queries_list = [text_create_citizens_table, text_create_relative_table,\n text_create_consts_table, text_insert_base_import_id,\n text_create_index_citizens,text_create_index_relatives]\n\n connection = await sql.connect()\n async with connection.transaction():\n for query in queries_list:\n await sql.execute_query(connection, query)\n await sql.close(connection)", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(cur, conn):\n \n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables (uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()", "def _create_tables(self):\n\n print(\"\\n ** Creating 
DynamoDB Tables\")\n\n # Create Tables\n for table_config in self.table_list:\n with open(os.path.join(self.config_dir, table_config), \"rt\") as handle:\n config_data = json.load(handle)\n story_table = DynamoDB(DynamoTable.STACK_NAME, config_data[self.stack_name])\n story_table.create()", "def create_tables(cur, conn): \n for query in create_table_queries:\n cur.execute(query)\n conn.commit()", "def create_tables(engine):\n BASE.metadata.create_all(engine)", "def create_tables(self):\n\n self.cur.execute('''CREATE TABLE IF NOT EXISTS my_business_entry\n (\n id SERIAL PRIMARY KEY,\n url_yes_no boolean,\n url TEXT,\n phone_yes_no boolean,\n phone TEXT,\n rating TEXT,\n nr_of_ratings TEXT,\n myBusiness boolean,\n company TEXT\n );''')\n\n self.connection.commit()", "def create_tables(self):\n\n cur = self.conn.cursor()\n cur.execute('CREATE TABLE blog(blog_id INTEGER PRIMARY KEY, '\n ' title TEXT, subtitle TEXT, content TEXT, date TEXT, '\n ' author_id INTEGER, '\n 'FOREIGN KEY (author_id) REFERENCES author(author_id)) ')\n\n cur.execute('CREATE TABLE author(author_id INTEGER PRIMARY KEY, '\n ' name TEXT UNIQUE) ')\n\n cur.execute('CREATE TABLE password(password_id INTEGER PRIMARY KEY,'\n ' author_id INTEGER, '\n ' password TEXT, '\n 'FOREIGN KEY (author_id) REFERENCES author(author_id)) ')\n\n self.conn.commit()", "def create_tables(self):\n for name, attribute in self.__dict__.items():\n if hasattr(attribute, 'create_table_in_sqlite_db'):\n attribute.create_table_in_sqlite_db()", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def create_tables(self):\n\n conn = sqlite3.connect(self.db_name)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS Installation;\")\n c.execute(\"DROP TABLE IF EXISTS Equipment;\")\n c.execute(\"DROP TABLE IF EXISTS Activity;\")\n c.execute(\"DROP TABLE IF EXISTS EquipmentActivity;\")\n\n c.execute(\"CREATE TABLE Installation (Id INTEGER PRIMARY KEY, Name TEXT, Address TEXT, \"\n \"PostalCode INTEGER, City TEXT, Latitude REAL, Longitude Real);\")\n\n c.execute(\"CREATE TABLE Equipment (Id INTEGER PRIMARY KEY, Name TEXT, \"\n \"IdInstallation INTEGER, FOREIGN KEY(IdInstallation) REFERENCES Installation(Id));\")\n\n c.execute(\"CREATE TABLE Activity (Id INTEGER PRIMARY KEY, Name TEXT);\")\n\n c.execute(\n \"CREATE TABLE EquipmentActivity (IdEquipment INTEGER, IdActivity INTEGER, PRIMARY KEY (IdEquipment, \"\n \"IdActivity));\")\n\n conn.commit()\n conn.close()", "def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()", "def create_tables(self, script):\n for query in script:\n try:\n self.cursor.execute(query)\n except:\n print(\"\\n Une erreur s'est produite lors \"\n \"de la création des tables \\n\")\n print(\"\\n Les tables ont bien été créées \"\n f\"dans la base de données |{DB_NAME}| \\n\")", "def 
create_database_structure(self):\n Base.metadata.create_all(self.engine)", "def create_tables(self, tables=None):\n LOG.debug(f\"Creating table subset {tables}\")\n Base.metadata.create_all(self.engine, tables, checkfirst=False)", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n db.create_all()", "def create_db():\n _init_db()\n db.create_all()", "def init_db():\n # users table\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS users (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"name VARCHAR(255) NOT NULL,\"\n \"email VARCHAR(255) NOT NULL,\"\n \"password VARCHAR(30) NOT NULL,\"\n \"birthdate DATE);\"\n )\n\n # users' phone records table\n cur.execute(\"CREATE TABLE IF NOT EXISTS records (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"ownerID INTEGER,\"\n \"name VARCHAR(255),\"\n \"phone VARCHAR(22),\"\n \"birthdate DATE);\")", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def create_tables(self):\n\n # Uses methods from class DbAuth\n cursor = self.connect.create_cursor()\n cursor.execute(\"USE `dbPurBeurre`\")\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS `dbPurBeurre`.`Categories` (\"\n \" `num` INT UNSIGNED AUTO_INCREMENT,\"\n \" `id` VARCHAR(80) NOT NULL UNIQUE,\"\n \" `name` VARCHAR(80) NOT NULL,\"\n \" `url` VARCHAR(255) NOT NULL,\"\n \" `products` INT NULL,\"\n \" PRIMARY KEY (`num`))\"\n \" ENGINE = InnoDB\"\n )\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS `dbPurBeurre`.`Produits` (\"\n \" `num` INT UNSIGNED AUTO_INCREMENT,\"\n \" `id` VARCHAR(80) NOT NULL UNIQUE,\"\n \" `product_name` VARCHAR(80) NOT NULL,\"\n \" `nutrition_grade_fr` CHAR(1) NOT NULL,\"\n \" `brands` VARCHAR(80) NULL,\"\n \" `stores` VARCHAR(80) NOT NULL,\"\n \" `url` VARCHAR(255) NOT NULL,\"\n \" `watchlist` DATE NULL,\"\n \" PRIMARY KEY (`num`, `id`))\"\n \" ENGINE = InnoDB\"\n )\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS `dbPurBeurre`.`Asso_Prod_Cat` (\"\n \" `id_categories` VARCHAR(80) NOT NULL,\"\n \" `id_produits` VARCHAR(80) NOT NULL,\"\n \" PRIMARY KEY (`id_categories`, `id_produits`),\"\n \" CONSTRAINT `fk_id_categories`\"\n \" FOREIGN KEY (`id_categories`)\"\n \" REFERENCES `Categories` (`id`),\"\n \" CONSTRAINT `fk_id_produits`\"\n \" FOREIGN KEY (`id_produits`)\"\n \" REFERENCES `Produits` (`id`))\"\n \" ENGINE = InnoDB\"\n )", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def create_tables():\n\n create_site_table_query = 'CREATE TABLE Sites (entryID int AUTO_INCREMENT, ' \\\n 'Site VARCHAR(100) NOT NULL, ' \\\n 'PRIMARY KEY (entryID))'\n\n create_pass_table_query = 'CREATE TABLE Passwords ' \\\n '(entryID int AUTO_INCREMENT, ' \\\n 'Passwords BINARY(100) NOT NULL, ' \\\n 'FOREIGN KEY (entryID) REFERENCES 
Sites(entryID))'\n\n key_table_query = 'CREATE TABLE Crypt ' \\\n '(key_id int AUTO_INCREMENT, ' \\\n 'crypt_key BINARY(120) NOT NULL, ' \\\n 'PRIMARY KEY (key_id))'\n\n create_master_table_query = 'CREATE TABLE Master ' \\\n '(master_key_id int AUTO_INCREMENT, ' \\\n 'master_key BINARY(100) NOT NULL, ' \\\n 'PRIMARY KEY (master_key_id))'\n\n my_cursor.execute(key_table_query)\n my_cursor.execute(create_site_table_query)\n my_cursor.execute(create_pass_table_query)\n my_cursor.execute(create_master_table_query)\n pw_db.commit()", "def _create_database(self):\n self._connect()\n cursor = self._connection.cursor()\n cursor.execute(make_table_creation_command(\"reviews\", FIELD_DESCRIPTIONS))\n self._connection.commit()", "def __create_tables(self):\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS federations(\n id INTEGER PRIMARY KEY UNIQUE,\n name TEXT,\n category TEXT\n )\n \"\"\")\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS clubs(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n code_fede INT,\n dpt TEXT,\n nb_clubs INT,\n year INT,\n FOREIGN KEY(code_fede) REFERENCES federations(i)\n )\n \"\"\")\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS by_age(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n code_fede INT,\n age TEXT,\n sex TEXT,\n nb INT,\n year INT,\n FOREIGN KEY(code_fede) REFERENCES federations(id)\n )\n \"\"\")\n self.__cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS by_dpt(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n code_fede INT,\n dpt TEXT,\n nb INT,\n sex TEXT,\n year INT,\n FOREIGN KEY(code_fede) REFERENCES federations(id)\n )\n \"\"\")\n\n # Save all the changes\n self.save()", "def create_tables(session):\n for query in create_table_queries:\n session.execute(query)", "def make_db():\n\n db.create_all()", "def db_createall():\n db.create_all()", "def create_tables(session):\n\n for query in create_table_queries:\n session.execute(query)", "def createDbTables():\n logger.info(\"creating required DB table if it does not already exist\")\n try:\n with db.connect() as conn:\n conn.execute(\n \"CREATE TABLE IF NOT EXISTS birthdays \"\n \"( username VARCHAR(255) NOT NULL, birthday DATE NOT NULL, \"\n \"PRIMARY KEY (username) );\"\n )\n return OK\n except Exception as e:\n logger.info(\"Error creating table: %s\" % e)\n return ERR", "async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return", "def create_tables(c, drop_all = False):\n\tprint \"create_tables [status]: create tables triggered.\"\n\tautoincrement = \" autoincrement\"\n\tglobal Options, Database\n\tif drop_all:\n\t\tprint \"create_tables [status]: dropping tables initiated.\"\n\t\tif Database.type == DatabaseTypes.SQLITE:\n\t\t\tdrop_sql = open(mk_drop(\"sqlite\")).read()\n\t\telif Database.type == DatabaseTypes.MYSQL:\n\t\t\tdrop_sql = open(mk_drop(\"mysql\")).read()\n\t\telif Database.type == DatabaseTypes.POSTGRES:\n\t\t\tdrop_sql = open(mk_drop(\"postgres\")).read()\n\t\tdrop_list = drop_sql.split(\";\")\n\t\tfor line in drop_list:\n\t\t\tquery = line.strip()\n\t\t\tif query:\n\t\t\t\tc.execute(line)\n\t\tprint \"create_tables [status]: dropping tables complete.\"\n\tif Database.type == DatabaseTypes.SQLITE:\n\t\tdbf = open(mk_schema(\"sqlite\", Options.use_dict))\n\t\tquery_list = dbf.read()\n\t\tc.executescript(query_list)\n\telif Database.type == DatabaseTypes.MYSQL:\n\t\tdbf = open(mk_schema(\"mysql\", Options.use_dict))\n\t\texecutescript(c, dbf)\n\telif Database.type == 
DatabaseTypes.POSTGRES:\n\t\tdbf = open(mk_schema(\"postgres\", Options.use_dict))\n\t\texecutescript(c, dbf)", "def insert_db():\n populate_tables()", "def create_tables(cur, conn):\n for query in create_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print(e)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def create_table(self):\n Engine.create_table(self)\n self.connection.commit()", "def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)", "def create_tables():\n\tlog_msg4(\"No hay tablas para el año \" + txt_year + \". Creando\")\n\n\tcreate_table('visited')\n\tcreate_table('saved')\n\tcreate_table('actions')\n\n\tglobal new_tables_created\n\tnew_tables_created = True\n\n\tlog_msg_ok4()", "def CreateDB(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateClassTable'])\r\n\t\t\tfor ii,classname in enumerate(self.SQLCMDs['ClassesList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertClass'],(ii,classname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSetTable'])\r\n\t\t\tfor ii,setname in enumerate(self.SQLCMDs['SetList']) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['InsertSet'],(ii,setname))\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateSampleTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictListTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateDictBuildTable'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateWordLists'])\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['CreateFeatureTable'])\r\n\t\t\tself.DB_Connect.commit()\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to create the database: %s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn", "def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results", "def create_tables(cur, conn):\n for query in create_table_queries:\n cur.execute(query)\n conn.commit()\n print(f\"\\nRunning: {query}\")", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def create_all():\n db.create_all()", "def create_db():\n db.create_all()\n print('Database structure created successfully')", "def create_tables():\n print('Creating tables.')\n # import the models used to describe the tables we're creating (using the\n # ORM). 
Link: http://flask-sqlalchemy.pocoo.org/2.3/models/\n import src.models.models as m\n Base.metadata.create_all(bind=engine)\n session.commit()\n\n import csv\n import bcrypt\n\n # let's add the admin user\n role = 1\n pw_hashed = bcrypt.hashpw('1234'.encode('utf-8'), bcrypt.gensalt(12))\n u = m.User(name=\"Admin\", email=\"admin@email.com\", pw=pw_hashed.decode('ascii'), role=role)\n session.add(u)\n\n # let's add the normal user\n role = 0\n pw_hashed = bcrypt.hashpw('5678'.encode('utf-8'), bcrypt.gensalt(12))\n u = m.User(name=\"User\", email=\"user@email.com\", pw=pw_hashed.decode('ascii'), role=role)\n session.add(u)\n\n # let's add three congregations\n n_cong = m.Congregation(name=\"North\")\n b_cong = m.Congregation(name=\"Belconnen\")\n c_cong = m.Congregation(name=\"City\")\n session.add(n_cong)\n session.add(b_cong)\n session.add(c_cong)\n \n # let's add all the books of the bible\n # thanks to jpoehls @ gh.com/jpoehls/bible-metadata for the bible_meta file!\n r = 0\n with open('bible_meta.csv', newline='') as file:\n reader = csv.reader(file)\n for row in reader:\n if r>0:\n b = m.Books_Bible(name=row[1], nickname=row[2], volume=row[4])\n session.add(b)\n else:\n r += 1 # first line contains junk\n\n session.commit()", "def create_db_execute(self):\n self.execute(query=self.db_create_schema.format(self.db_name))", "def create(self):\n db.create_all()", "def setup_db(filepath, tables=(), reset=False):\n \n if os.path.exists(filepath) and not reset:\n return\n \n if os.path.exists(filepath) and reset:\n os.remove(filepath)\n \n # create table with appropriate columns\n with get_conn(filepath) as conn:\n for tab in tables:\n make_table(conn, tab.name,\n tab.text_fields, tab.real_fields)", "def create_tables():\n inf(\"Creating tables\")\n \n pinners = Table('pinners', metadata,\n Column('pinner_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n pinners.create()\n \n contents = Table('contents', metadata,\n Column('content_id', Integer, primary_key=True),\n Column('url', String(80)),\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id'))\n )\n contents.create()\n\n reviewers = Table('reviewers', metadata,\n Column('reviewer_id', Integer, primary_key=True),\n Column('name', String(40)),\n Column('email', String(40))\n )\n reviewers.create()\n\n complaints = Table('complaints', metadata,\n Column('complaint_id', Integer, primary_key=True),\n Column('complaint_timestamp', DateTime), # when the complaint was filed\n Column('complaint_type', String(80)), # objectionable, copyright\n Column('process_status', String(20)), # complaint, review, done\n Column('display_status', String(20)), # good, objectionable, copyright\n Column('review_timestamp', DateTime), # when the compliant was resolved\n Column('pinner_id', Integer, ForeignKey('pinners.pinner_id')),\n Column('reviewer_id', Integer, ForeignKey('reviewers.reviewer_id')),\n Column('content_id', Integer, ForeignKey('contents.content_id'))\n )\n complaints.create()\n \n # could create a table of \"near by\" images and/or near by features and \n # include these in the review", "def create_tables():\n with db.connect() as conn:\n conn.execute(\n \"CREATE TABLE IF NOT EXISTS url_list \"\n \"(url_id VARCHAR(20) NOT NULL UNIQUE, url_data VARCHAR(2083) NOT NULL);\"\n )", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = 
\"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n 
deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n 
activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 
'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def create_tables( self ) :\n return self._create_tables", "def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not 
self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t 
\"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query" ]
[ "0.90999454", "0.90999454", "0.8910174", "0.86784077", "0.8614145", "0.8553592", "0.8508828", "0.845593", "0.8445083", "0.8376737", "0.83389515", "0.82984424", "0.8226738", "0.8191471", "0.81631696", "0.8158732", "0.81120193", "0.80407983", "0.7961357", "0.7934169", "0.79331195", "0.79155004", "0.7908272", "0.7901442", "0.78924435", "0.788377", "0.78529644", "0.7846712", "0.784559", "0.7825876", "0.7825876", "0.7825876", "0.7825876", "0.7825876", "0.7825876", "0.7825876", "0.7825876", "0.7825876", "0.7814576", "0.7813396", "0.7796079", "0.77902174", "0.77716607", "0.7770054", "0.77640486", "0.77594686", "0.775553", "0.77189225", "0.77024364", "0.7699509", "0.7695276", "0.7646638", "0.76314926", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7608928", "0.7606043", "0.76009685", "0.7585135", "0.7582852", "0.7572783", "0.75651896", "0.75532144", "0.7550416", "0.7545819", "0.7534306", "0.7527869", "0.750657", "0.75064856", "0.7503436", "0.75030786", "0.74966896", "0.74956644", "0.74923795", "0.7480715", "0.7479025", "0.7467438", "0.7464284", "0.74616975", "0.74592286", "0.7448888", "0.7444606", "0.7438196", "0.7434994", "0.74334353", "0.7431086", "0.74295706", "0.7422864", "0.7419452", "0.73977804", "0.73964643", "0.73951966" ]
0.7389031
100
Drops the db tables.
def drop_db(): database.db.reflect() database.db.drop_all() print('Dropped the database')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)", "def drop(self):\n c = self.cursor()\n for table in ['experiment','fact']:\n c.execute(\"drop table if exists {}\".format(table))\n self.commit()", "def drop(self):\n self.__init__()\n cursor = self.connection.cursor()\n cursor.execute(drop_tables)\n queries = cursor.fetchall()\n for i in queries:\n cursor.execute(i[0])\n\n self.commit()\n self.__init__()", "def drop_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"DROP TABLE tweets\")\n conn.execute(\"DROP TABLE tweet_peaks\")", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def drop_all_tables():\n\tcommon_db.drop_all_tables()", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def drop_db(self) -> None:\n try:\n if not self._check_delete_okay():\n return\n except DatabaseWriteException as e:\n raise e\n\n existing_tables = self.list_tables()\n for table_name in existing_tables:\n self.dynamodb.Table(table_name).delete()", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def _drop_db(keep_tables=None):\n server.db.session.remove()\n if keep_tables is None:\n keep_tables = []\n meta = server.db.metadata\n for table in reversed(meta.sorted_tables):\n if table.name in keep_tables:\n continue\n server.db.session.execute(table.delete())\n server.db.session.commit()", "def drop(self):\n cursor = self.connect.create_cursor()\n queries = (\n (\"USE dbPurBeurre\"),\n (\"SET foreign_key_checks = 0\"),\n (\"DROP TABLE IF EXISTS Asso_Prod_Cat\"),\n (\"DROP TABLE IF EXISTS Categories\"),\n (\"DROP TABLE IF EXISTS Produits\")\n )\n\n for query in queries:\n cursor.execute(query)", "def delete_all_tables(self):\n\t\tif self.__dbfile is not None:\n\t\t\tfor table_name in list(LocalData.table_info.keys()):\n\t\t\t\tif self.table_exists(table_name):\n\t\t\t\t\tself._conn.execute(\"DROP TABLE %s\" % table_name)\n\t\t\tself._conn.commit()", "def dropdb():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def clean_up():\n drop_all_tables()\n create_all()", "def 
drop_tables(db_config):\n tables = [\"users\", \"incidents\", \"images\", \"videos\",\n \"images\", \"location\" \"login\"]\n try:\n conn = connect(db_config)\n cursor = conn.cursor()\n for table in tables:\n query = \"DROP TABLE IF EXISTS {} CASCADE;\".format(table)\n cursor.execute(query)\n conn.commit()\n # print('Table {} deleted'.format(tables), '\\n')\n except(Exception, psycopg2.DatabaseError) as error:\n print(\"Warning: Table Deletion Error\", error)", "def delete_db():\n db.drop_all()", "def clear_db():\n from flask_monitoringdashboard.database import get_tables, engine\n\n for table in get_tables():\n table.__table__.drop(engine)\n table.__table__.create(engine)", "def clear_db():\n for name in TABLES:\n result = execute_query('truncate table {};'.format(name)), ())", "def drop_tables():\n commands = (\n \"\"\"\n DROP TABLE utilizador_partilha CASCADE\n \"\"\",\n \"\"\" \n DROP TABLE album CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE compositores CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE grupo CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE editora CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE genero_musical CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_criticas CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE utilizador_partilha_playlist CASCADE \n \"\"\",\n \"\"\"\n DROP TABLE album_genero_musical CASCADE\n \"\"\",\n \"\"\"\n DROP TABLE letras_musica_playlist CASCADE \n \"\"\")\n\n try:\n\n conn = psycopg2.connect(host=\"localhost\",database=\"SoundBox\", user=\"postgres\", password=\"postgres\")\n cur = conn.cursor()\n # DROP table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "def drop_all_tables(args):\n engine = sqlalchemy.create_engine(CONFIG.db_uri)\n print(\"Dropping all tables on {}...\".format(CONFIG.db_uri), end=\" \")\n Base.metadata.drop_all(bind=engine)\n print(\"finished.\")", "def drop_all():\n db.drop_all()", "def _drop_tables(self):\n logging.info(\"Dropping all tables from the database!\")\n db_conn = self.engine.connect()\n query_result = list()\n query_result.append(db_conn.execute(\n \"DROP SCHEMA public CASCADE;CREATE SCHEMA public;\"))\n\n if self.database_choice == 'remote_database' or self.database_choice \\\n == 'remote_database_master':\n query_result.append(db_conn.execute('''\n GRANT ALL PRIVILEGES ON SCHEMA public TO housingcrud;\n GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO housingcrud;\n GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO housingcrud;\n GRANT ALL ON SCHEMA public TO public;\n '''))\n return query_result", "def cleanup(self):\n for table in filter(lambda x: self.cmd.exists(x, silent=(log.level < DEBUG)), self.tables):\n log.info(\"MLoad\", \"Dropping table '{}'...\".format(table))\n self.cmd.drop_table(table, silent=True)", "def db_cleanup(self):\n with self.context():\n meido.db.session.remove()\n meido.db.drop_all()", "def drop_tables (cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def droptables(db, cursor):\n cursor.execute('''DROP TABLE IF EXISTS worlds;''')\n cursor.execute('''DROP TABLE IF EXISTS characters''')\n cursor.execute('''DROP TABLE IF EXISTS chardata''')\n db.commit()", 
"def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()\n print('All tables dropped.')", "def _drop_tables(self, tables):\n cursor = self.conn.cursor()\n try:\n cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))\n except:\n pass\n finally:\n cursor.close()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn) -> None:\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop(drop_all=False):\n\n engine = current_app.extensions['meowth_dbutils'].db.engine\n if current_app.extensions['meowth_dbutils'].metadata.bind is None:\n current_app.extensions['meowth_dbutils'].metadata.bind = engine\n with perform(\n name='dbutils drop',\n before='Dropping all project tables',\n fail='Error occured while droping project tables',\n ):\n current_app.extensions['meowth_dbutils'].metadata.drop_all()\n with perform(\n name='dbutils drop',\n before='Dropping alembic versioning table',\n fail='Error occured while dropping alembic table',\n ):\n engine.execute('drop table if exists alembic_version')\n if drop_all:\n with perform(\n name='dbutils drop',\n before='Dropping all other tables in database',\n fail='Error occured while dropping other tables',\n ):\n current_app.extensions['meowth_dbutils'].db.reflect()\n current_app.extensions['meowth_dbutils'].db.drop_all()", "def refresh_tables(db):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"DROP TABLE waiting\")\r\n c.execute(\"DROP TABLE help\")\r\n c.execute(\"DROP TABLE helped\")\r\n create_tables()\r\n except Error as e:\r\n print(e)", "def reset_db():\n\n metadata = sa.MetaData()\n metadata.reflect(engine)\n for tbl in reversed(metadata.sorted_tables):\n tbl.drop(engine)\n create_tables()", "def drop():\n if prompt_bool(\"Are you sure you want to lose all your data\"):\n db.drop_all()\n db.engine.execute(\"drop table if exists alembic_version\")", "def ResetDB(self) :\r\n\t\ttry :\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectDictTables'])\r\n\t\t\tDictTables = self.DB_Cursor.fetchall()\r\n\t\t\tfor table in DictTables :\r\n\t\t\t\tlogging.debug(\"Dropping %s\"%table[0])\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['DropTable']%table[0])\r\n\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectWordLists'])\r\n\t\t\tWordListTables = self.DB_Cursor.fetchall()\r\n\t\t\tfor table in WordListTables :\r\n\t\t\t\tlogging.debug(\"Dropping %s\"%table[0])\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['DropTable']%table[0])\r\n\r\n\t\t\tfor table in self.SQLCMDs['TableList'] :\r\n\t\t\t\tlogging.debug(\"Dropping %s\"%table)\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['DropTable']%(table,))\r\n\r\n\t\t\tself.DB_Connect.commit()\r\n\t\texcept Exception as detail:\r\n\t\t\tlogging.error(\"Failed to 
reset the database: %s\"%detail.message)\r\n\t\treturn", "def drop_tables(cur, conn): \n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def drop_tables(cur, conn):\n \n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()", "def tearDown(self):\n\n InitializeDb('TEST_DATABASE_URI').drop_tables()", "def db_dropall():\n # db_dropall doesn't work if the models aren't imported\n import_string('models', silent=True)\n for blueprint_name, blueprint in app.blueprints.items():\n import_string('%s.models' % blueprint.import_name, silent=True)\n db.drop_all()", "def deleteDBtables(self, tables=None):\n\n # If tables is None, all tables are deleted and re-generated\n if tables is None:\n # Delete all existing tables\n self._c.execute('SET FOREIGN_KEY_CHECKS = 0')\n for table in self.getTableNames():\n self._c.execute(\"DROP TABLE \" + table)\n self._c.execute('SET FOREIGN_KEY_CHECKS = 1')\n\n else:\n # It tables is not a list, make the appropriate list\n if type(tables) is str:\n tables = [tables]\n\n # Remove all selected tables (if exist in the database).\n for table in set(tables) & set(self.getTableNames()):\n self._c.execute(\"DROP TABLE \" + table)\n\n self._conn.commit()\n\n return", "def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def drop_tables(session):\n for query in drop_table_queries:\n session.execute(query)", "def teardown_db():\n engine = config['tg.app_globals'].sa_engine\n connection = engine.connect()\n\n # INFO - D.A. - 2014-12-04\n # Recipe taken from bitbucket:\n # https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/DropEverything\n\n inspector = reflection.Inspector.from_engine(engine)\n metadata = MetaData()\n\n tbs = []\n all_fks = []\n views = []\n\n # INFO - D.A. 
- 2014-12-04\n # Sequences are hard defined here because SQLA does not allow to reflect them from existing schema\n seqs = [\n Sequence('seq__groups__group_id'),\n Sequence('seq__contents__content_id'),\n Sequence('seq__content_revisions__revision_id'),\n Sequence('seq__permissions__permission_id'),\n Sequence('seq__users__user_id'),\n Sequence('seq__workspaces__workspace_id')\n ]\n\n for view_name in inspector.get_view_names():\n v = Table(view_name,metadata)\n views.append(v)\n\n for table_name in inspector.get_table_names():\n\n fks = []\n for fk in inspector.get_foreign_keys(table_name):\n if not fk['name']:\n continue\n fks.append(\n ForeignKeyConstraint((),(),name=fk['name'])\n )\n t = Table(table_name,metadata,*fks)\n tbs.append(t)\n all_fks.extend(fks)\n\n if not config['sqlalchemy.url'].startswith('sqlite'):\n for fkc in all_fks:\n connection.execute(DropConstraint(fkc))\n\n for view in views:\n drop_statement = 'DROP VIEW {}'.format(view.name)\n # engine.execute(drop_statement)\n connection.execute(drop_statement)\n\n for table in tbs:\n connection.execute(DropTable(table))\n\n\n for sequence in seqs:\n try:\n connection.execute(DropSequence(sequence))\n except Exception as e:\n logger.debug(teardown_db, 'Exception while trying to remove sequence {}'.format(sequence.name))\n\n transaction.commit()\n connection.close()\n engine.dispose()", "def dropTables(t=None):\n tablelist = tables.keys if t == None else [t]\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table in tables.keys():\n query = \"DROP TABLE IF EXISTS %s;\" % table\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to drop tables:\" )\n print(ex)\n sys.exit(1)", "def tearDown(self):\n db.drop_all()", "def tearDown(self):\n db.drop_all()", "def tearDown(self):\r\n\r\n db.session.rollback()\r\n db.drop_all()", "def dropall_cmd():\n drop_all()\n print(\"all tables dropped\")", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n try:\n cur.execute(query)\n conn.commit()\n except Exception as e:\n print(e)", "def drop_tables(session):\n\n for query in drop_table_queries:\n session.execute(query)", "def db_drop_and_create_all():\n db.drop_all()\n db.create_all()", "def drop_all(self):\n self._engine.execute(\n DDL(f\"drop schema if exists {_schema.CUBEDASH_SCHEMA} cascade\")\n )", "def tearDown(self):\n\n db.session.rollback()\n db.session.remove()\n db.drop_all()", "def drop_fact_tables(cur,conn):\n\tfor query in drop_fact_queries:\n\t\tcur.execute(query)\n\t\tconn.commit()", "def Drop_Tables_From_DB(self, tables_list, d_params=None):\n ctx = self.__Connect_To_Snowflake(d_params)\n if len(tables_list) > 0:\n for table in tables_list:\n try:\n ctx.cursor().execute(f\"drop table {table};\")\n print(f\"Deleted : {table}\")\n except:\n print(f\"Doesn't exist : {table}\")\n ctx.close()\n return None", "def tearDown(self):\n self.db.drop_all()\n pass", "def drop_all_tables(self):\n\n # Retrieve database name from application config\n app = self.db.app\n mongo_settings = app.config['MONGODB_SETTINGS']\n database_name = mongo_settings['db']\n\n # Flask-MongoEngine is built on MongoEngine, which is built on PyMongo.\n # To drop database collections, we need to access the PyMongo Database object,\n # which is stored in the PyMongo MongoClient object,\n # which is stored in app.extensions['mongoengine'][self]['conn']\n py_mongo_mongo_client = app.extensions['mongoengine'][self.db]['conn']\n py_mongo_database = py_mongo_mongo_client[database_name]\n\n # Use the PyMongo Database 
object\n for collection_name in py_mongo_database.collection_names():\n py_mongo_database.drop_collection(collection_name)", "def delete_tables(db, table_names):\n with tables(db.engine, *table_names) as tpl:\n for tbl in tpl[1:]:\n tbl.delete().execute()", "def tearDown(self):\n self.database.truncate_all_tables()", "def tearDown(self):\n db.session.commit()\n db.drop_all()", "def drop_tables(self, table):\n drop_table = \"DROP TABLE IF EXISTS {} CASCADE;\".format(table)\n self.cursor.execute(drop_table)", "def cleanup():\n cat = CSVCatalog.CSVCatalog()\n cat.drop_table(\"people\", force_drop=True)\n cat.drop_table(\"batting\", force_drop=True)\n cat.drop_table(\"teams\", force_drop=True)", "def drop_everything():\n from sqlalchemy.engine.reflection import Inspector\n from sqlalchemy.schema import DropConstraint, DropTable, MetaData, Table\n\n con = db.engine.connect()\n trans = con.begin()\n inspector = Inspector.from_engine(db.engine)\n\n # We need to re-create a minimal metadata with only the required things to\n # successfully emit drop constraints and tables commands for postgres (based\n # on the actual schema of the running instance)\n meta = MetaData()\n tables = []\n all_fkeys = []\n\n for table_name in inspector.get_table_names():\n fkeys = []\n\n for fkey in inspector.get_foreign_keys(table_name):\n if not fkey[\"name\"]:\n continue\n\n fkeys.append(db.ForeignKeyConstraint((), (), name=fkey[\"name\"]))\n\n tables.append(Table(table_name, meta, *fkeys))\n all_fkeys.extend(fkeys)\n\n for fkey in all_fkeys:\n con.execute(DropConstraint(fkey))\n\n for table in tables:\n con.execute(DropTable(table))\n\n trans.commit()", "def drop_tables(cur, conn):\n for query in drop_table_queries:\n cur.execute(query)\n conn.commit()\n print(f\"\\nRunning: {query}\")", "def drop_everything():\n from sqlalchemy.engine.reflection import Inspector\n from sqlalchemy.schema import DropConstraint, DropTable, MetaData, Table\n\n con = db.engine.connect()\n trans = con.begin()\n inspector = Inspector.from_engine(db.engine)\n\n # We need to re-create a minimal metadata with only the required things to\n # successfully emit drop constraints and tables commands\n # for postgres (based on the actual schema of the running instance)\n meta = MetaData()\n tables = []\n all_fkeys = []\n\n for table_name in inspector.get_table_names():\n fkeys = []\n\n for fkey in inspector.get_foreign_keys(table_name):\n if not fkey[\"name\"]:\n continue\n\n fkeys.append(db.ForeignKeyConstraint((), (), name=fkey[\"name\"]))\n\n tables.append(Table(table_name, meta, *fkeys))\n all_fkeys.extend(fkeys)\n\n for fkey in all_fkeys:\n con.execute(DropConstraint(fkey))\n\n for table in tables:\n con.execute(DropTable(table))\n\n trans.commit()", "def tearDown(self):\n\n # Remove all tables from test db\n db.session.remove()\n db.drop_all()", "def drop_data():\n DATABASE['product'].drop()\n DATABASE['customer'].drop()\n DATABASE['rental'].drop()", "async def drop_databases(self) -> None:\n if not self._inited:\n raise ConfigurationError(\"You have to call .init() first before deleting schemas\")\n for db_client in self._db_client_map.values():\n await db_client.close()\n await db_client.db_delete()\n\n self._reset()", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)", "def test_table_drop(app, runner):\n result = runner.invoke(drop_tables, input=\"y\")\n\n with app.app_context():\n 
assert not db.engine.has_table('link')\n assert not db.engine.has_table('user')", "def cleanup_database():\n with sqlite3.connect(DB_STRING) as con:\n con.execute(\"DROP TABLE data\")", "def reset_db():\n db.drop_all()\n _init_db()", "def drop_database():\n drop_db(app)", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()", "def tearDown(self):\n db.session.close()\n db.drop_all()", "def tearDown(self):\n with self.app.app_context():\n # drop all tables\n db.session.remove()\n db.drop_all()\n db.create_all()", "def drop_db():\n if prompt_bool(\"Are you sure you want to lose all your data?\"):\n app = create_app(dotenv.get('FLASK_CONFIG'))\n with app.app_context():\n db.drop_all()", "def tearDown(self):\n #db.session.remove()\n db.drop_all()" ]
[ "0.8613892", "0.85542655", "0.8401308", "0.8334024", "0.829665", "0.82727927", "0.8257214", "0.8239192", "0.8230405", "0.8215113", "0.8172018", "0.8147676", "0.8134603", "0.8033471", "0.80164933", "0.7996094", "0.7996094", "0.7996094", "0.7996094", "0.7996094", "0.7996094", "0.7996094", "0.7996094", "0.7996094", "0.7894234", "0.7875027", "0.7873612", "0.78511506", "0.7773903", "0.7724372", "0.771585", "0.77138174", "0.770552", "0.76994824", "0.7680368", "0.76589215", "0.76400244", "0.7628304", "0.7625522", "0.76149416", "0.76149416", "0.76149416", "0.76149416", "0.76149416", "0.76149416", "0.76149416", "0.76149416", "0.7607953", "0.75828725", "0.756823", "0.7563804", "0.7560733", "0.75563496", "0.7542313", "0.7540511", "0.749718", "0.7491599", "0.7470517", "0.7453935", "0.7444334", "0.7437961", "0.7424574", "0.7419464", "0.7419464", "0.7402106", "0.7388655", "0.73856413", "0.7383703", "0.7375738", "0.73668706", "0.7366355", "0.7361254", "0.7341148", "0.73343515", "0.73233736", "0.732201", "0.7304949", "0.7298174", "0.72867006", "0.72805035", "0.7275694", "0.7266739", "0.726055", "0.7245091", "0.72354376", "0.7231649", "0.7230602", "0.72202545", "0.71816844", "0.7169232", "0.71413684", "0.71351475", "0.7134233", "0.7134233", "0.7134233", "0.7134233", "0.7127793", "0.7126689", "0.71246046", "0.71206164" ]
0.766048
35
Write the workflow info dict to the output stream.
def __call__(info, output_stream, config_variant=u""):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def dump(self, output_stream):\n raise NotImplementedError", "def save(self, context_dir: ContextDir) -> None:\n logger.info(\"Writing workflow data into %s\", context_dir.workflow_json)\n with open(context_dir.workflow_json, \"w+\") as f:\n json.dump(self.as_dict(), f, cls=WorkflowDataEncoder)", "def write(self, output_stream=sys.stdout):\n for model in self.models:\n if len(model.chains) == 0:\n continue\n if len(self.models) > 1:\n print(\"MODEL %4d\" % (model.number), file=output_stream)\n model.write(output_stream)\n if len(self.models) > 1:\n print(\"ENDMDL\", file=output_stream)\n print(\"END\", file=output_stream)", "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def DumpInfo(self):\n if self.logdir is None:\n return\n self.dumpfile = '%s/preprocess_info.yaml' % (self.logdir)\n try:\n f = open(self.dumpfile,'w')\n f.write(yaml.dump(self.info,default_flow_style=False, indent=4))\n f.close()\n except IOError:\n self.errors = True\n errstr = 'Error accessing %s' % self.dumpfile\n raise IOError(errstr)\n self.LogErrors(errstr)", "def writeOutput(self):\n\n self.collect.writeOutput()", "def save_log_to_file(dictionary, output_file, workflow_step=None):\n if workflow_step is None:\n workflow_step = \"NoKEY\"\n\n dict2log = {workflow_step: dictionary}\n\n with open(output_file, \"a+\") as fileout:\n YAML().dump(dict2log, fileout)", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))", "def write_output(self):", "def write(self):\n print yaml.dump(self._config, default_flow_style=False),", "def writeto(self,output_file,**kwargs):\n dump_pkl(self._properties,output_file,**kwargs)\n return", "def write_output(self):\n self.tcex.log.info('Writing Output')\n self.tcex.log.info(type(self.all_items))\n self.tcex.log.info(len(self.all_items))\n self.tcex.playbook.create_output('firework_alert.json', self.all_items)", "def write_to(self, stream: StreamWrapper):\n stream.write_int(len(self.moves))\n for element in self.moves:\n element.write_to(stream)\n stream.write_int(len(self.buildings))\n for element in self.buildings:\n element.write_to(stream)\n if self.choose_specialty is None:\n stream.write_bool(False)\n else:\n stream.write_bool(True)\n stream.write_int(self.choose_specialty)", "def __exit__(self, *_):\n with self._info_yaml_file_path.open(\"w\") as info:\n self._yml.dump(self._info, info)", "def create_workflow_file(self, workflow: Workflow, props: PropertySet):", "def write_job_manifest(self):\n import time\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n for k, v in self.job.items():\n hout.write(';'.join([k, v]) + '\\n')", "def dump(self, stream):\n items = (\n ('time', self.time),\n ('inc', self.inc),\n )\n # use ordered dict to retain order\n ts = collections.OrderedDict(items)\n json.dump(dict(ts=ts), stream)", "def write(self, stream, root, order):\n yaml.dump(root, stream, default_flow_style=False if args.style == 'block' else None)", "def workflow_details(self) -> pulumi.Output[Optional['outputs.ServerWorkflowDetails']]:\n return pulumi.get(self, \"workflow_details\")", "def writeOutput(self, output):", "def export(self, stream):\n pass", "def create_workflow_info(html=True):\n if html:\n hdr = \"<h3>Workflow Theses</h3>\"\n state_info = \"<p><b>stav:</b>\" + self.workflow.workflow.state + 
\"</p>\"\n started_info = \"<p><b>spuštěno:</b>\" + str(self.workflow.workflow.started) + \"</p>\"\n stopped_info = \"<p><b>zastaveno:</b>\" + str(self.workflow.workflow.stopped) + \"</p>\"\n double_stop = \"<br><br>\"\n batches_count_info = \"<p><b>počet zpracovaných dávek:</b>\" + \\\n str(len(self.workflow.batches_processing)) + \"</p><br><br>\"\n else:\n hdr = \"Workflow Theses\\n\\n\"\n state_info = \"stav:\" + self.workflow.workflow.state + \"\\n\"\n started_info = \"spuštěno:\" + str(self.workflow.workflow.started) + \"\\n\"\n stopped_info = \"zastaveno:\" + str(self.workflow.workflow.stopped) + \"\\n\"\n double_stop = \"\\n\\n\"\n batches_count_info = \"počet zpracovaných dávek:\" + str(len(self.workflow.batches_processing)) + \"\\n\\n\"\n\n msg_part = hdr + state_info + started_info + stopped_info + double_stop + batches_count_info\n\n return msg_part", "def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)", "def writeToMetadata(self, context):\n pass", "def write_info(self, opt):\n path = EVT_PERIGEE_INFO_PATH(opt.data_dir, self)\n LOGGER.info(f\"Writing info to {path}\")\n path.write_text(json.dumps(self.info, indent=4))", "def write_to_file(self):\n\t\tfile = open(\"states.txt\", \"w\")\n\t\t\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tfile.write(pointer.state + \"\\t\" + pointer.info)\t\n\t\t\tpointer = pointer.next\n\n\t\tfile.close()", "def write(self, filename, mode=\"w\"):\n d = self._result_dict\n val = yaml.safe_dump(d, default_flow_style=False)\n\n with open(str(filename), mode) as outfile:\n outfile.write(val)", "def write(self, fp):\n if self._defaults:\n fp.write(\"[%s]\\n\" % DEFAULTSECT)\n for (key, value) in self._defaults.items():\n fp.write(\"%s = %s\\n\" % (key, str(value).replace('\\n', '\\n\\t')))\n fp.write(\"\\n\")\n for section in self._sections:\n fp.write(\"[%s]\\n\" % section)\n for (key, value) in self._sections[section].items():\n if key == \"__name__\":\n continue\n if (value is not None) or (self._optcre == self.OPTCRE):\n key = \" = \".join((key, str(value).replace('\\n', '\\n\\t')))\n fp.write(\"%s\\n\" % (key))\n fp.write(\"\\n\")", "def _amber_write_input_file(self):\n logger.debug(\"Writing {}\".format(self.input))\n with open(os.path.join(self.path, self.input), \"w\") as f:\n f.write(\"{}\\n\".format(self.title))\n f.write(\" &cntrl\\n\")\n self._write_dict_to_mdin(f, self.cntrl)\n\n if self.ewald is not None:\n f.write(\" &ewald\\n\")\n self._write_dict_to_mdin(f, self.ewald)\n\n if self.cntrl[\"nmropt\"] == 1:\n if self.wt is not None:\n for line in self.wt:\n f.write(\" \"+line+\"\\n\")\n f.write(\" &wt type = 'END', /\\n\")\n if self.restraint_file is not None:\n f.write(\"DISANG = {}\\n\".format(self.restraint_file))\n f.write(\"LISTOUT = POUT\\n\\n\")\n if self.group is not None:\n f.write(\"{:s}\".format(self.group))", "def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)", "def save_sess_to_txt(self, info_dict=None):\n sub_folder_dic, pair_list_dic = info_dict\n saving_pair_info(sub_folder_dic, pair_list_dic)", "def export_workflow(args):\n if args.type == 'magnis':\n clarity_epp.export.workflow.helix_magnis(lims, 
args.process_id, args.output_file)\n elif args.type == 'mip':\n clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file)", "def print_metadata():\n data = {\n 'python_implementation': platform.python_implementation(),\n 'python_version_info': tuple(sys.version_info),\n 'pickle_protocol': pickle.HIGHEST_PROTOCOL,\n }\n if sys.version_info < (3,):\n out_stream = sys.stdout\n else:\n out_stream = sys.stdout.buffer\n out_stream.write(json.dumps(data).encode(_IPC_ENCODING) + b'\\n')", "def save_info_file(self, path):\n path = os.path.join(path, 'asv-env-info.json')\n content = {\n 'tool_name': self.tool_name,\n 'python': self._python,\n 'requirements': self._requirements,\n 'build_env_vars': self.build_env_vars\n }\n util.write_json(path, content)", "def output(self):\n\n # output dir, check if using d6tpipe\n if hasattr(self, 'pipename'):\n import d6tflow.pipes\n dirpath = d6tflow.pipes.get_dirpath(self.pipename)\n else:\n dirpath = settings.dirpath\n\n save_ = getattr(self, 'persist', [])\n output = dict([(k, self.target_class(self._getpath(dirpath, k))) for k in save_])\n if self.persist==['data']: # 1 data shortcut\n output = output['data']\n return output", "def print_workflow_summary(workflow_stats ):\n\t# status\n\tworkflow_stats.set_job_filter('nonsub')\n\t# Tasks\n\ttotal_tasks = workflow_stats.get_total_tasks_status()\n\ttotal_succeeded_tasks = workflow_stats.get_total_succeeded_tasks_status()\n\ttotal_failed_tasks = workflow_stats.get_total_failed_tasks_status()\n\ttotal_unsubmitted_tasks = total_tasks -(total_succeeded_tasks + total_failed_tasks)\n\ttotal_task_retries = workflow_stats.get_total_tasks_retries()\n\ttotal_invocations = total_succeeded_tasks + total_failed_tasks + total_task_retries\n\t# Jobs\n\ttotal_jobs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_jobs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_jobs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_jobs = total_jobs - (total_succeeded_jobs + total_failed_jobs )\n\ttotal_job_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_job_instance_retries = total_succeeded_jobs + total_failed_jobs + total_job_retries\n\t# Sub workflows\n\tworkflow_stats.set_job_filter('subwf')\n\ttotal_sub_wfs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_sub_wfs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_sub_wfs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_sub_wfs = total_sub_wfs - (total_succeeded_sub_wfs + total_failed_sub_wfs)\n\ttotal_sub_wfs_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_sub_wfs_tries = total_succeeded_sub_wfs + total_failed_sub_wfs + total_sub_wfs_retries\n\n\t# tasks\n\tsummary_str = \"\"\n\tsummary_str += \"total_succeeded_tasks: \" + convert_to_str(total_succeeded_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_tasks: \" + convert_to_str(total_failed_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_tasks: \" + convert_to_str(total_unsubmitted_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_tasks: \" + convert_to_str(total_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_task_retries: \" + convert_to_str(total_task_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_invocations: \" + convert_to_str(total_invocations)\n\tsummary_str += NEW_LINE_STR\n\n\n\tsummary_str += \"total_succeeded_jobs: \" + convert_to_str(total_succeeded_jobs)\n\tsummary_str += 
NEW_LINE_STR\n\tsummary_str += \"total_failed_jobs: \" + convert_to_str(total_failed_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_jobs: \" + convert_to_str(total_unsubmitted_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_jobs:\" + convert_to_str(total_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_job_retries: \" + str(total_job_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_job_instance_retries:\" + convert_to_str(total_job_instance_retries)\n\tsummary_str += NEW_LINE_STR\n\n\n\tsummary_str += \"total_succeeded_sub_wfs: \" + convert_to_str(total_succeeded_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_sub_wfs: \" + convert_to_str(total_failed_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_sub_wfs: \" + convert_to_str(total_unsubmitted_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs: \" + convert_to_str(total_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs_retries: \" + str(total_sub_wfs_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs_tries: \" + convert_to_str(total_sub_wfs_tries)\n\tsummary_str += NEW_LINE_STR\n\n\tworkflow_states_list = workflow_stats.get_workflow_states()\n\tworkflow_wall_time = stats_utils.get_workflow_wall_time(workflow_states_list)\n\n\tif workflow_wall_time is None:\n\t\tsummary_str += \"workflow_runtime: -\"\n\telse:\n\t\tsummary_str += \"workflow_runtime: %-20s (total %d seconds)\" % \\\n\t\t\t\t(format_seconds(workflow_wall_time), (workflow_wall_time))\n\tsummary_str += NEW_LINE_STR\n\tworkflow_cum_job_wall_time = workflow_stats.get_workflow_cum_job_wall_time()[0]\n\tif workflow_cum_job_wall_time is None:\n\t\tsummary_str += \"cumulative_workflow_runtime_kickstart: -\"\n\telse:\n\t\tsummary_str += \"cumulative_workflow_runtime_kickstart: %-20s (total %d seconds)\" % \\\n\t\t\t(format_seconds(workflow_cum_job_wall_time),workflow_cum_job_wall_time)\n\tsummary_str += NEW_LINE_STR\n\tsubmit_side_job_wall_time = workflow_stats.get_submit_side_job_wall_time()[0]\n\tif submit_side_job_wall_time is None:\n\t\tsummary_str += \"cumulative_workflow_runtime_dagman: -\"\n\telse:\n\t\tsummary_str += \"cumulative_workflow_runtime_dagman: %-20s (total %d seconds)\" % \\\n\t\t\t(format_seconds(submit_side_job_wall_time), submit_side_job_wall_time)\n\treturn summary_str", "def serialize(self, out_stream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def write_output_shifts_to_file(self, shift_output):\n pass", "def write_out(self, niter, locuslen):\n try:\n handle = open(self.output, 'w')\n except OSError:\n print 'Error, you do not have permission to write files here.'\n extit(1)\n # First, write the pop names\n handle.write('#Pop 1: ' + self.popnames[0] + '\\n')\n handle.write('#Pop 2: ' + self.popnames[1] + '\\n')\n # Then write the run parameters\n handle.write('#Model: ' + self.modelname + '\\n')\n handle.write('#Max iterations: ' + str(niter) + '\\n')\n # Then write some model summaries\n handle.write('#Data Likelihoods: ' + ' '.join([str(s) for s in self.mod_like]) + '\\n')\n handle.write('#Optimized Likelihoods: ' + ' '.join([str(s) for s in self.opt_like]) + '\\n')\n handle.write('#AIC: ' + ' '.join([str(s) for s in self.aic]) + '\\n')\n handle.write('#LocusLem: ' + str(locuslen) + '\\n')\n handle.write('#4*Na*u*L: ' + str(self.theta_mean) + '\\n')\n handle.write('#Na: ' + str(self.Na) + '\\n')\n for name, val in zip(self.params['Names'], 
self.scaled_params):\n towrite = '#' + name + ': ' + str(val) + '\\n'\n handle.write(towrite)\n # Then a table of the parameters that were found\n handle.write('Iteration\\t' + '\\t'.join(self.params['Names']) + '\\n')\n handle.write('Initial\\t' + '\\t'.join([str(s) for s in self.params['Values']]) + '\\n')\n # Write the perturbed parameters\n for index, vals in enumerate(self.p_init):\n name = 'Perturbed_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the hot annealed values\n for index, vals in enumerate(self.hot_params):\n name = 'Hot_Anneal_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the cold annealed values\n for index, vals in enumerate(self.cold_params):\n name = 'Cold_Anneal_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the BFGS parameters\n for index, vals in enumerate(self.opt_params):\n name = 'BFGS_' + str(index) + '\\t'\n handle.write(name + '\\t'.join([str(s) for s in vals]) + '\\n')\n # And the final params\n handle.write('Hot_Mean\\t' + '\\t'.join([str(s) for s in self.hot_mean]) + '\\n')\n handle.write('Cold_Mean\\t' + '\\t'.join([str(s) for s in self.cold_mean]) + '\\n')\n handle.write('BFGS_Mean\\t' + '\\t'.join([str(s) for s in self.bfgs_mean]) + '\\n')\n handle.flush()\n handle.close()\n return", "def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "def write_workflow_file(workflow_model, file_path):\n WorkflowWriter().write(workflow_model, file_path)", "def process(self):\n self.output_info = self.attributes.copy()", "def write_output_plist(self):\n \n if self.env is None:\n return\n \n try:\n plistlib.writePlist(self.env, self.outfile)\n except BaseException as e:\n raise ProcessorError(e)", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def write(self):\n pass", "def write(self):\n pass", "def write_all(self):\n\n for _, seq in self.seq_dict.items():\n write_mode(seq)", "def write(self, stream):\n # write the data\n pyffi.object_models.xml.struct_.StructBase.write(\n self, stream, self)", "def save_dict_as_yaml_integration_file(self, output_file: str):\n logger.debug(f\"Writing collected metadata to {output_file}.\")\n\n write_yml(output_file, self.metadata_dict)\n logger.info(\"[green]Finished successfully.[/green]\")", "def create_log(self):\n self.model.graph.get_stats()\n out = self.model.graph.summary\n out[\"training_error\"] = zip(self.train_it, self.train_err)\n out[\"validation_error\"] = zip(self.validation_it, self.validation_err)\n with open(self.log, \"w\") as f:\n f.write(json.dumps(out, default=defaultencode))", "def write(self, stream):\r\n\r\n stream.write(pystache.render(self._template, self.template_data))", "def write_sitemap ( self ):\n try:\n self.output_fd = open ( file=dflt_cfg.DFLT_CFG[ OUTPUT_PATH ], mode='w' )\n self.print_url_links ( self.root )\n except (PermissionError, AttributeError) as err:\n 
self.logger.error ( \"Error {0} occurred. Output file {1} cannot be created\".format ( err, \\\n dflt_cfg.DFLT_CFG[\n OUTPUT_PATH ] ) )\n except Exception as err:\n self.logger.error ( \"Error {0} occurred while writing sitemap in output file: {1}\".format ( err, \\\n dflt_cfg.DFLT_CFG[ OUTPUT_PATH ] ) )\n self.output_fd.close ( )\n else:\n print(\"Sitemap for {} is written in {}.\".format(dflt_cfg.DFLT_CFG[DOMAIN], dflt_cfg.DFLT_CFG[ OUTPUT_PATH ]))\n print( \"Logs (Broken or dead URLs along with application logs) for domain {0} are available in {1} directory.\".format ( dflt_cfg.DFLT_CFG[DOMAIN], \"./logs\" ) )\n self.output_fd.close ( )", "def save_info(self):\n json.dump(self.info, open(os.path.join(self.dstore_dir, \"info.json\"), \"w\"),\n sort_keys=True, indent=4, ensure_ascii=False)", "def save_info_file(self, info_dict: Dict[str, Dict[str, int]]) -> NoReturn:\n with open(os.path.join(self.out_folder, INFO_FILE_NAME), \"w\") as file_handle:\n for split in info_dict:\n file_handle.write(split + \":\\n\")\n for class_name in info_dict[split]:\n file_handle.write(\n class_name + \": \" + str(info_dict[split][class_name]) + \"\\n\"\n )\n file_handle.write(\"\\n\")", "def result(self, step):\n indent_extra = 0\n if self.current_rule:\n indent_extra = self.indent_size\n\n step = self.steps.pop(0)\n indent = make_indentation(2 * self.indent_size + indent_extra)\n if self.show_aligned_keywords:\n # -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6):\n text = u\"%s%6s %s ... \" % (indent, step.keyword, step.name)\n else:\n text = u\"%s%s %s ... \" % (indent, step.keyword, step.name)\n self.stream.write(text)\n\n status_text = step.status.name\n if self.show_timings:\n status_text += \" in %0.3fs\" % step.duration\n\n unicode_errors = 0\n if step.error_message:\n try:\n self.stream.write(u\"%s\\n%s\\n\" % (status_text, step.error_message))\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s\\n\" % status_text)\n self.stream.write(u\"%s while writing error message: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n else:\n self.stream.write(u\"%s\\n\" % status_text)\n\n if self.show_multiline:\n if step.text:\n try:\n self.doc_string(step.text)\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s while writing docstring: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n if step.table:\n self.table(step.table)", "def write(self):\n out = json.dumps({\"items\": self.items})\n sys.stdout.write(out)", "def write(self):\n self.json_o.write()", "def write(self, stream, root, order):\n if root and isinstance(root[0], dict):\n self.markup(stream, order, '| ', ' | ', ' |')\n self.markup(stream, ['-'] * len(order), '| ', ' | ', ' |')\n for row in root:\n self.markup(stream, [row.get(col, '') for col in order], '| ', ' | ', ' |')\n else:\n for row in root:\n self.markup(stream, row, '| ', ' | ', ' |')", "def write_input(self, atoms, properties, system_changes):\n io.write_all_inputs(\n atoms, properties, parameters=self.parameters,\n pp_paths=self.pp_paths,\n label=self.label, v8_legacy_format=self.v8_legacy_format)", "def save_info(self, combos=None, cases=None, fn_args=None):\n # If saving Harvester or Runner, strip out function information so\n # as just to use pickle.\n if self.farmer is not None:\n farmer_copy = copy.deepcopy(self.farmer)\n farmer_copy.fn = None\n farmer_pkl = to_pickle(farmer_copy)\n else:\n farmer_pkl = None\n\n write_to_disk(\n {\n \"combos\": combos,\n \"cases\": 
cases,\n \"fn_args\": fn_args,\n \"batchsize\": self.batchsize,\n \"num_batches\": self.num_batches,\n \"_batch_remainder\": self._batch_remainder,\n \"shuffle\": self.shuffle,\n \"farmer\": farmer_pkl,\n },\n os.path.join(self.location, INFO_NM),\n )", "def printIns(self, stream):\n print(' ', str(self), file=stream)", "def serialize(self, to_file=None):\n if to_file is not None:\n raise ValueError(\n \"TaskInfo does not support serialization to a custom filename\")\n\n to_file = self.filename\n gitrepo.write_task(to_file, self.pretty(self.dict()))", "def write(self, stream, root, order):\n\n for (pos, row) in enumerate(root):\n log.debug('{pos}: {row}'.format(**locals()))\n if isinstance(row, dict):\n if pos == 0:\n stream.write(args.separator.join(order))\n stream.write('\\n')\n stream.write(args.separator.join([row[name] for name in order]))\n else:\n stream.write(args.separator.join(row))\n stream.write('\\n')", "def get_workflow_outputs(workflow_dict, input_dict):\n output_dict = {}\n for output_parameter in workflow_dict['outputs']:\n if 'outputSource' in output_parameter:\n value, found = resolve_output_reference(\n output_parameter['outputSource'], workflow_dict, input_dict)\n if found:\n output_dict[output_parameter['id']] = value\n return output_dict", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def write(self, stream, root, order):\n if not isinstance(root, dict):\n parser.error('Form format requires a dictionary')\n if len(root) != 1:\n parser.error('A {count} element dictionary cannot be processed in form format'.format(count=len(root)))\n for column in order:\n stream.write(column + '\\n')\n stream.write(root[0][column] + '\\n')", "def write_file(self):\n\n running_time = str(self.running_time_end - self.running_time_start)\n rounded_running_time = '{:.10}'.format(running_time)\n output = 'path_to_goal: ' + str(self.path_to_goal) + '\\n'\n output += 'cost_of_path: ' + str(self.cost_of_path) + '\\n'\n output += 'nodes_expanded: ' + str(self.nodes_expanded) + '\\n'\n output += 'fringe_size: ' + str(self.fringe_size) + '\\n'\n output += 'max_fringe_size: ' + str(self.max_fringe_size) + '\\n'\n output += 'search_depth: ' + str(self.search_depth) + '\\n'\n output += 'max_search_depth: ' + str(self.max_search_depth) + '\\n'\n output += 'running_time: ' + rounded_running_time + '\\n'\n\n system_name = system()\n if system_name == 'Windows':\n output += 'max_ram_usage: (Not available on Windows OS)'\n elif system_name == 'Linux':\n output += 'max_ram_usage: ' + \\\n str(getrusage(RUSAGE_SELF).ru_maxrss / 1024) + '\\n'\n\n file = open('output.txt', 'w+')\n file.write(output)\n print(output)", "def _write_outputs(self):\n\n #########################\n # Create necessary variables for generic metadata file, as well as\n # generate and fill metadata file, if user wants it\n record_start = pd.to_datetime(self.dt_array[0]).date()\n record_end = pd.to_datetime(self.dt_array[-1]).date()\n\n if self.metadata_mode == 1 and self.script_mode == 1:\n # user wants to fill metadata and it is the correct mode\n\n # First check to see if metadata file already exists\n if not os.path.isfile('correction_metadata.xlsx'):\n # file does not exist, create new one\n metadata_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 
'output_file_path': self.output_file_path}, index=np.array([1]))\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n metadata_info.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n else:\n # file is already created, so we need to read it in, append our new information to the bottom of it\n # and then save the info\n metadata_info = pd.read_excel('correction_metadata.xlsx', sheet_name=0, index_col=None, engine='xlrd',\n keep_default_na=False, verbose=True, skip_blank_lines=True)\n\n new_meta_info = pd.DataFrame({'station_name': self.station_name, 'station_lat': self.station_lat,\n 'station_lon': self.station_lon, 'station_elev_m': self.station_elev,\n 'record_start': record_start, 'record_end': record_end,\n 'anemom_height_m': self.ws_anemometer_height,\n 'output_file_path': self.output_file_path}, index=np.array([1]))\n\n output_metadata = pd.concat([metadata_info, new_meta_info], ignore_index=True)\n\n with pd.ExcelWriter('correction_metadata.xlsx', date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD HH:MM:SS', engine='openpyxl', mode='w') as writer:\n output_metadata.to_excel(writer, header=True, index=False, sheet_name='Sheet1')\n\n else:\n # do nothing\n pass\n\n # if we are using a network-specific metadata file, we need to update the run count to pass it on\n if self.metadata_path is not None:\n current_row = self.metadata_df.run_count.ne(2).idxmax() - 1\n current_run = self.metadata_df.run_count.iloc[current_row] + 1\n\n self.metadata_df.run_count.iloc[current_row] = current_run\n self.metadata_df.record_start.iloc[current_row] = record_start\n self.metadata_df.record_end.iloc[current_row] = record_end\n self.metadata_df.output_path.iloc[current_row] = self.output_file_path\n\n with pd.ExcelWriter(self.metadata_path, date_format='YYYY-MM-DD',\n datetime_format='YYYY-MM-DD', engine='openpyxl', mode='w') as writer:\n self.metadata_df.to_excel(writer, header=True, index=True, sheet_name='Sheet1')\n\n #########################\n # Generate output file\n # Create any final variables, then create panda dataframes to save all the data\n # Includes the following sheets:\n # Corrected Data : Actual corrected values\n # Delta : Magnitude of difference between original data and corrected data\n # Filled Data : Tracks which data points have been filled by script generated values instead of provided\n # Data that is provided and subsequently corrected by the script do not count as filled values.\n print(\"\\nSystem: Saving corrected data to .xslx file.\")\n\n # Create any individually-requested output data\n ws_2m = _wind_height_adjust(uz=self.data_ws, zw=self.ws_anemometer_height)\n\n # Create corrected-original delta numpy arrays\n diff_tavg = np.array(self.data_tavg - self.original_df.tavg)\n diff_tmax = np.array(self.data_tmax - self.original_df.tmax)\n diff_tmin = np.array(self.data_tmin - self.original_df.tmin)\n diff_tdew = np.array(self.data_tdew - self.original_df.tdew)\n diff_ea = np.array(self.data_ea - self.original_df.ea)\n diff_rhavg = np.array(self.data_rhavg - self.original_df.rhavg)\n diff_rhmax = np.array(self.data_rhmax - self.original_df.rhmax)\n diff_rhmin = np.array(self.data_rhmin - self.original_df.rhmin)\n diff_rs = np.array(self.data_rs - self.original_df.rs)\n diff_rs_tr = np.array(self.opt_rs_tr - self.orig_rs_tr)\n diff_rso = np.array(self.rso - self.original_df.rso)\n diff_ws = np.array(self.data_ws - self.original_df.ws)\n diff_precip = 
np.array(self.data_precip - self.original_df.precip)\n diff_etr = np.array(self.etr - self.original_df.etr)\n diff_eto = np.array(self.eto - self.original_df.eto)\n\n # Create datetime for output dataframe\n datetime_df = pd.DataFrame({'year': self.data_year, 'month': self.data_month, 'day': self.data_day})\n datetime_df = pd.to_datetime(datetime_df[['month', 'day', 'year']])\n\n # Create output dataframe\n output_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': self.data_tavg, 'TMax (C)': self.data_tmax,\n 'TMin (C)': self.data_tmin, 'TDew (C)': self.data_tdew,\n 'Vapor Pres (kPa)': self.data_ea, 'RHAvg (%)': self.data_rhavg,\n 'RHMax (%)': self.data_rhmax, 'RHMin (%)': self.data_rhmin, 'Rs (w/m2)': self.data_rs,\n 'Opt_Rs_TR (w/m2)': self.opt_rs_tr, 'Rso (w/m2)': self.rso,\n 'Windspeed (m/s)': self.data_ws, 'Precip (mm)': self.data_precip,\n 'ETr (mm)': self.etr, 'ETo (mm)': self.eto, 'ws_2m (m/s)': ws_2m},\n index=datetime_df)\n\n # Creating difference dataframe to track amount of correction\n delta_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TAvg (C)': diff_tavg, 'TMax (C)': diff_tmax,\n 'TMin (C)': diff_tmin, 'TDew (C)': diff_tdew,\n 'Vapor Pres (kPa)': diff_ea, 'RHAvg (%)': diff_rhavg, 'RHMax (%)': diff_rhmax,\n 'RHMin (%)': diff_rhmin, 'Rs (w/m2)': diff_rs, 'Opt - Orig Rs_TR (w/m2)': diff_rs_tr,\n 'Rso (w/m2)': diff_rso, 'Windspeed (m/s)': diff_ws, 'Precip (mm)': diff_precip,\n 'ETr (mm)': diff_etr, 'ETo (mm)': diff_eto}, index=datetime_df)\n\n # Creating a fill dataframe that tracks where missing data was filled in\n fill_df = pd.DataFrame({'date': datetime_df, 'year': self.data_year, 'month': self.data_month,\n 'day': self.data_day, 'TMax (C)': self.fill_tmax, 'TMin (C)': self.fill_tmin,\n 'TDew (C)': self.fill_tdew, 'Vapor Pres (kPa)': self.fill_ea, 'Rs (w/m2)': self.fill_rs,\n 'Complete Record Rso (w/m2)': self.fill_rso},\n index=datetime_df)\n\n # Open up pandas excel writer\n output_writer = pd.ExcelWriter(self.output_file_path, engine='xlsxwriter')\n # Convert data frames to xlsxwriter excel objects\n output_df.to_excel(output_writer, sheet_name='Corrected Data', na_rep=self.missing_fill_value)\n delta_df.to_excel(output_writer, sheet_name='Delta (Corr - Orig)', na_rep=self.missing_fill_value)\n fill_df.to_excel(output_writer, sheet_name='Filled Data', na_rep=self.missing_fill_value)\n # Save output file\n output_writer.save()\n\n logger = open(self.log_file, 'a')\n if self.script_mode == 1 and self.fill_mode == 1:\n if np.isnan(self.eto).any() or np.isnan(self.etr).any():\n print(\"\\nSystem: After finishing corrections and filling data, \"\n \"ETr and ETo still had missing observations.\")\n logger.write('After finishing corrections and filling data, '\n 'ETr and ETo still had missing observations. \\n')\n else:\n logger.write('The output file for this station has a complete record of ETo and ETr observations. \\n')\n else:\n pass\n logger.write('\\nThe file has been successfully processed and output files saved at %s.' 
%\n dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n logger.close()", "def print_info(self):\n\n self.logging.info(str(self.filename) + ':' + str(self.__info_dict))", "def write(self, out):", "def write_info(model, dataset, batch_size, learning_rate, epochs):\n file_info = open(os.path.join(model['model_dir'], 'info.json'), 'w')\n dic_info = {\n 'cameras': dataset['num_cameras'],\n 'dataset': dataset['dataset_dir'],\n 'max_demos': dataset['max_demos'],\n 'resume': model['resume'],\n 'epochs': epochs,\n 'learning_rate': learning_rate,\n 'batch_size': batch_size\n }\n file_info.write(json.dumps(dic_info, cls=NumpyEncoder))\n file_info.close()", "async def dump(self, data: dict, file: IO):", "def _output_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n fileout = os.path.normpath('{}/{}-{}.xml'.\\\n format(self.MapCreator, self.Source, self.ddnCurProject.get()))\n linesout = ['<?xml version=\"1.0\" encoding=\"UTF-8\"?>', \\\n '<DictionarySet xmlns:mc=\"urn:fmosoft-map-creator\" xmlns=\"urn:fmosoft-map-creator\" Version=\"1\">', \\\n ' <Dictionary SourceLanguage=\"{}\" SourceLanguageIsPredefined=\"true\" TargetLanguage=\"{}\" TargetLanguageIsPredefined=\"false\">'.\\\n format(self.Source, self.ddnCurProject.get()), \\\n ]\n for child in self.tree.get_children('approved'):\n vv = self.tree.item(child)['values']\n linesout.append(' <Translation Source=\"{}\" Target=\"{}\"/>'.format(vv[0], vv[1]))\n linesout.append(' </Dictionary>')\n linesout.append('</DictionarySet>')\n linesout.append('')\n\n if os.path.exists(fileout):\n os.remove(fileout)\n\n if fileout:\n output = codecs.open(fileout, mode='w', encoding='utf-8')\n output.write('\\n'.join(linesout))\n output.close()\n pass", "def generate_workflow(self) -> str:\n analysisTasks = self._parse_parameters()\n terminalTasks = self._identify_terminal_tasks(analysisTasks)\n\n ruleList = {k: SnakemakeRule(v, self._pythonPath)\n for k, v in analysisTasks.items()}\n\n workflowString = 'rule all: \\n\\tinput: ' + \\\n ','.join([ruleList[x].full_output()\n for x in terminalTasks]) + '\\n\\n'\n workflowString += '\\n'.join([x.as_string() for x in ruleList.values()])\n\n return self._dataSet.save_workflow(workflowString)", "def dump(self, stream):\n log.error('Cannot dump: %s', self.file_name)", "def workflow_descriptor(self, workflow: WorkflowObject) -> Dict:\n obj = {\n WORKFLOW_ID: workflow.workflow_id,\n WORKFLOW_NAME: workflow.name\n }\n if workflow.description is not None:\n obj[WORKFLOW_DESCRIPTION] = workflow.description\n if workflow.instructions is not None:\n obj[WORKFLOW_INSTRUCTIONS] = workflow.instructions\n return obj", "def _dumpConfiguration(self):\n print \"Writing configuration:\"\n print \" - title = \" + self.title\n print \" - executablePath = \" + self.exePath\n print \" - configPath = \" + self.configPath\n print \" - outputName = \" + self.outputName\n print \" - branch = \" + self.branch\n print \" - branchName = \" + self.branchName\n print \" - buildid = \" + self.buildid\n print \" - currentDate = \" + self.currentDate\n print \" - testDate = \" + self.testDate\n print \" - resultsServer = \" + self.resultsServer\n print \" - resultsLink = \" + self.resultsLink\n print \" - activeTests = \" + self.activeTests\n if self.symbolsPath:\n print \" - symbolsPath = \" + self.symbolsPath", "def xnat_workflow_info_complete(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info complete: request_url: \" + 
request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\tjson_item = json_items[0]\n\tjson_data_fields = json_item['data_fields']\n\n\tput_url = \"http://\" + args.server + \"/REST/workflows\"\n\t# workflow identifying information\n\tput_url += \"?wrk:workflowData/id=\" + json_data_fields['ID']\n \tput_url += \"&wrk:workflowData/pipeline_name=\" + json_data_fields['pipeline_name']\n\tput_url += \"&wrk:workflowData/launch_time=\" + json_data_fields['launch_time']\n\tput_url += \"&wrk:workflowData/data_type=\" + json_data_fields['data_type']\n\t# workflow information to be updated\n \tput_url += \"&wrk:workflowData/status=\" + \"Complete\"\n \tput_url += \"&wrk:workflowData/current_step_id=\" + \"-1\"\n\tput_url += \"&wrk:workflowData/step_description=\" + \"End\"\n\tput_url += \"&wrk:workflowData/percentageComplete=\" + \"100.0\"\n\tput_url += \"&wrk:workflowData/current_step_launch_time=\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\tput_url = put_url.replace(\" \", \"%20\");\n\n\tprint(\"xnat_workflow_info complete: put_url: \" + put_url)\n\n\tresponse = requests.put(put_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot update workflow\")\n\t\tprint(\"response.status_code: \" + str(response.status_code))\n\n\txnat_workflow_info_show(args)", "def write_output(word_dict):\n # create an empty output.txt file\n output = open('output.txt', 'w')\n\n for i in words_dict: \n output.write(i + \" : \" + str(words_dict[i]) + \"\\n\")", "def write(self, fd):\n indent = \" \"\n in2 = indent + indent\n print >>fd, \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n if self.__topComment is not None:\n print >>fd, \"<!--%s-->\" % self.__topComment\n print >>fd, \"<runConfig>\"\n for d in self.__domCfgList:\n print >>fd, d.xml(indent)\n for n in self.__domCfgNames:\n print >>fd, n.xml(indent)\n if self.__replayBaseDir is not None:\n print >>fd, \"%s<hubFiles baseDir=\\\"%s\\\">\" % \\\n (indent, self.__replayBaseDir)\n for r in self.__replayHubList:\n print >>fd, r.xml(in2)\n print >>fd, \"%s</hubFiles>\" % indent\n print >>fd, \"%s<triggerConfig>%s</triggerConfig>\" % \\\n (indent, self.__trigCfg)\n for c in self.__comps:\n if not c.isHub():\n print >>fd, \"%s<runComponent name=\\\"%s\\\"/>\" % \\\n (indent, c.name())\n\n if self.__strayStream is not None:\n (name, prescale) = self.__strayStream\n in3 = in2 + indent\n\n print >>fd, \"%s<stream name=\\\"%s\\\">\" % (in2, name)\n print >>fd, \"%s<prescale>%d</prescale>\" % (in3, prescale)\n print >>fd, \"%s</stream>\" % in2\n\n if self.__senderOption is not None:\n (hub, fwdIsolatedHits) = self.__senderOption\n fwdName = \"forwardIsolatedHitsToTrigger\"\n if fwdIsolatedHits:\n fwdVal = \"true\"\n else:\n fwdVal = \"false\"\n\n in3 = in2 + indent\n in4 = in3 + indent\n\n print >>fd, \"%s<stringHub hubId=\\\"%d\\\">\" % (in2, hub)\n print >>fd, \"%s<sender>\" % in3\n print >>fd, \"%s<%s>%s</%s>\" % (in4, fwdName, fwdVal, fwdName)\n print >>fd, \"%s</sender>\" % in3\n print >>fd, \"%s</stringHub>\" % in2\n\n print >>fd, \"</runConfig>\"", "def _write_system(top_file: IO, molecule_map: Dict):\n top_file.write(\"[ system ]\\n\")\n top_file.write(\"; name \\n\")\n top_file.write(\"System name\\n\\n\")\n\n top_file.write(\"[ molecules ]\\n\")\n top_file.write(\"; Compound\\tnmols\\n\")\n for (\n mol_name,\n mol_data,\n ) in molecule_map.items():\n n_mols = 
mol_data[\"n_mols\"]\n top_file.write(f\"{mol_name}\\t{n_mols}\\n\")\n\n top_file.write(\"\\n\")", "def save_meta_file(gen_dict, f_name):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n filename = run_id+'_'+ f_name +'.meta'\r\n f = open(os.path.join(unique_op_dir, filename),'a')\r\n print('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n logger.info('Output stored in %s'%(str(os.path.join(unique_op_dir, filename))))\r\n for key, val in gen_dict.items():\r\n line = str(key)+\" : \"+str(val)+\"\\n\"\r\n f.write(line)", "def write(self):\n raise NotImplementedError", "def write(self, stream, root, order):\n json.dump(root, stream, indent=2)\n stream.write('\\n')", "def write_model_info(content=None):\n _info_dir = os.path.join(CKPT_DIR, UNIQUE_NAME)\n create_dir(_info_dir)\n if content is None:\n content = f\"Backbone: {BACKBONE}\\nLR: {LEARNING_RATE}\\n\" \\\n f\"Resolution: {IMAGE_SIZE}\\nAugmentations: {AUG_PROBABILITY}\"\n\n with open(os.path.join(_info_dir, 'info.txt'), 'a') as fp:\n fp.write(content + '\\n')", "def Write(self):\n template_mappings = {\n 'pypi_token': self._project_definition.pypi_token or ''}\n\n file_content = []\n\n template_data = self._GenerateFromTemplate('environment', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'pypi_token', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('matrix', template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('install', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name != 'l2tdevtools':\n template_data = self._GenerateFromTemplate(\n 'install_l2tdevtools', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name in self._PROJECTS_WITHOUT_BUILD:\n template_filename = 'build_off'\n else:\n template_filename = 'build'\n\n template_data = self._GenerateFromTemplate(\n template_filename, template_mappings)\n file_content.append(template_data)\n\n template_data = self._GenerateFromTemplate('test_script', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.name not in self._PROJECTS_WITHOUT_BUILD:\n template_data = self._GenerateFromTemplate('artifacts', template_mappings)\n file_content.append(template_data)\n\n if self._project_definition.pypi_token:\n template_data = self._GenerateFromTemplate(\n 'deploy_script', template_mappings)\n file_content.append(template_data)\n\n file_content = ''.join(file_content)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n file_object.write(file_content)", "def saveMetadata(self):\n pickle.dump(self.metadata, open(self.metadataFile, 'w+b'))", "def write(self, fp, **kwds):\n json.dump(self._dict, fp)", "def to_file(self, file_io):\n pickle.dump(self.__object, file_io)", "def _dump_states(self, train=True):\n\t\tprefix = self.config.experiment_dir_name+\"/datastreams/\"\n\t\ttry:\n\t\t\tos.mkdir(prefix)\n\t\texcept:\n\t\t\tpass\n\n\t\tprefix += \"{}.pickle\"\n\n\t\tif 
train:\n\t\t\tself.train_accuracy.dump(prefix.format(\"train_accuracy\"))\n\t\t\tself.train_epochs.dump(prefix.format(\"train_epochs\"))\n\t\t\tself.train_loss.dump(prefix.format(\"train_loss\"))\n\t\t\tself.train_confusion_matrix.dump(prefix.format(\"train_confusion_matrix\"))\n\t\t\tself.learning_rate.dump(prefix.format(\"learning_rate\"))\n\t\telse:\n\t\t\tself.val_accuracy.dump(prefix.format(\"val_accuracy\"))\n\t\t\tself.val_epochs.dump(prefix.format(\"val_epochs\"))\n\t\t\tself.val_loss.dump(prefix.format(\"val_loss\"))\n\t\t\tself.val_confusion_matrix.dump(prefix.format(\"val_confusion_matrix\"))\n\t\t\"\"\"\n\t\tSave dataset specific metadata into experiment dir\n\t\t\"\"\"\n\t\t# TODO: Redo this with more information from dataset meta.json file\n\t\tmeta_path = self.config.experiment_dir_name+\"/meta.json\"\n\t\t_meta = {}\n\t\t_meta[\"classes\"] = self.classes\n\t\t_meta[\"plot_platform\"] = self.config.plot_platform\n\t\t# _meta[\"dataset_dir\"] = self.dataset.dataset_folder\n\t\tif not os.path.exists(meta_path):\n\t\t\tfp = open(meta_path, \"w\")\n\t\t\tfp.write(json.dumps(_meta))\n\t\t\tfp.close()", "def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)", "def _save_target_info(self):\n \n #make sure the file exists\n path = self.communicator.image_store.project_path + \\\n self.target_file_name\n fout = open(path, 'w')\n\n print str(1)\n print str(len(self.target_list)-1)\n for i in range(1, len(self.target_list)):\n fout.write(self.target_list[i].format_info())\n fout.write(\"\\n\\n\")\n fout.close()", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def put(self) -> Dict:\n\n body = {\n \"input_schema\": self.input_schema,\n \"output_schema\": self.output_schema,\n }\n if self._dp_definition.get(\"parameter_schema\"):\n body[\"parameter_schema\"] = {\"params\": self.parameter_schema}\n\n name = self.name if self.name else self.workflow_fn.__name__\n\n # TODO: Description not supported by nacelle\n # if description is not None:\n # body_json[\"description\"] = description\n qualified_name = f\"{self.prefix}.{name}\"\n\n # TODO: Parse response fields: uuid, created, protocol, defaultAppParams, defaultAppMetrics\n response = self._client.update_workflow(workflow_name=qualified_name, body=body)\n assert \"uuid\" in response\n\n return response", "def write_dag(self, out=sys.stdout):\n for rec in sorted(self.values()):\n print(rec, file=out)", "def _write_data_out(solutions, unable_to_resolve, unresolvables):\n print('')\n print('------------------------')\n print('--- Progress So Far: ---')\n print('Solved: ' + str(len(solutions)))\n print('Error while resolving: ' + str(len(unable_to_resolve)))\n print('Unresolvable conflicts: ' + str(len(unresolvables)))\n print('Saving progress to json.')\n print('------------------------')\n print('')\n json.dump(solutions, open(fname_solutions, 'w'))\n json.dump(unable_to_resolve, open(fname_errors, 'w'))\n json.dump(unresolvables, open(fname_unresolvables, 'w'))", "def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n 
ensure_ascii=False, indent=2)", "def printResults(self):\n if self.statiFile and len(self.printOrder) > 0:\n try:\n file = open(abspath(self.statiFile), \"w\")\n except Exception as e:\n raise ErrorOutput(e)\n else:\n for stat in self.printOrder:\n if stat == Stati.INST:\n file.write(str(self.instCount)+\"\\n\")\n elif stat == Stati.VAR:\n var_count = self.countVars()\n file.write(str(var_count) + \"\\n\")\n file.close()" ]
[ "0.60342133", "0.60284674", "0.58762455", "0.5604245", "0.55985683", "0.5569691", "0.55344164", "0.5529275", "0.55062336", "0.5501112", "0.545062", "0.5405541", "0.5363541", "0.5357599", "0.53369945", "0.5332138", "0.52984995", "0.52809745", "0.527305", "0.52714384", "0.52711064", "0.52611077", "0.5217401", "0.5215859", "0.52053964", "0.5195724", "0.5174621", "0.5159047", "0.5156295", "0.5149215", "0.5148757", "0.51449615", "0.5141974", "0.51404965", "0.51216775", "0.51205194", "0.51160634", "0.51067215", "0.5103673", "0.5096301", "0.5085438", "0.5083505", "0.5083424", "0.5078536", "0.5070762", "0.5058028", "0.50353706", "0.50353706", "0.5029329", "0.5020789", "0.50129074", "0.49885067", "0.49876446", "0.4985773", "0.49753684", "0.4972626", "0.49705178", "0.49598473", "0.49570143", "0.49553752", "0.4955024", "0.49521464", "0.49462068", "0.49225166", "0.49208233", "0.49129418", "0.49128592", "0.49055377", "0.48999527", "0.48990673", "0.4895871", "0.48940203", "0.48887283", "0.48886973", "0.48855153", "0.48839784", "0.48734766", "0.48710245", "0.48671076", "0.48651987", "0.48583704", "0.48534536", "0.48486128", "0.48403338", "0.4833721", "0.48318434", "0.48313895", "0.4826738", "0.4824398", "0.48181885", "0.4805608", "0.4801599", "0.4798992", "0.47962126", "0.47956127", "0.4792323", "0.47881618", "0.47872463", "0.47841033", "0.47817343" ]
0.48797187
76
Read CSV from the given input stream and return a workflow info dict.
def __call__(input_stream, config_variant=u""):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self, stream):\n rows = [row for row in csv.reader(stream)]\n if args.headings and rows:\n \"\"\"\n Turn the list of lists into a list of dictionaries making use of the heading row\n \"\"\"\n ret = []\n order = rows[0]\n for row in rows[1:]:\n ret.append({name: row[pos] for (pos, name) in enumerate(order)})\n else:\n ret = rows\n order = []\n self.validate(ret)\n return (ret, order)", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def read_instream(instream):\n # If you need to read a csv, create a DataFrame, or whatever it might be,\n # do it here.\n return instream.read()", "def extract(input):\n reader = csv.DictReader(input)\n return reader", "def CSVReader(self, input_file):\n f = open(input_file, 'r')\n reader = csv.reader(f)\n headers = reader.next()\n reader = csv.DictReader(f, headers)\n return reader", "def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}", "def parse_csv_input_file(input_file):\n with open(input_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n dict = {i: x for i, x in item.items()}\n yield(dict)", "def read_records_from_input(self, input_stream: BinaryIO) -> Iterator[dict]:\n\n self.reader = parse_xml(input_stream).getroot()\n\n for record in self.reader:\n yield {attribute.tag: attribute.text for attribute in record}", "def _enumerate_csv(self, csv_input):\n csv_file = open(csv_input, 'rb') \n csv_reader = csv.reader(csv_file)\n next(csv_reader, None)\n for row in reader:\n yield row", "def _read_input_csv(in_file):\n with open(in_file, \"rU\") as in_handle:\n reader = csv.reader(in_handle)\n reader.next() # header\n for line in reader:\n if line: # empty lines\n (fc_id, lane, sample_id, genome, barcode) = line[:5]\n yield fc_id, lane, sample_id, genome, barcode", "def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]", "def read_csvfile(inputfn):\n with open(inputfn, 'rU') as fd:\n datastruct = gen_csv_data(fd, returntype='list') # Make sure to store as list before closing file.\n return datastruct", "def load_csv(input):\n with open(input['csv'], 'r', encoding=input['encoding']) as f:\n invoice_dict = dict()\n reader = csv.reader(f, delimiter=';')\n\n for row in reader:\n invoice_id = row[0]\n\n if invoice_id in invoice_dict:\n invoice_dict[invoice_id].add_entry(row[1:])\n else:\n invoice_dict[invoice_id] = Invoice(row)\n\n return invoice_dict", "def read_csv():", "def loadCSV(input_file):", "def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines with data\n for input_line in 
data:\n record = {}\n for i in range(len(header)):\n record[header[i]] = input_line[i]\n output.append(record)\n return output", "def load_csv(input_filename_state):\n dataset = {}\n with open(input_filename_state) as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n location_col = -1\n week_ahead_col = -1\n quantile_col = -1\n value_col = -1 \n\n\n for i in range(len(header)):\n if header[i] == \"place\":\n location_col = i\n elif header[i] == \"week_ahead\":\n week_ahead_col = i\n elif header[i] == \"quantile\":\n quantile_col = i \n elif header[i] == \"value\":\n value_col = i\n \n for row in reader:\n state = row[location_col]\n\n # Skip the state if it is not listed in reichlab's state list.\n if state not in STATE_ID_MAPPING:\n continue\n state_id = STATE_ID_MAPPING[state]\n week_ahead = int(row[week_ahead_col])\n quantile = row[quantile_col]\n val = max(float(row[value_col]), 0)\n if week_ahead not in dataset:\n dataset[week_ahead] = {}\n if state_id not in dataset[week_ahead]:\n dataset[week_ahead][state_id] = {}\n dataset[week_ahead][state_id][quantile] = val\n return dataset", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def parse(self, employees, records, stream):\n # make a CSV reader from {stream}\n reader = csv.reader(stream)\n # skip the first three lines\n next(reader)\n next(reader)\n next(reader)\n # the next line is the first employee record\n line = next(reader)\n # start parsing\n while line:\n # get the zeroth field\n header = line[0]\n # check that it is an employee section\n assert header.startswith('Employee: ')\n # extract the useful info\n name = header[9:].strip()\n # pull the employee record\n line = self.getEmployeeRecord(\n employees=employees, records=records, name=name, reader=reader)\n\n # all done\n return", "def load(self):\n try:\n with open(\"protocol.csv\", mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=\"|\")\n\n for index, row in enumerate(csv_reader):\n if index != \"0\":\n if row[\"type\"] == \"Information\":\n item = Information(row[\"title\"], row[\"desc\"], row[\"given_by\"])\n if row[\"type\"] == \"Decision\":\n item = Decision(row[\"title\"], row[\"desc\"], row[\"result\"])\n if row[\"type\"] == \"Task\":\n item = Task(row[\"title\"], row[\"desc\"], row[\"owner\"], row[\"priority\"], row[\"due\"])\n item.creation_date = row[\"creation_date\"]\n item.creation_time = row[\"creation_time\"]\n self.add_item(item)\n except IOError:\n # create the file if it doesn't exist yet\n csv_file = open(\"protocol.csv\", \"w+\")\n csv_file.close()", "def load(csv_stream, strip_spaces=True, skip_blank_lines=True,\n encoding=\"utf-8\", delimiter=\",\", force_unique_col_names=False):\n def _force_unique(col_headers):\n seen_names = set()\n unique_col_headers = list()\n for i, col_name in enumerate(col_headers):\n if col_name in seen_names:\n col_name += \"_%s\" % i\n seen_names.add(col_name)\n unique_col_headers.append(col_name)\n return unique_col_headers\n\n def _pad_row(row):\n if len(row) < num_cols:\n for i in range(num_cols - len(row)):\n row.append('')\n return row\n\n def _process_row(row):\n if strip_spaces:\n return _pad_row([value.strip() for value in 
row])\n else:\n return _pad_row(row)\n\n csv_reader = csv.reader(csv_stream, delimiter=delimiter)\n\n column_headers = [header.strip() for header in csv_reader.next()]\n if force_unique_col_names:\n column_headers = _force_unique(column_headers)\n num_cols = len(column_headers)\n\n # Make a list to gather entries for each column in the data file...\n raw_text_cols = [list() for i in range(num_cols)]\n for row in csv_reader:\n processed_row = _process_row(row)\n # Add this new row if we either allow blank lines or if any field\n # in the line is not blank. We do this to the processed row,\n # because spaces may or may not be significant, depending on\n # whether strip_spaces is True.\n if (not skip_blank_lines) or any(processed_row):\n for i in range(num_cols):\n raw_text_cols[i].append(processed_row[i].decode(encoding))\n\n # Now take the raw data and put it into our Column...\n cols = [Column(raw_col) for raw_col in raw_text_cols]\n\n return Document(zip(column_headers, cols))", "def _read_csv(self, input_file, quotechar=None):\n with codecs.open(input_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f, delimiter=\",\", quotechar=quotechar)\n examples = []\n seq_id = 0\n header = next(reader) # skip header\n for line in reader:\n example = InputExample(\n guid=seq_id, label=line[0], text_a=line[1])\n seq_id += 1\n examples.append(example)\n return examples", "def read_csv(\n type: CSVTypes,\n csv_file: UploadFile = File(...),\n db: Session = Depends(get_db),\n authorization: str = Header(None),\n settings: config.Settings = Depends(get_settings),\n):\n if authorization != settings.upload_secret:\n raise HTTPException(401, \"Operação inválida!\")\n\n lines = 0\n\n with csv_file.file as file:\n content = file.read()\n content = content.decode(\"utf-8\")\n content = content.split(\"\\n\")\n if type == CSVTypes.results:\n lines = len(import_results_csv(content, db))\n elif type == CSVTypes.templates_results:\n lines = len(import_templates_results_csv(content, db))\n elif type == CSVTypes.hospitals:\n lines = len(import_hospitals_csv(content, db))\n else:\n raise HTTPException(400)\n\n log(\"[CSV] CSV foi importado.\", db)\n\n return {\"lines\": lines}", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def from_stream(cls, stream, sep=\",\"):\n if not stream:\n raise Exception(\"No data available. 
Please provide a filepath or a piped input for the connectivity matrix.\")\n\n connectivity_matrix = pd.read_csv(stream, sep=sep, index_col=0)\n\n if connectivity_matrix.values.shape[0] != connectivity_matrix.values.shape[1]:\n raise Exception(\"The connectivity matrix must be square. Current shape: (%d, %d)\" % connectivity_matrix.values.shape)\n\n return cls(connectivity_matrix)", "def load_from_dict(input_dict):\n return WorkflowStepInformation(input_dict)", "def ReadPreprocessingInformation(self, knowledge_base):\n for stream_number in range(1, self._last_preprocess):\n stream_name = 'preprocess.{0:06d}'.format(stream_number)\n if not self._HasStream(stream_name):\n raise IOError('No such stream: {0:s}'.format(stream_name))\n\n data_stream = _SerializedDataStream(\n self._zipfile, self._temporary_path, stream_name)\n\n system_configuration = self._ReadAttributeContainerFromStreamEntry(\n data_stream, 'preprocess')\n\n # TODO: replace stream_number by session_identifier.\n knowledge_base.ReadSystemConfigurationArtifact(\n system_configuration, session_identifier=stream_number)", "def get_reader(fname):\n\n if fname == \"-\":\n fh = sys.stdin\n else:\n fh = open(fname, \"r\")\n \n rdr = csv.reader(fh, dialect=\"psv\")\n return (rdr, fh)", "def read_stream(stream_name, input_stream, io_q):\n if not input_stream:\n io_q.put((stream_name, \"EXIT\"))\n return\n for line in input_stream:\n io_q.put((stream_name, line))\n if not input_stream.closed:\n input_stream.close()\n io_q.put((stream_name, \"EXIT\"))", "def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult", "def _read_input(self, in_file):\n result = {}\n with open(in_file, \"r\") as f:\n reader = csv.DictReader(f, delimiter=str(\"\\t\"))\n for row in reader:\n result[row[\"accession\"]] = {\n \"transcript_sequence\": row[\"transcript_sequence\"],\n \"cds_start_i\": int(row[\"cds_start_i\"]),\n \"cds_end_i\": int(row[\"cds_end_i\"]),\n }\n\n return result", "def read_csv(product_name=str, directory=DIRS['EOIR_DATA_DIR']):\n filename = ('%s.csv' % product_name)\n path = get_dir(os.path.join(directory, filename))\n with io.open(path, mode='r', encoding='utf-8-sig') as f:\n spec_dict = {}\n filtered = (line.replace(\"\\n\", '') for line in f) # Removes \\n from the created as a byproduct of encoding\n for line in filtered:\n field, value = line.split(',')\n if has_number(value) and value.find('\"') == -1:\n if value.find('x') != -1:\n if value.find('.') != -1:\n value = [float(i) for i in value.split('x')]\n else:\n value = [int(i) for i in value.split('x')]\n else:\n value = float(value)\n else:\n value = value.replace('\"', '')\n if value.find('/') != -1:\n value = [str(i) for i in value.split('/')]\n elif (value.lower()).find('true') != -1:\n value = True\n elif (value.lower()).find('false') != -1:\n value = False\n else:\n value = str(value)\n spec_dict['%s' % str(field)] = value\n f.close()\n return spec_dict", "def from_csv(self, csvfile, encoding='utf-8', dialect='excel', **kwds):\n if isinstance(csvfile, string_types):\n encoding = _normalize_decoder(encoding)\n reader, close_file = _from_csv_path(csvfile, encoding, dialect=dialect, **kwds)\n return Reader(reader, closefunc=close_file)\n\n reader = 
_from_csv_iterable(csvfile, encoding, dialect=dialect, **kwds)\n return Reader(reader)", "def _read_csv_meta(cls, fd: typing.TextIO):\n yaml_header = \"\"\n last_pos = fd.tell()\n line = fd.readline()\n while line.startswith(\"#\"):\n # remove leading comment and whitespace\n yaml_header += line[1:].lstrip()\n last_pos = fd.tell()\n line = fd.readline()\n fd.seek(last_pos)\n meta_data = sy.load(yaml_header, schema=cls._strictyaml_metadata_schema).data\n\n return meta_data, yaml_header.count(\"\\n\")", "def read(inputstream):\n current = None\n mapping = defaultdict(list)\n current_partitions = None\n for line in inputstream:\n # capacity = 2\n # name = 3\n line_parts = line.split()\n capacity = line_parts[2]\n name = line_parts[3]\n\n if len(name) == 3:\n current = capacity\n current_partitions = []\n mapping[current].append(current_partitions)\n else:\n current_partitions.append((capacity, name))\n return mapping", "def reader(fname, sd):\n with open(fname, 'rb') as f:\n rdr = csv.reader(f)\n hdr = None\n for l in rdr:\n # header has not been found\n if not hdr:\n # for each field defined in the semantic dictionary,\n # search for one of the aliases to be present in the line\n x = {k: _find_alias(l,sd[k]) for k in sd}\n # have we found a header? essentially: have we found a\n # match for one of the aliases of each mandatory field?\n if all([x[k] is not None for k in x if k[1]]):\n hdr = x\n continue\n # header has been found\n else:\n # check of one or more mandatory columns are missing?\n if any([_silent_get(l,hdr[k]) is '' for k in hdr if k[1]]):\n continue\n # yields a dictionary with field identifier as keys\n yield {k: l[hdr[k]] for k in hdr if hdr[k] is not None}", "async def prepare_input(self) -> Status:\n\n with open(self.input_file) as fin:\n reader = csv.DictReader(fin)\n fieldnames = reader.fieldnames\n if fieldnames is None:\n raise ValueError(\"fieldnames is None from csv reader\")\n for col in self.game.input_columns[self.player.role]:\n # Don't look for a literal column labeled \"features\"\n if col != InputColumn.features and str(col) not in fieldnames:\n raise MPCStartupError(f\"{col} column required in input CSV\")\n\n return Status.OK", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def _read_csv(input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f)\n lines = []\n for line in reader:\n lines.append(line)\n return lines[1:] # remove header", "def gen_csv_data(inputfilehandle, returntype='generator'):\n # First do some sniffing (I expect input smmc file to have headers!)\n snif = csv.Sniffer()\n csvdialect = snif.sniff(inputfilehandle.read(4048)) # The read _must_ encompass a full first line.\n csvdialect.lineterminator = '\\n' # Ensure correct line terminator (\\r\\n is just silly...)\n inputfilehandle.seek(0) # Reset file\n # Then, extract dataset:\n setreader = csv.DictReader(inputfilehandle, dialect=csvdialect)\n # Import data\n # Note: Dataset is a list of dicts.\n if returntype == 'list':\n return [row for row in setreader if len(row)>0]\n elif returntype == 'csvreader':\n return setreader\n else:\n return (row for row in setreader if len(row)>0)", "def upload_person_task_csv(stream):\n\n result = []\n reader = csv.DictReader(stream)\n empty_fields = set()\n\n for row in reader:\n # skip empty lines in the CSV\n if not any(row.values()):\n continue\n\n entry = {}\n for col in Person.PERSON_UPLOAD_FIELDS:\n try:\n entry[col] = 
row[col].strip()\n except (KeyError, IndexError, AttributeError):\n # either `col` is not in `entry`, or not in `row`, or\n # `.strip()` doesn't work (e.g. `row[col]` gives `None` instead\n # of string)\n entry[col] = None\n empty_fields.add(col)\n\n for col in Person.PERSON_TASK_EXTRA_FIELDS:\n entry[col] = row.get(col, None)\n entry['errors'] = None\n\n result.append(entry)\n\n return result, list(empty_fields)", "def read_into_dictionary(input_file):\n logger.debug(\"%s %s (%s)...\" % (LOG_INDENT, inspect.stack()[0][3], input_file))\n\n input_file_suffix = (pathlib.Path(input_file).suffix)\n ret_dict = {}\n if input_file_suffix == '.csv':\n logger.debug(\"%s opening file [%s]\" % (LOG_INDENT,input_file))\n reader = csv.reader(open(input_file, 'r'))\n for row in reader:\n # read in and strip of comments / blank lines etc..\n variable_name = row[0].strip()\n variable_value = row[1].strip()\n if not variable_name:\n continue\n if variable_name.startswith('#') or variable_value.startswith('#'):\n continue\n logger.debug(\"%s %s=%s\" % (LOG_INDENT,variable_name,variable_value))\n # save in dictionary\n ret_dict[variable_name] = variable_value\n return ret_dict", "def read_from_csv(self, input_file, delimiter):\n\n # read CSV as UTF-8 encoded file (see also http://stackoverflow.com/a/844443)\n with codecs.open(input_file, encoding='utf8') as fp:\n logger.info(\"Reading search results from \" + input_file + \"...\")\n\n reader = csv.reader(fp, delimiter=delimiter)\n\n # read header\n header = next(reader, None)\n if not header:\n raise IllegalArgumentError(\"Missing header in CSV file.\")\n\n query_index = header.index(\"query\")\n rank_index = header.index(\"rank\")\n url_index = header.index(\"url\")\n title_index = header.index(\"title\")\n snippet_index = header.index(\"snippet\")\n\n # read CSV file\n for row in reader:\n if row:\n self.values.append(\n SearchResult(row[query_index], row[rank_index],\n row[url_index], row[title_index], row[snippet_index])\n )\n else:\n raise IllegalArgumentError(\"Wrong CSV format.\")\n\n self.filename = os.path.basename(input_file)\n logger.info(str(len(self.values)) + \" search results have been imported.\")", "def csv_dict_reader(file_obj):\n #import re\n #file = open(file_obj)\n\n # reader = csv.DictReader(file_obj)\n # for line in reader:\n # print(line[\"Name\"])", "def get_context_from_csv(self):\n if re.search('^(http|https)://', self.project.CONTEXT_SOURCE_FILE):\n data = requests.get(self.project.CONTEXT_SOURCE_FILE)\n reader = csv.reader(\n data.iter_lines(), delimiter=',', quotechar='\"')\n ret = {rows[0]: rows[1] for rows in reader}\n else:\n try:\n with open(self.project.CONTEXT_SOURCE_FILE) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n ret = {rows[0]: rows[1] for rows in reader}\n except IOError:\n file = \"%s/%s\" % (\n os.path.abspath(self.path),\n self.project.CONTEXT_SOURCE_FILE)\n with open(file) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n ret = {rows[0]: rows[1] for rows in reader}\n ret.update({\n \"CONTEXT_SOURCE_FILE\": self.project.CONTEXT_SOURCE_FILE,\n })\n return ret", "def read_csv_file(input_csv_file_path):\n with open(input_csv_file_path, 'r', encoding='utf-8') as file_path:\n csv_reader = csv.reader(file_path)\n for row in itertools.islice(csv_reader, 1, None):\n yield (\n int(row[0]), row[2],\n datetime.datetime.strftime(\n datetime.datetime.strptime(row[-1], '%m/%d/%y'),\n '%Y-%m-%d'))", "def reader_for_streaming(io):\n if not hasattr(io, 'read'):\n raise 
TypeError('{0} must be an opened file.'.format(io))\n if hasattr(io, 'encoding'):\n raise TypeError('{0} must be opened in binary mode'.format(io))\n return reader.Reader.read_headers(io)", "def read_csv_file(self):\n pass", "def read_csv(self):\n with open(self.csv_file, 'rU') as file_object:\n reader = csv.reader(file_object, delimiter=self.delimiter)\n if self.has_header_row:\n header_row = next(reader, None)\n if self.has_duplicate_column_names:\n header_counts_dict = dict()\n new_header_row = []\n for each_header in header_row:\n try:\n header_counts_dict[each_header] += 1\n except KeyError:\n header_counts_dict[each_header] = 1\n frequency = header_counts_dict[each_header]\n if frequency==1:\n new_header_row.append(each_header)\n else:\n new_header_row.append(each_header+str(frequency))\n header_row = new_header_row\n else:\n header_row = self.provided_headers_list\n rows = [\n { header: value for header, value in zip(header_row, next_row)}\n for next_row in reader\n ]\n return header_row, rows", "def getInput():\n\tparser = OptionParser()\n\tparser.add_option('-i', '--input', dest='filepath')\n\tparser.add_option('-f', '--format', dest='dnldFormat', default='djvu')\n\tparser.add_option('-u', '--utility', dest='utility', default='aria2')\n\t(option, args) = parser.parse_args()\n\n\tif not option.filepath:\n\t\treturn parser.error('CSV file path not given, use --input=\"path.to.csv.file.for.download\"')\n\n\treturn {'src': option.filepath, 'format': option.dnldFormat, 'utility': option.utility}", "def csv_dict_reader(file_path):\r\n with open(file_path, 'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])", "def svevent_reader(in_file):\n with open(in_file) as in_handle:\n while 1:\n line = in_handle.next()\n if line.startswith(\">\"):\n break\n header = line[1:].rstrip().split(\"\\t\")\n reader = csv.reader(in_handle, dialect=\"excel-tab\")\n for parts in reader:\n out = {}\n for h, p in zip(header, parts):\n out[h] = p\n yield out", "def csv_iterator(f_csv, clean=True, _PARALLEL=False):\n\n with open(f_csv, encoding=\"utf-8\") as FIN:\n CSV = csv.DictReader(FIN)\n\n for row in CSV:\n yield row", "def from_csv(\n location : str,\n label_col : Union[None,str,int] = None,\n md5_checksum: Optional[str] = None,\n csv_reader : Callable[[Iterable[str]], Iterable[Sequence[str]]] = csv.reader, #type: ignore #pylance complains\n has_header : bool = True,\n default_meta: FullMeta = FullMeta(),\n defined_meta: Dict[Any,PartMeta] = {}) -> 'ClassificationSimulation[Context]':\n\n source: Source[Iterable[str]]\n\n if not location.lower().startswith('http'):\n source = DiskSource(location)\n else: \n source = HttpSource(location, \".csv\", md5_checksum, 'data')\n\n csv_rows = list(csv_reader(source.read()))\n\n with ExecutionContext.Logger.log('encoding data... 
'):\n return ClassificationSimulation.from_table(csv_rows, label_col, has_header, default_meta, defined_meta)", "def csv_serving_input_fn():\n csv_row = tf.placeholder(\n shape=[None],\n dtype=tf.string\n )\n features = parse_csv(csv_row)\n features.pop(LABEL_COLUMN)\n return tf.contrib.learn.InputFnOps(features, None, {'csv_row': csv_row})", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def load(datastream):", "def __read_csv(self) -> tuple:\n with open(self.csv_file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0].isspace():\n raise StopIteration\n yield row", "def extract_workflow_data(workflow):\n workflow_data = {}\n workflow_data[\"id\"] = workflow.id\n workflow_data['name'] = workflow.name\n workflow_data['created_at'] = workflow.created_at\n workflow_data['updated_at'] = workflow.updated_at\n workflow_data[\"state\"] = workflow.state\n return workflow_data", "def parse(self, filename: str, input_format='csv', **kwargs):\n if 'delimiter' not in kwargs:\n kwargs['delimiter'] = self._extention_types[input_format]\n if filename.endswith('.tar'):\n with tarfile.open(filename) as tar:\n for member in tar.getmembers():\n f = tar.extractfile(member)\n df = pd.read_csv(f, comment='#', **kwargs) # type: pd.DataFrame\n if member.name == 'nodes.csv':\n self.load_nodes(df)\n elif member.name == 'edges.csv':\n self.load_edges(df)\n else:\n raise Exception('Tar file contains unrecognized member {}'.format(member.name))\n else:\n df = pd.read_csv(filename, comment='#', **kwargs) # type: pd.DataFrame\n self.load(df)", "def read_csv(self, inputfile):\n d = csv.reader(inputfile)\n for row in d.read():\n self.translations[row[0]] = row[1]", "def job_reader(path) -> Generator[ParsedActionType, None, None]:\n try:\n with open(path, \"r\") as f:\n parser = Parser()\n for line in f:\n result = parser.process_line(line)\n if result is not None:\n yield result\n except IOError as err:\n print(\"Error opening/reading from file '{0}': {1}\"\n .format(err.filename, err.strerror))", "def _OpenRead(self):\n has_storage_metadata = self._ReadStorageMetadata()\n if not has_storage_metadata:\n # TODO: remove serializer.txt stream support in favor\n # of storage metadata.\n if self._read_only:\n logging.warning('Storage file does not contain a metadata stream.')\n\n stored_serialization_format = self._ReadSerializerStream()\n if stored_serialization_format:\n self.serialization_format = stored_serialization_format\n\n if self.serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n raise IOError('Unsupported serialization format: {0:s}'.format(\n self.serialization_format))\n\n self._serializer = json_serializer.JSONAttributeContainerSerializer\n\n for container_type, stream_name_prefix in (\n self._STREAM_NAME_PREFIXES.items()):\n stream_name_prefix = '{0:s}_data.'.format(stream_name_prefix)\n self._last_stream_numbers[container_type] = self._GetLastStreamNumber(\n stream_name_prefix)\n\n self._analysis_report_stream_number = self._GetLastStreamNumber(\n 'analysis_report_data.')\n self._last_preprocess = self._GetLastStreamNumber('preprocess.')\n\n last_session_start = self._GetLastStreamNumber('session_start.')\n last_session_completion = self._GetLastStreamNumber('session_completion.')\n\n # TODO: handle open sessions.\n if last_session_start != last_session_completion:\n logging.warning('Detected unclosed session.')\n\n self._last_session = last_session_completion\n\n last_task_start = 
self._GetLastStreamNumber('task_start.')\n last_task_completion = self._GetLastStreamNumber('task_completion.')\n\n # TODO: handle open tasks.\n if last_task_start != last_task_completion:\n logging.warning('Detected unclosed task.')\n\n self._last_task = last_task_completion", "def import_input_set(name, csv_fn, my_session):\n input_set = InputSet(name=name)\n n_added_input = 0\n with open(csv_fn, 'r') as csv_fh:\n for ln in csv_fh:\n ln = ln.rstrip()\n if len(ln) == 0:\n continue\n toks = ln.split(',')\n if len(toks) != 9:\n raise ValueError('Line did not have 9 tokens: \"%s\"' % ln)\n toks = list(map(lambda x: None if x == 'NA' else x, toks))\n acc_r, acc_s, url_1, url_2, url_3, checksum_1, checksum_2, checksum_3, retrieval_method = toks\n input = Input(acc_r=acc_r, acc_s=acc_s, url_1=url_1, url_2=url_2, url_3=url_3,\n checksum_1=checksum_1, checksum_2=checksum_2, checksum_3=checksum_3,\n retrieval_method=retrieval_method)\n my_session.add(input)\n input_set.inputs.append(input)\n n_added_input += 1\n log.info('Imported %d items from input set' % len(input_set.inputs), 'input.py')\n my_session.add(input_set)\n my_session.commit()\n return input_set.id, n_added_input", "def get_csv():\r\n csv_response = requests.get(CSV_URL).content.decode(\"utf-8\")\r\n\r\n reader = csv.DictReader(csv_response.splitlines())\r\n\r\n return reader", "def read_partition_csv(source):\n partition = {}\n # Load in the Partitions from the CSV\n with open(source, mode='r') as partitions_csv:\n csv_reader = csv.DictReader(partitions_csv)\n for row in csv_reader:\n dataset_as_string = row[\"Dataset\"] # Returns Row as a String\n partition[row[\"Partition\"]] = dataset_as_string[2:-2].split(\"', '\")\n \n return partition", "def get_csv_data(self):\n encoding = self.options.get(\n 'encoding', self.state.document.settings.input_encoding)\n error_handler = self.state.document.settings.input_encoding_error_handler\n if self.content:\n # CSV data is from directive content.\n if 'file' in self.options or 'url' in self.options:\n error = self.state_machine.reporter.error(\n '\"%s\" directive may not both specify an external file and'\n ' have content.' % self.name, nodes.literal_block(\n self.block_text, self.block_text), line=self.lineno)\n raise SystemMessagePropagation(error)\n source = self.content.source(0)\n csv_data = self.content\n elif 'file' in self.options:\n # CSV data is from an external file.\n if 'url' in self.options:\n error = self.state_machine.reporter.error(\n 'The \"file\" and \"url\" options may not be simultaneously'\n ' specified for the \"%s\" directive.' 
% self.name,\n nodes.literal_block(self.block_text, self.block_text),\n line=self.lineno)\n raise SystemMessagePropagation(error)\n source_dir = os.path.dirname(\n os.path.abspath(self.state.document.current_source))\n source = os.path.normpath(os.path.join(source_dir,\n self.options['file']))\n source = utils.relative_path(None, source)\n try:\n self.state.document.settings.record_dependencies.add(source)\n csv_file = io.FileInput(source_path=source,\n encoding=encoding,\n error_handler=error_handler)\n csv_data = csv_file.read().splitlines()\n except IOError, error:\n severe = self.state_machine.reporter.severe(\n u'Problems with \"%s\" directive path:\\n%s.'\n % (self.name, SafeString(error)),\n nodes.literal_block(self.block_text, self.block_text),\n line=self.lineno)\n raise SystemMessagePropagation(severe)\n elif 'url' in self.options:\n # CSV data is from a URL.\n # Do not import urllib2 at the top of the module because\n # it may fail due to broken SSL dependencies, and it takes\n # about 0.15 seconds to load.\n import urllib2\n source = self.options['url']\n try:\n csv_text = urllib2.urlopen(source).read()\n except (urllib2.URLError, IOError, OSError, ValueError), error:\n severe = self.state_machine.reporter.severe(\n 'Problems with \"%s\" directive URL \"%s\":\\n%s.'\n % (self.name, self.options['url'], SafeString(error)),\n nodes.literal_block(self.block_text, self.block_text),\n line=self.lineno)\n raise SystemMessagePropagation(severe)\n csv_file = io.StringInput(\n source=csv_text, source_path=source, encoding=encoding,\n error_handler=(self.state.document.settings.\\\n input_encoding_error_handler))\n csv_data = csv_file.read().splitlines()\n else:\n error = self.state_machine.reporter.warning(\n 'The \"%s\" directive requires content; none supplied.'\n % self.name, nodes.literal_block(\n self.block_text, self.block_text), line=self.lineno)\n raise SystemMessagePropagation(error)\n return csv_data, source", "def __init__(self, file_path=None, writer=None,\n output_encoding=\"utf-8\", input_encoding=\"utf-8\",\n try_encodings_hard=True, fallback_input_encodings=None,\n from_row=0, from_col=0, ignore_blank_rows=False,\n input_dialect=csv.excel):\n self.file_path = None\n self.output_encoding = output_encoding\n self.input_encoding = input_encoding\n\n # useful to know about this for any future work on encodings: https://docs.python.org/2.4/lib/standard-encodings.html\n if fallback_input_encodings is None and try_encodings_hard:\n fallback_input_encodings = [\"cp1252\", \"cp1251\", \"iso-8859-1\", \"iso-8859-2\", \"windows-1252\", \"windows-1251\", \"mac_roman\"]\n else:\n fallback_input_encodings = []\n self.fallback_input_encodings = fallback_input_encodings\n\n self.from_row = from_row\n self.from_col = from_col\n self.ignore_blank_rows = ignore_blank_rows\n self.input_dialect = input_dialect\n\n # Store the csv contents in a list of tuples, [ (column_header, [contents]) ]\n self.data = []\n\n # Get an open file object from the given file_path or file object\n if file_path is not None:\n if type(file_path) == file:\n self.file_path = file_path.name\n # NOTE: if you have passed in a file object, it MUST work - as in, it must be set to\n # read the right encoding, and everything. We will not try to parse it again if it\n # fails the first time. 
If it is closed, you will also need to be sure to set the input_encoding.\n # All round - better if you just give us the file path\n self.file_object = file_path\n if self.file_object.closed:\n self.file_object = codecs.open(self.file_object.name, 'r+b', encoding=self.input_encoding)\n\n # explicitly read this file in\n self._read_file(self.file_object)\n else:\n self.file_path = file_path\n if os.path.exists(file_path) and os.path.isfile(file_path):\n self._read_from_path(file_path)\n else:\n # If the file doesn't exist, create it.\n self.file_object = codecs.open(file_path, 'w+b', encoding=self.output_encoding)\n\n elif writer is not None:\n self.file_object = writer", "def _csv_to_dict(name):\n csv_path = _get_csv_path(name)\n result = []\n with open(csv_path) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n result.append(row)\n return result", "def read_csv(config, input_file_path):\n header = read_csv_header(input_file_path)\n\n general = config['general']\n date_cols_types = ['date_cols',\n 'first_exp_date_cols',\n 'last_exp_date_cols',\n 'index_date_col',\n 'lookback_date_col']\n date_cols = utils.generate_list_columns(header, config, date_cols_types)\n # it turns out we should read the dates first in as strings\n date_cols_types = {date_col: str for date_col in date_cols}\n df = pd.read_csv(input_file_path, dtype=date_cols_types)\n # convert string dates to dates using the date format\n # Large dataset, conversion done in parallel\n if len(date_cols) > 50 or (df.shape[0] > 20000 and len(date_cols) > 1):\n print('parallel!')\n # we have to do this in parallel otherwise it takes forever\n df[date_cols] = parse_utils.apply_parallel(df[date_cols],\n parse_utils.parse_dates,\n format=general['date_format'])\n # Small dataset, faster to convert in non-parallel fashion\n elif len(date_cols) > 0:\n df[date_cols] = df[date_cols].apply(pd.to_datetime,\n format=general['date_format'])\n return df", "def place_types_read_csv(self, csv_input):\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''])\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)", "def parse(stream, idx):\n\n # Skip comment lines\n stream = filter(lambda x: not x.startswith('#'), stream)\n\n # Ignore empty lines.\n stream = filter(lambda x: x.strip(), stream)\n\n # Format the stream.\n stream = csv.reader(stream, delimiter=delimiter)\n\n # Generate empty values on missing columns.\n for row in stream:\n try:\n yield (row[idx], None)\n except IndexError as exc:\n yield ('', None)", "def read(self):\n metadata = []\n data = np.empty(self.__shape)\n\n with open(self.__file, 'r') as f:\n reader = csv.reader(f)\n metadata = reader.__next__()\n for r, row in enumerate(reader):\n data[r] = np.array(row)\n\n return metadata, data", "def from_csv(self, user, row):\n if len(row) != 6:\n raise BadRequest(_(\"Invalid line\"))\n self.name = row[1].strip()\n self.target_host = row[2].strip()\n self.service, created = Service.objects.get_or_create(\n name=row[3].strip())\n self.enabled = (row[4].strip() == 'True')\n self.verify_recipients = (row[5].strip() == 'True')\n self.save(creator=user)", "def read_csv(csv_file):\n LOGGER.debug(\"Opening file: %s\", csv_file)\n with open(csv_file) as f:\n for line in f:\n yield line", "def read_strong_csv(strong_meta_csv_path):\n with open(strong_meta_csv_path, 'r') as fr:\n reader = csv.reader(fr, delimiter='\\t')\n lines = list(reader)\n \n meta_dict = {}\n for 
line in lines:\n [audio_name, begin_time, end_time, label] = line\n meta = {'begin_time': begin_time, 'end_time': end_time, 'label': label}\n if audio_name in meta_dict:\n meta_dict[audio_name].append(meta)\n else:\n meta_dict[audio_name] = [meta]\n \n return meta_dict", "def read_csv(csv_path, fieldnames=None, restkey=None,\n restval=None, dialect='excel', *args, **kwds):\n with CSVFile(os.path.expanduser(csv_path), fieldnames=fieldnames, restkey=restkey, restval=restval,\n dialect=dialect, *args, **kwds) as csvfile:\n return csvfile", "def open_some_data(the_file_name: str) -> dict:\n\n result: dict = open_csv(file_name=the_file_name)\n return result", "def load_input(self, number_of_rows_to_read):\n self.dataframe = pandas.read_csv(self.filename, nrows=number_of_rows_to_read)\n #self._describe_input_data()", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(row[\"timestamp\"], \"%b %d %Y\"),\n \"trans_type\": row[\"type\"],\n \"amount\": row[\"amount\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def read_trace(self, trace):\n csv_reader = csv.DictReader(self.csv_fd)\n for row in csv_reader:\n new_trace = RiscvInstructiontTraceEntry()\n new_trace.rd = row['rd']\n new_trace.rd_val = row['rd_val']\n new_trace.addr = row['addr']\n new_trace.binary = row['binary']\n new_trace.instr_str = row['str']\n trace.append(new_trace)", "def load_data(original_input_handle, cluster_input_handle):\n\n info('Loading original data from {}'.format(original_input_handle.name))\n\n original_data = pd.read_csv(original_input_handle, index_col=0)\n\n info('Loaded a table with shape {}'.format(original_data.shape))\n\n clusters = None\n\n if cluster_input_handle is not None:\n\n info('Loading cluster assignments from {}'.format(\n cluster_input_handle.name))\n\n clusters = pd.read_csv(cluster_input_handle, index_col=0, squeeze=True)\n\n info('Loaded {} entries'.format(clusters.size))\n\n return original_data, clusters", "def csv_parser(s):\n\n # Data is our output. It will be a list of lists.\n\n # Split csv into lines and store them in a list called 'lines'.\n \n # Remove the first element from lines, so that you have only the data lines left.\n \n # At this stage, we loop through the list called lines.\n # As you loop\n # i. split each line on the commas;\n # ii. convert the Subject variable to int.\n # iii. convert the Height variable to float.\n # iv. 
add to data a list consisting of this line's Subject, Height, and Occupation values ", "def _resolve_reader(self):\n self.fh = self.path.fs.open(self.path, 'rU')\n self.resolved = csv.reader(self.fh, delimiter=self.delimiter)", "def schemaless_reader(stream, schema):\n acquaint_schema(schema)\n return read_data(stream, schema)", "def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict", "def readcsv(csvfile):\n logger = log.getLogger('obslog.readcsv')\n\n if not os.path.exists(csvfile):\n logger.error('Cannot access %s', csvfile)\n raise SystemExit\n\n data = {}\n with open(csvfile, mode='r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n data[row['FITSFILE']] = row\n logger.debug('Data: %s', data)\n return data", "def __init__(self, csv_path):\n # Checking files\n fdops.check_if_file_exists(csv_path)\n\n # loading proposal data as a data frame\n self._df = pd.read_csv(csv_path)\n\n # Dictionary containing proposal properties\n self.props = self._get_properties(csv_path)", "def load_workflow_file(factory_registry, file_path):\n\n reader = WorkflowReader(factory_registry)\n\n workflow_model = reader.read(file_path)\n\n return workflow_model", "def parse_csv_from_file(file):\n csvFileReader = None\n employee_data = []\n\n # if FileStorage object (which has a save() method)\n if hasattr(file, 'save'):\n csvFileReader = csv.reader(codecs.iterdecode(file, 'utf-8'))\n # else if File object (which does not have a save() method)\n else:\n csvFileReader = csv.reader(file)\n\n for row in csvFileReader:\n employee_data.append({\n 'first_name': row[0],\n 'last_name': row[1],\n 'annual_salary': int(row[2]),\n 'super_rate': convert_to_float(row[3]),\n 'payment_period': row[4]\n })\n\n return employee_data", "def read_csv(path: str) -> list[dict[str, str]]:\n with open(path, 'r') as f:\n return list(csv.DictReader(f))", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": 
datetime.strptime(row[\"date\"], \"%d-%m-%Y\"),\n \"trans_type\": row[\"transaction\"],\n \"amount\": row[\"amounts\"],\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def readstream(self, stream):\n\n self._reset()\n\n try:\n # tokenize input stream\n self._lexer = SettingLexer()\n self._lexer.readstream(stream)\n\n # parse tokens into AST\n self._parse()\n return True\n\n except IOError:\n self._reset()\n return False", "def read(self, line):\n data = []\n if six.PY3 and type(line) == six.binary_type:\n line = line.decode('utf-8')\n\n csv_reader = csv.reader(six.StringIO(line),\n delimiter=self.delimiter,\n quotechar=self.quotechar,\n skipinitialspace=True)\n for cr in csv_reader:\n data = [decode_string(f).strip() for f in cr]\n break\n\n return None, data", "def read_data(self) -> List[BankCSVRecord]:\n try:\n with open(self.source, \"r\") as csv_source:\n row_records = []\n reader = DictReader(csv_source)\n for row in reader:\n transformed_data = {\n \"timestamp\": datetime.strptime(\n row[\"date_readable\"], \"%d %b %Y\"\n ),\n \"trans_type\": row[\"type\"],\n \"amount\": int(row[\"euro\"]) + int(row[\"cents\"]) / 100,\n \"from\": row[\"from\"],\n \"to\": row[\"to\"],\n }\n row_records.append(BankCSVRecord(**transformed_data))\n return row_records\n except FileNotFoundError as e:\n raise ImporterSourceError(message=f\"File {self.source} not found\")\n except KeyError as e:\n raise ImporterSourceFormatError(\n message=\"Source file data does not match format\"\n )\n except Exception as e:\n raise ImporterError(message=\"Import failed!\") from e", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))" ]
[ "0.6305087", "0.616394", "0.59998536", "0.57927126", "0.5764348", "0.56994", "0.56341255", "0.5578651", "0.5536898", "0.55146277", "0.5498734", "0.5492436", "0.54921895", "0.5491747", "0.5480112", "0.5363468", "0.5362126", "0.5310172", "0.5305016", "0.5297404", "0.52790266", "0.52721584", "0.5221506", "0.5219369", "0.51705515", "0.51666623", "0.5163281", "0.5158731", "0.51510525", "0.5148425", "0.5127288", "0.5127243", "0.5108559", "0.51036984", "0.50950104", "0.5092869", "0.508639", "0.5086242", "0.50859076", "0.5079383", "0.50778294", "0.50720555", "0.5066097", "0.5054419", "0.5037762", "0.50342864", "0.5031467", "0.50252205", "0.5011409", "0.5009788", "0.5006657", "0.49976647", "0.49740642", "0.4972933", "0.49529648", "0.4947161", "0.49211693", "0.49211693", "0.49144992", "0.49138716", "0.4911841", "0.49082097", "0.49078622", "0.4896766", "0.48960212", "0.48862314", "0.48851323", "0.48826084", "0.48825732", "0.48787507", "0.48744735", "0.4873024", "0.48650256", "0.48643532", "0.48522317", "0.48492113", "0.48464638", "0.48463428", "0.48363748", "0.48297626", "0.48257527", "0.48248342", "0.48168975", "0.4816851", "0.48145387", "0.4810218", "0.48094243", "0.4804378", "0.47992668", "0.47990465", "0.4789358", "0.4773806", "0.4773093", "0.47687238", "0.47681984", "0.4767559", "0.4761018", "0.47577262", "0.47566018", "0.47566018" ]
0.4873241
71
Return a list of messages.
def __call__():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_messages(self):", "def get_messages(self):\n res = self.conn.cursor().execute(\"SELECT * FROM messages\")\n return res.fetchall()", "def get_msgs(self):\n msgs = []\n while True:\n try:\n msgs.append(self.get_msg(block=False))\n except Empty:\n break\n return msgs", "def get_msgs(self):\n msgs = []\n while True:\n try:\n msgs.append(self.get_msg(block=False))\n except Empty:\n break\n return msgs", "def _messages_list(self, queue):\n\n return queue.messages()", "def get_message_list(self):\n \n result = requests.get(\n url = root_url + '/{}'.format(\"message\"),\n headers = { 'Authorization': api_key },\n )\n\n message_list = result.json()\n\n self.message_list = message_list", "def get_all_msgs(self):\n data = self.database.select(self.tname)\n msgs = []\n for item in data:\n msgs.append((item[0], self.data_to_msg(item)))\n return msgs", "def getAllMessages(self):\n return self.db.getAllMessages()", "def list(self):\n try:\n response = self.service.users().messages().list(userId=self.user_id,\n q=self.query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = self.service.users().messages().list(userId=self.user_id, q=self.query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def get_messages(self):\r\n return self.messages", "def messages(self):\n return list(iter(self))", "def all_messages(self):\n request = {'token': self.token, 'include_received': True, 'include_read': True, 'include_sent': True}\n return Session.send_request('messages', request, Session.FULL_RESPONSE_OR_NONE)", "def get_message_list(self):\n count = 0\n for msg in self.mbox:\n if msg['From'].find(self.config['tgt_email']) > -1:\n dtime = arrow.get(msg['Date'], 'ddd, D MMM YYYY HH:mm:ss ZZ')\n message = dict({'from': msg['From'],\n 'date': dtime,\n 'subject': msg['Subject']})\n # boundary = msg.get_boundary()\n # if boundary is not None:\n # bounds = [m.start() for m\n # in re.finditer(boundary, str(msg))]\n # else:\n # bounds = list()\n # if len(bounds) > 2:\n # message['text'] = str(msg)[bounds[1]:bounds[2]]\n # else:\n # message['text'] = None\n pl = None\n if msg['Subject'].find(\":\") == -1:\n finished = False\n pl = msg.get_payload()\n while finished is False:\n if isinstance(pl, str):\n finished = True\n elif isinstance(pl, list):\n pl = pl[0].get_payload()\n else:\n raise ValueError(\"Non-list, non-str payload?\")\n break\n message['text'] = self.clean_text(str(pl))\n\n if message['text'] is not None:\n self.messages.append(message)\n count += 1\n # print count\n self.messages.sort(key=lambda item: item['date'])", "def messages(self):\n return Session.send_request('messages', {'token': self.token}, Session.FULL_RESPONSE_OR_NONE)", "def list_messages():\n session = Session()\n messages = session.query(Message).all()\n msg_lst = []\n for message in messages:\n msg_lst.append(create_message(message))\n return msg_lst", "def _get_messages(self):\n try:\n messages = self.channel.get_messages(int(self.get_argument('since_timestamp', 0)))\n\n except ValueError as e:\n messages = self.channel.get_messages()\n\n return messages", "def get_public_messages(self):\n messages = []\n for message in self.messages:\n if message.message_id != None:\n messages.append(message)\n return messages", "def get_all_messages():\n with open(\"data/messages.txt\", 
\"r\") as chat_list:\n messages = chat_list.readlines()\n return messages", "def messages(self):\n return self._messages", "def messages(self):\n return self._messages", "def messages(self):\n return self._messages", "def get_messages(self, limit=10):\n logging.info(f\"Retrieving Slack messages from {self.channel_name}...\")\n messages = self.get_messages_json(limit)\n return [msg[\"text\"] for msg in messages]", "def get_messages(self):\n return [MinimalMessage(self, message) for message in self.messages]", "def messages(self):\n return MessageNotification.messages", "def all_received_messages(self):\n request = {'token': self.token, 'include_read': True}\n return Session.send_request('messages', request, Session.FULL_RESPONSE_OR_NONE)", "def get_and_delete_messages (self):\n return []", "def get_and_delete_messages (self):\n return []", "def get_messages(self):\n return self.messages_received", "def get_messages(self):\n return self.messages_received", "def get_messages(self):\n return self.messages_received", "def get_messages(character):\n mail = character.db.mail\n try:\n messages = [item for item in mail if item[TIMESTAMP] <= item[MESSAGE].date_sent]\n # Let's clean up mail storage for this user while we're at it.\n character.db.mail = messages\n except TypeError:\n messages = []\n return messages", "def get_all_messages(self):\n cursor = self.get_cursor()\n query = 'WITH replies_query AS (SELECT replied_to, array_agg(mid) AS replies_list ' \\\n 'FROM replies INNER JOIN messages ON replies.reply = messages.mid ' \\\n 'GROUP BY replied_to), ' \\\n 'like_count AS (SELECT mid, COUNT(*) AS likes FROM vote ' \\\n 'WHERE upvote = TRUE GROUP BY mid), ' \\\n 'dislike_count AS (SELECT mid, COUNT(*) AS dislikes FROM vote ' \\\n 'WHERE upvote = FALSE GROUP BY mid) ' \\\n 'SELECT messages.mid, users.uid, cid, message, image, ' \\\n 'COALESCE(likes, 0) AS likes, COALESCE(dislikes, 0) AS dislikes, username, ' \\\n \"COALESCE(replies_list, '{}') AS replies, messages.created_on \" \\\n 'FROM messages LEFT OUTER JOIN like_count ON messages.mid = like_count.mid ' \\\n 'LEFT OUTER JOIN dislike_count ON messages.mid = dislike_count.mid ' \\\n 'LEFT OUTER JOIN photo ON messages.mid = photo.mid ' \\\n 'INNER JOIN users ON messages.uid = users.uid ' \\\n 'LEFT OUTER JOIN replies_query ON messages.mid = replies_query.replied_to ' \\\n 'ORDER BY messages.created_on DESC'\n cursor.execute(query)\n messages = cursor.fetchall()\n return messages", "def get_loaded_messages(self):\n self.chat.click()\n messages = []\n for message in self.chat.find_elements(By.XPATH, \"\"\"//*[@id=\"main\"]/div[3]/div/div/div[3]/*\"\"\"):\n messages.append(MessageElement(message))\n return messages", "def __get_loaded_messages(self):\n messages = []\n for message in self.chat.find_elements(By.XPATH, \"\"\"//*[@id=\"main\"]/div[3]/div/div/div[3]/*\"\"\"):\n messages.append(MessageElement(message))\n return messages", "def get_all_message(): \n return \"<br>\".join(messages)", "def get_user_messages(user_id):\n pass \n # user_message_list = []\n\n # for message in sent messages:", "def __msgtolist(self) -> List[str]:\n return self.msg.splitlines()", "def receive_messages(self):\n messages = self.incoming_messages\n self.incoming_messages = []\n return messages", "def getMessages(self):\n raise NotImplementedError(\"Child class must implement this\")", "def get_messages():\n dynamodb = boto3.client('dynamodb')\n messages = []\n _messages = []\n paginator = dynamodb.get_paginator('scan')\n for page in 
paginator.paginate(TableName=os.environ.get('MESSAGE_TABLE_NAME')):\n _messages.extend(page['Items'])\n\n if not _messages:\n return _messages\n\n for message in _messages:\n m = {\n message['timestamp']['N']: message['data']['S']\n }\n messages.append(m)\n\n # sort list of dict by timestamp\n messages = list(map(dict, sorted(list(i.items()) for i in messages)))\n\n _messages = []\n for message in messages:\n _, v = list(message.items())[0]\n _messages.append(v)\n\n return _messages", "def get_messages(self, number: int):\n self.sql_lock.acquire()\n query: str = \"SELECT * FROM messages order by id desc LIMIT ?\" \n messages = []\n for item in self.cursor.execute(query,(number, )):\n text, user, id = item\n\n messages.append({\"user\": user, \"text\": f\"message- {text}\", \"id\": id})\n \n self.sql_lock.release()\n return messages", "def get_messages(self):\n data = self.socket.recv(BUF_SIZE).decode()\n return data.split('\\0')", "def _messages(self):\n q = [json.loads(i)['message'] for i in self.client.kv.get(\n 'rhumba.q.testqueue', [])]\n return q", "def messages_list(from_username, to_username):\n User.query.get_or_404(from_username)\n User.query.get_or_404(to_username)\n\n messages = Message.find_all(from_username, to_username)\n serialized = [message.serialize() for message in messages]\n return (jsonify(messages=serialized), 200)", "def get_received_messages(self):\n return self.received_messages", "def get_all(self):\n request = get_current_request()\n messages = []\n for queue in self.queues:\n for peeked in request.session.peek_flash(queue):\n messages.append({'message': peeked, 'queue': queue,})\n request.session.pop_flash(queue)\n return messages", "def get_messages(self, max_messages):\n raw = self.redis_client.lrange(self.message_list, 0, max_messages)\n messages = (m for m in raw if m != b\"null\")\n messages = (m.decode(\"utf-8\") for m in messages)\n yield from map(json.loads, messages)", "def getMessages(self, topic=False):\n ret = []\n catalog = getToolByName(self.context, 'portal_catalog')\n theme = ''\n if topic:\n theme = getTheme(self.context)\n query = {\n 'portal_type': 'KeyMessage',\n 'review_state': 'published'\n }\n if theme:\n query['getThemes'] = theme\n brains = catalog.searchResults(query)\n for brain in brains:\n text = self._prepareText(brain)\n obj = brain.getObject()\n parent = obj.aq_parent\n ret.append({\n 'text': text,\n 'url': brain.getURL,\n 'parent_url': parent.absolute_url(),\n 'parent_title': parent.Title(),\n })\n return ret", "def get_messages(self, new=True):\n url = (\"https://api.imgur.com/3/account/{0}/notifications/\"\n \"messages\".format(self.name))\n result = self._imgur._send_request(url, params=locals(),\n needs_auth=True)\n return [Notification(msg_dict, self._imgur, has_fetched=True) for\n msg_dict in result]", "def getCacheMessages(self):\n return self.db.getCacheMsgs()", "def messages(self):\r\n return Messages(self)", "def messages(self):\r\n return Messages(self)", "def messages(self, uid=0, **kwargs):\n messages = Messages(self.base_uri, self.auth)\n return self.get_subresource_instances(uid, instance=messages,\n resource=\"messages\", params=kwargs)", "def get_all_messages(**kwargs):\n request = kwargs.pop('request')\n area = get_location_for_user(request.user)\n if not area == Location.tree.root_nodes()[0]:\n return Message.objects.exclude(connection__identity__in=getattr(settings, 'MODEM_NUMBERS', ['256777773260', '256752145316', '256711957281', '256790403038', '256701205129'])).\\\n 
exclude(connection__backend__name=\"yo8200\").filter(direction='I', connection__contact__reporting_location__in=area.get_descendants(include_self=True).all()).order_by('-date')\n\n return Message.objects.exclude(connection__identity__in=getattr(settings, 'MODEM_NUMBERS', ['256777773260', '256752145316', '256711957281', '256790403038', '256701205129'])).\\\n exclude(connection__backend__name=\"yo8200\").filter(direction='I').order_by('-date')", "def bus_messages(self):\n\n output = []\n for message in self.__bus_messages:\n if time.time() - message['time'] > BusController.MESSAGES_TTL:\n self.__bus_messages.remove(message)\n output.append(f\"l{message['sender'].line_num}-s{message['sender'].station_num} sent: {message['text']}\")\n while len(output)<BusController.MAX_MESSAGES_TO_DISPLAY:\n output.append(\"\")\n return output", "def get_unread_messages(self):\n self.chat.click()\n loaded_messages = self.__get_loaded_messages()\n for message in loaded_messages:\n try:\n if message.get_attribute(\"class\") == \"XFAMv focusable-list-item\":\n unread_index = loaded_messages.index(message)\n return loaded_messages[unread_index + 1:]\n except:\n continue\n return []", "def get_messages():\n incoming = request.get_json()\n messages = Message.get_messages_from_room_id(incoming['room_id'])\n messages = [{'user_id': message.user_id, \n 'sendTime': message.sendTime, 'content': message.content} for message in messages]\n for message in messages:\n user = User.get_user_with_user_id(message['user_id'])\n message['username'] = str(user.username)\n return jsonify(results = messages)", "def get_messages(self):\n return self.addresses", "def get_status_messages(self):\n return self.data[\"allMessagesForFrontend\"][\"messages\"]", "def listing_messages(listing_id):\n Listing.query.get_or_404(listing_id)\n\n auth_username = get_jwt_identity()\n all_messages = Message.find_by_listing(listing_id, auth_username)\n print(\"ALL MESSAGES: \", all_messages)\n serialized = [message.serialize() for message in all_messages]\n return (jsonify(messages=serialized), 200)", "def get_messages_body(self):\n msgs_body = []\n if not self.messages:\n u_print(\" Queue.get_messages_body() ERR - There is no messages or malformed messages on queue. 
\")\n u_print(json.dumps(self.messages, indent=4))\n sys.exit(1)\n\n try:\n for m in self.messages:\n msgs_body.append(m.body)\n except:\n raise\n\n return msgs_body", "def get_user_messages(self, speaker):\n # type: (str) -> [Message]\n return list(filter(lambda m: m.speaker == speaker, self.messages))", "def get_new_messages(self):\n inbox = list(self.reddit.inbox.unread(limit=10))\n inbox.reverse()\n return inbox", "def get_messages_of_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM messages WHERE project_id=?\", (project_id,))\n return res.fetchall()", "def message_get_all(context, filters=None, limit=None, offset=None,\n sort_key='created_at', sort_dir='desc'):\n messages = models.Message\n\n query = model_query(context,\n messages,\n read_deleted=\"no\",\n project_only=\"yes\")\n\n legal_filter_keys = ('request_id', 'resource_type', 'resource_id',\n 'action_id', 'detail_id', 'message_level',\n 'created_since', 'created_before')\n\n if not filters:\n filters = {}\n\n query = exact_filter(query, messages, filters, legal_filter_keys)\n\n query = utils.paginate_query(query, messages, limit,\n sort_key=sort_key,\n sort_dir=sort_dir,\n offset=offset)\n\n return query.all()", "def fetchLogs(self):\n return [record.msg for record in self.handler.buffer]", "def get_pending_messages(self):\n if self.connected and self.messenger.ready:\n new_messages = self.messenger.consume_messages()\n self.message_history += new_messages\n return new_messages\n return []", "def extract_messages(self,msg_list):\n msgs = []\n for m in msg_list:\n msgs.append(json.loads(str(m)))\n return msgs", "def list(request, format=None):\r\n user_messages = request.user.profile.recent_messages()\r\n if format and format == '.json':\r\n data = {\r\n 'messages': [msg.to_json() for msg in user_messages],\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')\r\n d = {\r\n 'objects': user_messages,\r\n 'title': 'Messages',\r\n }\r\n return render(request, 'usermessages/list.html', d)", "def get_messages(self, since_timestamp=0):\n return filter(lambda x: x.timestamp > since_timestamp,\n self.chat_messages)", "def messages(\n self, *, add_prefix: typing.Union[str, int] = None\n ) -> typing.List[Message]:\n if add_prefix is not None:\n return [\n Message(\n text=message.text,\n code=message.code,\n index=[add_prefix] + message.index,\n )\n for message in self._messages\n ]\n return list(self._messages)", "def get_messages(self, id, year=None, clean=False):\n year = self.get_year(id, switch='reviews') if year is None else year\n review = self.get_review(id, year)\n messages = list()\n for message in review['messages']:\n sender = message['sender']\n if sender in self.bots:\n continue\n text = helpers.clean(message['text']) if clean else message['text']\n messages.append((sender, text))\n return messages", "def read_messages(self):\n \n messages = copy.deepcopy(dict(self.unique_messages))\n self.unique_messages.clear()\n return messages", "def list_messages(chat_id):\n response = jsonify({\"messages\": list_messages_for_chat(chat_id)})\n return response", "def messages(self):\n return itertools.chain.from_iterable(\n mset.messages\n for topic, partitions in iteritems(self.msets)\n for partition_id, mset in iteritems(partitions)\n )", "def get_user_messages(user_id):\n return UserMessage.query.filter(user_id == UserMessage.user_id).all()", "def getPostedMessages(cls):\n with cls.messageLock:\n postedMessages = cls.postedMessages\n cls.postedMessages = \"\"\n return 
postedMessages", "def get_messages(self, channel_name=None, channel_id=None, skip_non_user=False):\n if not channel_id:\n channel_id = self.find_channel_id(channel_name)\n response = self.client.api_call(\n f'conversations.history?channel={channel_id}'\n )\n assert response['ok']\n\n messages = []\n\n for message in response['messages']:\n if skip_non_user and 'subtype' in message and message['subtype'] in cfg.SUBTYPES:\n continue\n\n thread_response = self.client.api_call(\n f'conversations.replies?'\n f'channel={channel_id}&'\n f'ts={message[\"ts\"]}'\n )\n assert thread_response['ok']\n messages.extend(thread_response['messages'])\n return messages", "def get_messages(self):\n\t\tcontents = self.archive.read_file('replay.message.events')\n\t\treturn self.protocol.decode_replay_message_events(contents)", "def get_messages_json(self, limit=10):\n params = self.params\n params[\"limit\"] = limit\n response = requests.get(self.url + \"conversations.history\", params=params)\n return response.json()[\"messages\"]", "def get_append_messages(self):\n\t\treturn self._appendMessages", "def get_messages(self, msg_type):\n with self.message_lock:\n self._processed_messages = len(self.messages)\n if msg_type is None:\n return [m for m in self.messages]\n else:\n return [m for m in self.messages if m.msg_type == msg_type]", "def messages_list(self, level=WARNING):\n result = []\n for message in self.messages:\n if message['level'] in self.equal_or_greater_levels[self.level_names[level]]:\n result.append(message)\n return result", "def list(request):\r\n usermessages = request.user.profile.recent_messages()\r\n d = {\r\n 'form': NewMessageForm(),\r\n 'usermessages': usermessages,\r\n 'title': 'Messages',\r\n }\r\n return render_to_response('usermessages/list.html', d, \r\n context_instance=RequestContext(request))", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "def get_messages_to_me(self, day):\n try:\n return self.messages_to_me[day]\n except KeyError:\n return []", "def load_received_messages(username):\n return [m for m in load_all_messages() if m[\"to\"] == username]", "def messages(self):\r\n return TicketMessages(self)", "def process_messages(self, messages):\n\n return messages", "def _create_messages(self, metrics):\n\n messages = []\n\n # Fill the list of messages\n for m in metrics:\n messages.append(str(m))\n\n logger.debug('Messages: %s', messages)\n\n return messages", "def logged_messages(self):\n return self._logged_messages", "def get_messages(self, page=0):\n endpoint = 'https://outlook.office.com/api/v2.0/me/messages'\n if page > 0:\n endpoint = endpoint + '/?%24skip=' + str(page) + '0'\n\n log.debug('Getting messages from endpoint: {} with Headers: {}'.format(endpoint, self._headers))\n\n r = requests.get(endpoint, headers=self._headers)\n\n check_response(r)\n\n return Message._json_to_messages(self, r.json())", "def recorded_messages(self):\n messages = []\n for time in sorted(self.reception_records):\n messages.extend(self.reception_records[time])\n return messages", "def get_messages(request):\n import urllib.parse\n if request.user.is_authenticated():\n msgs = BroadcastMessage.objects.current().for_auth_users()\n else:\n msgs = BroadcastMessage.objects.current().for_unauth_users()\n\n # exclude by those seen\n excluded_session = decode_excluded(request.session.get(\"excluded_broadcasts\", \"\"))\n excluded_cookie = decode_excluded(request.COOKIES.get(\"excluded_broadcasts\", \"\"))\n excluded = excluded_session | excluded_cookie\n 
msgs = msgs.exclude(pk__in=list(excluded))\n\n # filter them by the HTTP_REFERER\n url_parts = urllib.parse.urlparse(request.META.get('HTTP_REFERER', '/'))\n path = url_parts.path\n valid_messages = [msg for msg in msgs if re.match(msg.url_target, path)]\n msg_list = []\n for msg in valid_messages:\n msg_list.append(msg.msg_info())\n if msg.show_frequency == BroadcastMessage.SHOW_ONCE:\n excluded_cookie.add(msg.pk)\n elif msg.show_frequency == BroadcastMessage.SHOW_ONCE_SESSION:\n excluded_session.add(msg.pk)\n request.session['excluded_broadcasts'] = encode_excluded(excluded_session)\n response = HttpResponse(json.dumps(msg_list),\n content_type=\"application/json\")\n response.set_cookie('excluded_broadcasts', encode_excluded(excluded_cookie))\n return response", "def get_messages(\n event: Dict[str, Any]\n ) -> List[Dict[str, Any]]:\n reply_message = event.get(\"reply_message\", {})\n return [reply_message] if reply_message else event.get(\"fwd_messages\", [])", "def get(self):\n user_id = get_jwt_identity()\n user = UserModel.get_by_id(user_id)\n return [message.json() for message in MessageModel.get_package_messages(user)], 200", "def get(self):\n return read_filtered_msgs(request.args)", "def sendall(self):\n\n response = []\n for message in self.messages:\n response.append(self._send(message))\n return response", "def messages(self):\n if \"messages\" in self._prop_dict:\n return MessagesCollectionPage(self._prop_dict[\"messages\"])\n else:\n return None", "def show_messages(self):\n for msg in self.messages:\n print msg['text']", "def userAllMessages(self, uid):\n return self.userMessages(self.userMessageBox(self.getUuidFromUid(uid)), \"_?([a-f]|[0-9]){8}-([a-f]|[0-9]){4}-([a-f]|[0-9]){4}-([a-f]|[0-9]){4}-([a-f]|[0-9]){12}_[0-9]+\")" ]
[ "0.8454096", "0.8423979", "0.8322954", "0.8322954", "0.8192126", "0.8162427", "0.8145105", "0.81278396", "0.8071668", "0.8057453", "0.80292755", "0.7976051", "0.79759413", "0.7859721", "0.78407884", "0.77565014", "0.77484095", "0.770216", "0.7694379", "0.7694379", "0.7694379", "0.7679272", "0.7655272", "0.7623361", "0.7590027", "0.7572376", "0.7572376", "0.75684065", "0.75684065", "0.75684065", "0.7528409", "0.7521704", "0.7515029", "0.7496933", "0.74637187", "0.74293303", "0.7407258", "0.73767835", "0.73745275", "0.7367188", "0.7364568", "0.7348909", "0.73135865", "0.730996", "0.7266888", "0.7259536", "0.72441", "0.7191848", "0.71826357", "0.7159403", "0.7157077", "0.7157077", "0.7136726", "0.71261334", "0.7124086", "0.7102684", "0.70940566", "0.7090066", "0.7068633", "0.70435387", "0.70349133", "0.70007503", "0.69999593", "0.6997033", "0.6984706", "0.696869", "0.69506013", "0.6918365", "0.6901806", "0.6900256", "0.6882137", "0.68809086", "0.68782043", "0.68728524", "0.68663347", "0.6864444", "0.6857811", "0.68562347", "0.6821495", "0.6798033", "0.6790536", "0.67834455", "0.67776436", "0.67555374", "0.67335755", "0.6723499", "0.6707874", "0.6697199", "0.6695123", "0.6693966", "0.66848147", "0.66817886", "0.6680216", "0.66665965", "0.6666032", "0.6651546", "0.6644275", "0.66383845", "0.66338736", "0.66338044", "0.66319644" ]
0.0
-1
Calculates the correction factor for ambient air temperature and relative humidity, based on the linearization of the temperature dependency curve below and above 20 degrees Celsius, assuming a linear dependency on humidity.
def get_correction_factor(self, temperature, humidity): if temperature < 20: return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD return self.CORE * temperature + self.CORF * humidity + self.CORG
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def correction(self):\r\n \r\n # empirical coefficients:\r\n k3, k2, k1, k0 = 0.0892, 0.0544, 0.2511, -0.0017\r\n \r\n # thrust as a function of the azimuth angle and the loads:\r\n thrust = self.qn*np.sin(Turbine.t) + self.qt*np.cos(Turbine.t)\r\n \r\n # interpolator function for the thrust:\r\n function = interp1d(Turbine.t, thrust, kind='cubic')\r\n \r\n # vectorize the function so that it takes an array of angles:\r\n __function__ = np.vectorize(function)\r\n \r\n # thrust coefficient integrating according to phi:\r\n self.cth = simps(__function__(Turbine.p), Turbine.p)\r\n \r\n # induction factor:\r\n self.a = k3*self.cth**3 + k2*self.cth**2 + k1*self.cth + k0\r\n \r\n # correction factor:\r\n if self.a <= 0.15:\r\n self.ka = 1.0/(1.0 - self.a)\r\n else:\r\n self.ka = (1./(1 - self.a))*(0.65 + 0.35*exp(-4.5*(self.a - 0.15)))", "def fRwTemperatureCorrected(Rw_Temp1, Temp1, Temp2):\n\treturn Rw_Temp1 * ((Temp1 + 21.5) / (Temp2 + 21.5))", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def temperature() -> float:", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. 
* self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def apply_weather_correction(\n enduse,\n fuel_y,\n cooling_factor_y,\n heating_factor_y,\n enduse_space_heating,\n enduse_space_cooling\n ):\n if enduse in enduse_space_heating:\n fuel_y = fuel_y * heating_factor_y\n elif enduse in enduse_space_cooling:\n fuel_y = fuel_y * cooling_factor_y\n\n return fuel_y", "def factorizeLinearSum(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280):\n\n var = \"atm/TREFHT\"\n dT = eoi400 - e280\n\n dTCO2 = (2 * (e400 - e280) +\n (eo400 - eo280) +\n (ei400 - ei280) +\n 2 * (eoi400 - eoi280)) / 6\n\n dTtopo = (2 * (eo280 - e280) +\n (eo400 - e400) +\n (eoi280 - ei280) +\n 2 * (eoi400 - ei400)) / 6\n\n dTice = (2 * (ei280 - e280) +\n (ei400 - e400) +\n (eoi280 - eo280) +\n 2 * (eoi400 - eo400)) / 6\n\n return dT, dTCO2, dTtopo, dTice", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 
78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def compute_dewpoint(temperature, humidity):\n\n temp_C = (temperature - 32) * 5 / 9 # Convert temperature from deg F to deg C\n rh = humidity / 100\n\n b = 18.678\n c = 257.14 # deg C\n\n gamma = math.log(rh) + (b * temp_C) / (c + temp_C)\n tdp = c * gamma / (b -gamma)\n\n tdp_F = 9 / 5 * tdp + 32 # Convert temperature from deg C to deg F\n return tdp_F;", "def correct_temp(temp_tab):\n output = subprocess.check_output(\"cat /sys/class/thermal/thermal_zone0/temp\", shell=True)\n cpu_temp = int(output)/1000\n temp_calibrated = temp_tab - ((cpu_temp - temp_tab)/1.5)\n return temp_calibrated", "def __getTemperatureCalibrationCoefficients(self):\n src10 = self.read_byte_data(self.address, 0x10)\n src11 = self.read_byte_data(self.address, 0x11)\n src12 = self.read_byte_data(self.address, 0x12)\n c0 = (src10 << 4) | (src11 >> 4)\n c0 = getTwosComplement(c0, 12)\n c1 = ((src11 & 0x0F) << 8) | src12\n c1 = getTwosComplement(c1, 12)\n return c0, c1", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def water_correction_energies(fname, se_h2o_hof, se_h_hof, ref_h2o_ener,\n se_au=False, ref_au=True):\n check_for_keys(fname, REFEK, NATMK, SEEK)\n with h5.File(fname, 'r') as ifi:\n # This calculates the reference heat of formation\n # Note the reference is assumed to be in eH\n correction = ifi[REFEK][:] - ((ifi[NATMK][:]//3) * ref_h2o_ener)\n if ref_au:\n correction *= 627.509\n if se_au:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof) * 627.509\n else:\n correction -= (ifi[SEEK][:] - se_h_hof - (ifi[NATMK][:]//3) * se_h2o_hof)\n return correction", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def get_actual_air_conditioned_temperature(\n hc_period: np.ndarray,\n theta_ac: np.ndarray, v_supply: np.ndarray, theta_supply_h: np.ndarray, theta_supply_c: np.ndarray,\n l_d_h: np.ndarray, l_d_cs: np.ndarray,\n u_prt: float, a_prt: np.ndarray, a_hcz: np.ndarray, q: float) -> np.ndarray:\n\n rho = get_air_density()\n c = get_specific_heat()\n\n a_prt = a_prt.reshape(1, 5).T\n a_hcz = a_hcz[0:5].reshape(1, 5).T\n\n theta_ac_act_h = np.maximum(theta_ac + (c * rho * v_supply * (theta_supply_h - theta_ac) - l_d_h * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n theta_ac_act_c = np.minimum(theta_ac - (c * rho * v_supply * (theta_ac - theta_supply_c) - l_d_cs * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n return theta_ac_act_h * (hc_period == 'h') + theta_ac_act_c * (hc_period == 'c') + theta_ac * (hc_period == 'm')", "def linearbattfunc(x, IV):\n i1, 
i2, V1, V2 = IV\n di2 = a*io*(n*F)/(R*T)*(V1 - V2)\n #Kinetics\n di1 = -di2\n #charge neutrality\n dV1 = -i1/s\n #solids ohms law\n dV2 = -i2/K\n #liquids ohms law\n return di1, di2, dV1, dV2", "def vct_resistance_correction(\n data: pd.DataFrame, data_TT_MDL: pd.DataFrame\n) -> pd.DataFrame:\n df_resistance = data.groupby(by=\"test type\").get_group(\"resistance\")\n result1 = least_squares(\n fun=error, x0=[0], kwargs={\"y\": df_resistance[\"fx\"], \"u\": df_resistance[\"u\"]}\n )\n\n data[\"fx\"] -= res(result1.x, u=data[\"u\"])\n\n R_m = data_TT_MDL[\"Rm [N]\"]\n fx = -R_m\n result2 = least_squares(\n fun=error,\n x0=result1.x,\n kwargs={\"y\": fx, \"u\": fx.index},\n )\n\n R_factor = 1.14\n # R_factor = 1.14 * 1.43\n data[\"fx\"] += R_factor * res(result2.x, u=data[\"u\"])\n\n return data", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def factorizeScaledResidualAbs(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280):\n var = \"atm/TREFHT\"\n dT, dTCO2, dTtopo, dTice = factorize_Lunt2012(eoi400,ei400,eo400,e400,eoi280,ei280,eo280,e280)\n\n res = eoi400-e280 - (dTCO2+dTtopo+dTice)\n\n dTCO2new = dTCO2 + (res*abs(dTCO2) / (abs(dTCO2)+abs(dTtopo)+abs(dTice)))\n dTtoponew = dTtopo + (res*abs(dTtopo) / (abs(dTCO2)+abs(dTtopo)+abs(dTice)))\n dTicenew = dTice + (res*abs(dTice) / (abs(dTCO2)+abs(dTtopo)+abs(dTice)))\n\n\n return dT, dTCO2, dTtopo, dTice, dTCO2new, dTtoponew, dTicenew", "def calculate_dew_point(temp, hum):\n return temp - (100 - hum) / 5", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. 
J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def _calculate_correction(self, telid):\n readout = self.subarray.tel[telid].camera.readout\n return integration_correction(\n readout.reference_pulse_shape,\n readout.reference_pulse_sample_width.to_value('ns'),\n (1/readout.sampling_rate).to_value('ns'),\n self.window_width.tel[telid],\n 0,\n )", "def relative_error(Eth_original, Eph_original,Eth_model, Eph_model,theta, phi, dsf=1,kf=-1):\n\n st = np.sin(theta).reshape((len(theta), 1))\n #\n # Construct difference between reference and reconstructed\n #\n if kf!=-1:\n dTh = (Eth_model[kf, :, :] - Eth_original[kf, ::dsf, ::dsf])\n dPh = (Eph_model[kf, :, :] - Eph_original[kf, ::dsf, ::dsf])\n #\n # squaring + Jacobian\n #\n dTh2 = np.real(dTh * np.conj(dTh)) * st\n dPh2 = np.real(dPh * np.conj(dPh)) * st\n\n vTh2 = np.real(Eth_original[kf, ::dsf, ::dsf] \\\n * np.conj(Eth_original[kf, ::dsf, ::dsf])) * st\n vPh2 = np.real(Eph_original[kf, ::dsf, ::dsf] \\\n * np.conj(Eph_original[kf, ::dsf, ::dsf])) * st\n\n mvTh2 = np.sum(vTh2)\n mvPh2 = np.sum(vPh2)\n\n errTh = np.sum(dTh2)\n errPh = np.sum(dPh2)\n else:\n dTh = (Eth_model[:, :, :] - Eth_original[:, ::dsf, ::dsf])\n dPh = (Eph_model[:, :, :] - Eph_original[:, ::dsf, ::dsf])\n #\n # squaring + Jacobian\n #\n dTh2 = np.real(dTh * np.conj(dTh)) * st\n dPh2 = np.real(dPh * np.conj(dPh)) * st\n\n vTh2 = np.real(Eth_original[:, ::dsf, ::dsf] \\\n * np.conj(Eth_original[:, ::dsf, ::dsf])) * st\n vPh2 = np.real(Eph_original[:, ::dsf, ::dsf] \\\n * np.conj(Eph_original[:, ::dsf, ::dsf])) * st\n\n mvTh2 = np.sum(vTh2)\n mvPh2 = np.sum(vPh2)\n\n errTh = np.sum(dTh2)\n errPh = np.sum(dPh2)\n\n errelTh = (errTh / mvTh2)\n errelPh = (errPh / mvPh2)\n errel =( (errTh + errPh) / (mvTh2 + mvPh2))\n\n return(errelTh, errelPh, errel)", "def alphaCorrection(q, yc, v):\n\ty90 = getClearWaterDepth(yc)\n\tyArr = map(lambda x: x[0], yc)\n\tcArr = map(lambda x: x[1], yc)\n\tvArr = map(lambda x: x[0], v)\n\n\tassert len(yc) == len(v)\n\n\ts = 0\n\tif yArr[0] > 0:\n\t\ts += yArr[0] / 1000 * (2 - cArr[0]) / 2 * (vArr[0] / 2) ** 3\n\n\tfor i in xrange(1, len(yArr)):\n\t\tprevY = yArr[i-1]\n\t\tprevC = cArr[i-1]\n\t\tprevV = vArr[i-1]\n\n\t\tcurY = yArr[i]\n\t\tcurC = cArr[i]\n\t\tcurV = vArr[i]\n\n\t\tif curC > 0.9:\n\t\t\t# reached Y90\n\t\t\tv90 = prevV + (0.9 - prevC) / (curC - prevC) * (curV - prevV)\n\t\t\tavgC = (0.9 + prevC) / 2\n\t\t\tavgV = (v90 + prevV) / 2\n\t\t\ts += (1 - avgC) * avgV ** 3 * (getY90(yc) - prevY) / 1000\n\t\t\tbreak\n\n\t\tavgV = (curV + prevV) / 2\n\t\tavgC = (curC + prevC) / 2\n\t\ts += (1 - avgC) * avgV ** 3 * (curY - prevY) / 1000\n\treturn s * (getClearWaterDepth(yc) / 1000) ** 2 / q ** 3", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if 
c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def dewpointf(tempf, humidity):\n return round(tempf - ((100-humidity) / 2.778), 2)", "def correction_factors(meas_corr_curve_file_path, CCD_height, CCD_width, lower_wavelength, upper_wavelength):\r\n\r\n # Constructs a filepath and a dataframe for the correction curve data\r\n df = pd.read_csv(meas_corr_curve_file_path)\r\n df1 = pd.DataFrame({\"1\": df.Intensity.loc[0:CCD_width - 1]})\r\n for i in range(2, CCD_height + 1):\r\n df1[f'{i}'] = df.Intensity.loc[((i - 1)*CCD_width):(i*CCD_width - 1)].reset_index(drop=True)\r\n \r\n # Takes the mean intensity value for every wavelength in the correction curve and appends it to a list\r\n samplewavelength = df.Wavelength.loc[0:CCD_width - 1]\r\n df1['Wavelength'] = samplewavelength\r\n df1 = df1.set_index('Wavelength')\r\n df1['mean'] = df1.mean(axis=1)\r\n meas_lamp_radiance = df1['mean'].to_numpy()\r\n\r\n # Loops through the mean values and constructs the radiance via the given formula\r\n lampradiance = []\r\n for i in samplewavelength:\r\n # This is the formula for the correction factor, this formula needs to be changed along with the constants\r\n # if you use a different correction curve\r\n lampvalue = (i**(-5))*np.exp(a+b/i)*(c+d/i+e/(i**2)+f/(i**3)+g/(i**4)+h/(i**5))\r\n lampradiance.append(lampvalue)\r\n \r\n # Constructs a list with correction factors for every wavelength from the calculated radiances vs the measured radicances\r\n correctionfactors = []\r\n for i in range (0,len(samplewavelength)):\r\n correctionfactor = lampradiance[i]/meas_lamp_radiance[i]\r\n correctionfactors.append(correctionfactor)\r\n \r\n return correctionfactors", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def do_calibration(x,y,z,temperature,cp, optimal_t=25):\n # if temperature is used for calibration:\n if temperature is not None:\n # create an array of T - optimal_T (temperature minus the optimal temperature) i.e. 
the deviation in T from the optimum\n temp_dev = np.empty(len(temperature.data))\n for i in range(len(temperature.data)):\n temp_dev[i] = temperature.data[i] - optimal_t\n\n x.data = cp[\"x_offset\"] + (temp_dev * cp[\"x_temp_offset\"]) + (x.data * cp[\"x_scale\"])\n y.data = cp[\"y_offset\"] + (temp_dev * cp[\"y_temp_offset\"]) + (y.data * cp[\"y_scale\"])\n z.data = cp[\"z_offset\"] + (temp_dev * cp[\"z_temp_offset\"]) + (z.data * cp[\"z_scale\"])\n\n x.temp_offset = cp[\"x_temp_offset\"]\n y.temp_offset = cp[\"y_temp_offset\"]\n z.temp_offset = cp[\"z_temp_offset\"]\n\n # if temperature is not used for calibration:\n else:\n x.data = cp[\"x_offset\"] + (x.data * cp[\"x_scale\"])\n y.data = cp[\"y_offset\"] + (y.data * cp[\"y_scale\"])\n z.data = cp[\"z_offset\"] + (z.data * cp[\"z_scale\"])\n\n x.offset = cp[\"x_offset\"]\n x.scale = cp[\"x_scale\"]\n x.calibrated = True\n\n y.offset = cp[\"y_offset\"]\n y.scale = cp[\"y_scale\"]\n y.calibrated = True\n\n z.offset = cp[\"z_offset\"]\n z.scale = cp[\"z_scale\"]\n z.calibrated = True", "def crater_factor(current_weather):\n if current_weather == 'SUNNY':\n factor = 0.9\n elif current_weather == 'RAINY':\n factor = 1.2\n else:\n factor = 1\n return factor", "def temperature(altitude):\n if altitude <= 36152:\n t = 59-0.00356*altitude # deg F\n else:\n t = -70 # deg F\n t = t + 459.7 # R\n return t", "def get_capacitive_rain_sensor_temp(\n self, rain_sensor_temp: Optional[int] = None\n ) -> float:\n # TODO: these values were hardcoded but now are taken from the CW.\n # Check which way is the \"true\" way based on the sensor type (capacitive vs Hydredon)\n # rain_pull_up_resistance = 1\n # rain_res_at_25 = 1\n # rain_beta = 3450\n absolute_zero = 273.15\n\n if rain_sensor_temp is None:\n rain_sensor_temp = self.raw_rain_sensor_temp\n\n if rain_sensor_temp < 1:\n rain_sensor_temp = 1\n elif rain_sensor_temp > 1022:\n rain_sensor_temp = 1022\n\n r = self.rain_pull_up_resistance / ((1023 / rain_sensor_temp) - 1)\n r = math.log(r / self.rain_res_at_25)\n\n return 1 / (r / self.rain_beta + 1 / (absolute_zero + 25)) - absolute_zero", "def __correctTemperature(self):\n # Correct Temp\n self.write_byte_data(self.address, 0x0E, 0xA5)\n self.write_byte_data(self.address, 0x0F, 0x96)\n self.write_byte_data(self.address, 0x62, 0x02)\n self.write_byte_data(self.address, 0x0E, 0x00)\n self.write_byte_data(self.address, 0x0F, 0x00)", "def ambient_coefficient(self):\n return self._ambient_coefficient", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def raw_to_calibrated_temp(self, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n deg_C = ((t_fine * 5 + 128) >> 8)/100.\n return deg_C", "def _calculate_correction(self, telid):", "def interpolate_temperature(temperature):\n return min(1.0, max(0.0, (10 + temperature) / 45))", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def raw_to_calibrated_humidity(self, rawhumidity, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n\n adc_H = np.array(rawhumidity, dtype='int32')\n dig_H1 = self.calib_vals['dig_H1'].astype('int32')\n dig_H2 = self.calib_vals['dig_H2'].astype('int32')\n dig_H3 = self.calib_vals['dig_H3'].astype('int32')\n dig_H4 = self.calib_vals['dig_H4'].astype('int32')\n dig_H5 = self.calib_vals['dig_H5'].astype('int32')\n dig_H6 = self.calib_vals['dig_H6'].astype('int32')\n\n var = t_fine - 76800\n var = 
((((adc_H << 14) - (dig_H4 << 20) - (dig_H5 * var)) + 16384) >> 15) * (((((((var * dig_H6) >> 10) * (((var *(dig_H3) >> 11) + 32768)) >> 10) + 2097152) * (dig_H2) + 8192) >> 14))\n var -= (((((var >> 15) * (var >> 15)) >> 7) * dig_H1) >> 4)\n var.ravel()[var.ravel()<0] = 0\n var.ravel()[var.ravel()>419430400] = 419430400\n return (var>>12)/1024.", "def test_temperature(self):\r\n self.assertEqual(Converter.TemperatureCtoF(50), 122)\r\n self.assertEqual(Converter.TemperatureCtoF(-50), -58)\r\n self.assertEqual(Converter.TemperatureFtoC(50), 10)\r\n self.assertAlmostEqual(Converter.TemperatureFtoC(-50), -45.55, places=0)", "def test_cape_cin_value_error():\n pressure = np.array([1012.0, 1009.0, 1002.0, 1000.0, 925.0, 896.0, 855.0, 850.0, 849.0,\n 830.0, 775.0, 769.0, 758.0, 747.0, 741.0, 731.0, 712.0, 700.0, 691.0,\n 671.0, 636.0, 620.0, 610.0, 601.0, 594.0, 587.0, 583.0, 580.0, 571.0,\n 569.0, 554.0, 530.0, 514.0, 506.0, 502.0, 500.0, 492.0, 484.0, 475.0,\n 456.0, 449.0, 442.0, 433.0, 427.0, 400.0, 395.0, 390.0, 351.0, 300.0,\n 298.0, 294.0, 274.0, 250.0]) * units.hPa\n temperature = np.array([27.8, 25.8, 24.2, 24, 18.8, 16, 13, 12.6, 12.6, 11.6, 9.2, 8.6,\n 8.4, 9.2, 10, 9.4, 7.4, 6.2, 5.2, 3.2, -0.3, -2.3, -3.3, -4.5,\n -5.5, -6.1, -6.1, -6.1, -6.3, -6.3, -7.7, -9.5, -9.9, -10.3,\n -10.9, -11.1, -11.9, -12.7, -13.7, -16.1, -16.9, -17.9, -19.1,\n -19.9, -23.9, -24.7, -25.3, -29.5, -39.3, -39.7, -40.5, -44.3,\n -49.3]) * units.degC\n dewpoint = np.array([19.8, 16.8, 16.2, 16, 13.8, 12.8, 10.1, 9.7, 9.7,\n 8.6, 4.2, 3.9, 0.4, -5.8, -32, -34.6, -35.6, -34.8,\n -32.8, -10.8, -9.3, -10.3, -9.3, -10.5, -10.5, -10, -16.1,\n -19.1, -23.3, -18.3, -17.7, -20.5, -27.9, -32.3, -33.9, -34.1,\n -35.9, -26.7, -37.7, -43.1, -33.9, -40.9, -46.1, -34.9, -33.9,\n -33.7, -33.3, -42.5, -50.3, -49.7, -49.5, -58.3, -61.3]) * units.degC\n cape, cin = surface_based_cape_cin(pressure, temperature, dewpoint)\n expected_cape, expected_cin = 2098.688061 * units('joules/kg'), 0.0 * units('joules/kg')\n assert_almost_equal(cape, expected_cape, 3)\n assert_almost_equal(cin, expected_cin, 3)", "def set_temp_compensation(self, temp: int = 20) -> str:\n response = 'ERROR'\n if type(temp) == float or int:\n response = self.query(f'T,{temp}')\n if temp < 10 or temp > 40:\n response = response + f'\\nNOTE: Unusual ocean temperature set: {temp} C.'\n else:\n print('Temp compensation factor should be a decimal/integer!')\n return response", "def farenheit(ctemp):\n return round(9.0/5.0 * ctemp + 32)", "def test_fdr_correction(self):\r\n pvals = array([.1, .7, .5, .3, .9])\r\n exp = array([.5, .7 * 5 / 4., .5 * 5 / 3., .3 * 5 / 2., .9])\r\n obs = fdr_correction(pvals)\r\n self.assertFloatEqual(obs, exp)", "def set_temp_compensation(self, temp: int = 25) -> str:\n response = 'ERROR'\n if type(temp) == float or int:\n response = self.query(f'T,{temp}')\n if temp < 10 or temp > 40:\n response = response + f'\\nNOTE: Unusual ocean temperature set: {temp} C.'\n else:\n print('Temp compensation factor should be a decimal/integer!')\n return response", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def calc_q_gain(Tfl, Tabs, q_rad_Whperm2, DT, Tin, Tout, aperture_area_m2, c1, c2, Mfl, delts, Cp_waterglycol, C_eff, Te):\n\n xgain = 1\n xgainmax = 100\n exit = False\n while exit == False:\n qgain_Whperm2 = q_rad_Whperm2 - c1 * (DT[1]) - c2 * abs(DT[1]) * DT[1] # heat production 
from solar collector, eq.(5)\n\n if Mfl > 0:\n Tout = ((Mfl * Cp_waterglycol * Tin) / aperture_area_m2 - (C_eff * Tin) / (2 * delts) + qgain_Whperm2 + (\n C_eff * Tfl[1]) / delts) / (Mfl * Cp_waterglycol / aperture_area_m2 + C_eff / (2 * delts)) # eq.(6)\n Tfl[2] = (Tin + Tout) / 2\n DT[2] = Tfl[2] - Te\n qdiff = Mfl / aperture_area_m2 * Cp_waterglycol * 2 * (DT[2] - DT[1])\n else:\n Tout = Tfl[1] + (qgain_Whperm2 * delts) / C_eff # eq.(8)\n Tfl[2] = Tout\n DT[2] = Tfl[2] - Te\n qdiff = 5 * (DT[2] - DT[1])\n\n if abs(qdiff < 0.1):\n DT[1] = DT[2]\n exit = True\n else:\n if xgain > 40:\n DT[1] = (DT[1] + DT[2]) / 2\n if xgain == xgainmax:\n exit = True\n else:\n DT[1] = DT[2]\n xgain += 1\n\n # FIXME: redundant...\n # qout = Mfl * Cp_waterglycol * (Tout - Tin) / aperture_area\n # qmtherm = (Tfl[2] - Tfl[1]) * C_eff / delts\n # qbal = qgain - qout - qmtherm\n # if abs(qbal) > 1:\n # qbal = qbal\n return qgain_Whperm2", "def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9", "def VarianceOfAbsAcceleration(self):\n H = []\n for i in range(len(self.omega_range)):\n \"\"\"Calculation of the Transmission matrix H\"\"\"\n H.append(np.linalg.inv((-self.omega_range[i] ** 2 * self.M\n - 1j * self.omega_range[i] * self.C\n + self.K)))\n \"\"\"squared absolute of the transmission matrix H multiplied with the diagonal of the mass matrix M (M*I)\"\"\"\n FRFacc = [H[wincr].dot(np.diagonal(self.M)) * self.omega_range[wincr] ** 2 for wincr in\n range(len(self.spectrum))]\n Habs2 = [(np.abs(np.ones(len(vector), dtype=float) - vector) ** 2) for vector in FRFacc]\n PSDexc = self.spectrum\n \"\"\"Response of all DOFs as PSD\"\"\"\n RespPSD = [Habs2[wincr] * PSDexc[wincr] for wincr in range(len(self.spectrum))]\n AccPSD = [abs(RespPSD[wincr] + 0*PSDexc[wincr]) for wincr in range(len(self.spectrum))]\n \"\"\"The variance of the response can be obtained with the integral of the response PSD. 
\n integral(PSD_response)\"\"\"\n variance = (np.trapz(AccPSD, self.omega_range, axis=0))\n return variance", "def soil_temp_factor(self, project_day):\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def get_supply_air_absolute_humidity_for_cooling(\n x_hs_out_c: np.ndarray, x_ac: np.ndarray, operation: np.ndarray) -> np.ndarray:\n\n return np.where(operation == 'c', x_hs_out_c, x_ac)", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def test_reference_conversion_factors():\n assert constants.eV == pytest.approx(1.602176565e-19)\n assert constants.eV * constants.N_A / constants.kcal == pytest.approx(23.06, 3e-5)\n assert constants.hartree * constants.N_A / constants.kcal == pytest.approx(627.5095)\n assert constants.hartree / constants.eV == pytest.approx(27.2114)\n assert constants.hartree * constants.centi / (\n constants.h * constants.c\n ) == pytest.approx(219474.63)", "def convert_f_to_c(temp_in_farenheit):\n \n temp=round((float(temp_in_farenheit)-32)*5/9,1)\n \n return (temp)", "def _calculate_correction(self, telid):\n return 1", "def get_attic_temperature(theta_sat: np.ndarray, theta_ac: np.ndarray) -> np.ndarray:\n\n # temperature difference coefficient\n h = 1.0\n\n return theta_sat * h + theta_ac * (1 - h)", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n 
self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def relative_to_absolute_hum(rel_h, temp):\n A = 8.07131\n B = 1730.63\n C = 233.426\n Ph20_star = 10 ** (A - B / (C + temp))\n P = rel_h / 100. * Ph20_star\n return P.reshape(-1,1)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def ErrorCorrect(val,fEC):\n return val * fEC", "def target_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_low_c)", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def airfoilEffT(self):\n return float(Importer(Component='Evaluations',\n VariableName='Wing airfoil efficiency factor',\n Default=.95,\n Path=self.filePath).getValue)", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def get_duct_ambient_air_temperature(\n is_duct_insulated: bool, l_duct_in_r: np.ndarray, l_duct_ex_r: np.ndarray,\n theta_ac: np.ndarray, theta_attic: np.ndarray) -> np.ndarray:\n\n if is_duct_insulated:\n # If the duct insulated, the duct ambient temperatures are equals to the air conditioned temperatures.\n# return np.full((5, 8760), theta_ac)\n return np.tile(theta_ac, (5, 1))\n else:\n # If the duct NOT insulated, the duct ambient temperatures are\n # between the attic temperatures and the air conditioned temperatures.\n l_in = l_duct_in_r.reshape(1, 5).T\n l_ex = l_duct_ex_r.reshape(1, 5).T\n return (l_in * theta_ac + l_ex * theta_attic) / (l_in + l_ex)", "def test_scalar_dewpoint_from_relative_humidity():\n td = dewpoint_from_relative_humidity(10.6 * units.degC, 0.37)\n assert_almost_equal(td, 26. 
* units.degF, 0)", "def GetAdjFac(local_sensor_type, cal_sensor_type):\n if local_sensor_type == cal_sensor_type: #no need to adjust\n return 1\n elif local_sensor_type == 'ARS' and cal_sensor_type != 'ARS': #local sensor P is mm, but the model was calibrated based on in\n return round(1/25.4,3)\n elif local_sensor_type in ['SCAN', 'CRN'] and cal_sensor_type == 'ARS': #local senor P is inches, model calibrated in mm\n return 25.4\n else: #no need to adjust betweem 'CRN' and 'SCAN'\n return 1", "def test_getThermalExpansionFactorConservedMassByLinearExpansionPercent(self):\n hotTemp = 700.0\n dLL = self.component.material.linearExpansionFactor(\n Tc=hotTemp, T0=self._coldTemp\n )\n ref = 1.0 + dLL\n cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n self.assertAlmostEqual(cur, ref)", "def get_refrigerant_temperature_cooling(theta_ex: float, theta_surf_hex_c: float) -> (float, float, float, float):\n\n theta_ref_evp_c = np.clip(theta_surf_hex_c, -50.0, None)\n# theta_ref_cnd_c = np.clip(np.clip(theta_ex + 27.4 - 1.35 * theta_ref_evp_c, theta_ex, None), None, 65.0)\n theta_ref_cnd_c = np.clip(np.clip(theta_ex + 27.4 - 1.35 * theta_ref_evp_c, theta_ex, None), theta_ref_evp_c + 5.0, 65.0)\n theta_ref_sc_c = np.clip(0.772 * theta_ref_cnd_c - 25.6, 0.0, None)\n theta_ref_sh_c = np.clip(0.194 * theta_ref_cnd_c - 3.86, 0.0, None)\n\n return theta_ref_evp_c, theta_ref_cnd_c, theta_ref_sh_c, theta_ref_sc_c", "def friction_factor(v1: \"int\", v2: \"int\") -> \"int\":", "def target_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_high_c\"))\r\n return kelvin_to_celsius(self._target_temperature_high)", "def langevin_coefficients(\n temperature,\n dt,\n friction,\n masses):\n vscale = np.exp(-dt*friction)\n if friction == 0:\n fscale = dt\n else:\n fscale = (1-vscale)/friction\n kT = BOLTZ * temperature\n nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale\n invMasses = 1.0/masses\n sqrtInvMasses = np.sqrt(invMasses)\n\n ca = vscale\n cb = fscale*invMasses\n cc = nscale*sqrtInvMasses\n return ca, cb, cc", "def f2c_qa_function():\n F = float(input(\"Provide a Fahrenheit temperature in degrees: \"))\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def calc_relhum(dewpt,t):\n\n relhum=100.*(np.exp((const.es_Abolton*dewpt)/(const.es_Bbolton+dewpt))/np.exp((const.es_Abolton*t)/(const.es_Bbolton+t)))\n return relhum", "def __getPressureCalibrationCoefficients(self):\n src13 = self.read_byte_data(self.address, 0x13)\n src14 = self.read_byte_data(self.address, 0x14)\n src15 = self.read_byte_data(self.address, 0x15)\n src16 = self.read_byte_data(self.address, 0x16)\n src17 = self.read_byte_data(self.address, 0x17)\n src18 = self.read_byte_data(self.address, 0x18)\n src19 = self.read_byte_data(self.address, 0x19)\n src1A = self.read_byte_data(self.address, 0x1A)\n src1B = self.read_byte_data(self.address, 0x1B)\n src1C = self.read_byte_data(self.address, 0x1C)\n src1D = self.read_byte_data(self.address, 0x1D)\n src1E = self.read_byte_data(self.address, 0x1E)\n src1F = self.read_byte_data(self.address, 0x1F)\n src20 = self.read_byte_data(self.address, 0x20)\n src21 = self.read_byte_data(self.address, 0x21)\n c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)\n c00 = getTwosComplement(c00, 20)\n c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17\n c10 = getTwosComplement(c10, 20)\n c20 = (src1C << 8) | src1D\n c20 = getTwosComplement(c20, 16)\n c30 = (src20 << 8) | src21\n c30 = 
getTwosComplement(c30, 16)\n c01 = (src18 << 8) | src19\n c01 = getTwosComplement(c01, 16)\n c11 = (src1A << 8) | src1B\n c11 = getTwosComplement(c11, 16)\n c21 = (src1E < 8) | src1F\n c21 = getTwosComplement(c21, 16)\n return c00, c10, c20, c30, c01, c11, c21", "def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def m1_correction_factor(terrain_type, wind_speed):\n interpolation_vals = TABLE_M1[terrain_type]\n\n closest_above = 999\n closest_below = 0\n\n for k in list(interpolation_vals.keys()):\n if wind_speed <= k < closest_above:\n closest_above = k\n if wind_speed >= k > closest_below:\n closest_below = k\n\n if closest_above == 999:\n # Outside of range, return largest\n return interpolation_vals[closest_below]\n elif closest_above == closest_below:\n return interpolation_vals[closest_below]\n else:\n v1 = interpolation_vals[closest_below]\n v2 = interpolation_vals[closest_above]\n return v1 + (v2 - v1) * (wind_speed - closest_below) / (closest_above - closest_below)", "def le_calibration_func(etr, kc, ts):\n return etr * kc * (2.501 - 2.361E-3 * (ts - 273)) * 2500 / 9", "def do_correction(input_model, lin_model):\n # Create the output model as a copy of the input\n output_model = input_model.copy()\n\n # Propagate the DQ flags from the linearity ref data into the 2D science DQ\n propagate_dq_info(output_model, lin_model)\n\n # Apply the linearity correction coeffs to the science data\n apply_linearity(output_model, lin_model)\n\n return output_model", "def calc_temp_withlosses(t0, Q, m, cp, case):\n if m > 0:\n if case == \"positive\":\n t1 = t0 + Q / (m * cp)\n else:\n t1 = t0 - Q / (m * cp)\n else:\n t1 = 0\n return t1", "def life_correction(x, y, z, si_q, pmap_z, pmt_zspec):\n if len(z) != len(pmt_zspec):\n equal_vals = [np.isclose(z_p, z) for z_p in pmap_z]\n #if not np.any(equal_vals): print(pmap_z, z)\n charge_split = np.array([si_q[eq]/np.sum(si_q[eq]) * pmt_zspec[i] for i, eq in enumerate(equal_vals)]).flatten()\n charge_split = np.concatenate(charge_split)\n return np.sum(charge_split * lt_corr(z, x, y).value)\n \n return np.sum(pmt_zspec * lt_corr(z, x, y).value)", "def get_decided_outlet_supply_air_absolute_humidity_for_cooling(\n x_req_c: np.ndarray, v_supply: np.ndarray, x_hs_out_min_c: np.ndarray) -> np.ndarray:\n\n return np.maximum(np.sum(x_req_c * v_supply / v_supply.sum(axis=0), axis=0), x_hs_out_min_c)", "def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh", "def linear_heat_transfer(x, t, K_medium, rho_medium, c_medium, T_medium_initial, H_heat_transfer, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n\n h = H_heat_transfer/K_medium\n erfc_factor_1 = erfc(x/(2*np.sqrt(k*t)))\n\n #combine factors in logdomain, since the exp-factors quickly approach\n #infinity while erfc-factor goes to zero\n log_exp_factor_1 = h*x\n log_exp_factor_2 = k*t*h**2\n log_erfc_factor_2 = np.log(erfc(x/(2*np.sqrt(k*t)) + h*np.sqrt(k*t)))\n exp_erfc_factor = 
np.exp(log_exp_factor_1 + log_exp_factor_2 + log_erfc_factor_2)\n\n return (erfc_factor_1 - exp_erfc_factor)*(T_external_applied - T_medium_initial) + T_medium_initial" ]
[ "0.67288923", "0.67288923", "0.63658255", "0.6330217", "0.6249179", "0.61254853", "0.60804427", "0.591038", "0.5909975", "0.58495927", "0.58438057", "0.58438057", "0.57863986", "0.57804346", "0.5774944", "0.5772292", "0.5767601", "0.5738072", "0.5698512", "0.56778646", "0.56462497", "0.56458294", "0.5644073", "0.56332135", "0.56271404", "0.5626142", "0.561992", "0.56010747", "0.55888635", "0.5588817", "0.5580388", "0.55665505", "0.556037", "0.5532594", "0.5523525", "0.55168605", "0.5516542", "0.5512843", "0.5509815", "0.54921234", "0.5490533", "0.5488413", "0.54766476", "0.5471573", "0.5461478", "0.5455617", "0.5454021", "0.54488766", "0.5447273", "0.5445289", "0.544504", "0.54432935", "0.5434941", "0.5433642", "0.54159766", "0.5414458", "0.54144555", "0.5413418", "0.54090536", "0.54057574", "0.54023844", "0.54011774", "0.53858095", "0.5385312", "0.53803384", "0.5372293", "0.53645164", "0.5363674", "0.5362622", "0.53554606", "0.5350061", "0.53398615", "0.53394425", "0.53344345", "0.53318053", "0.5330462", "0.5327983", "0.53254265", "0.5324838", "0.5322221", "0.53080946", "0.52972597", "0.52963614", "0.52939063", "0.5292737", "0.5287081", "0.5285258", "0.5280172", "0.526967", "0.526646", "0.52652115", "0.5265186", "0.5264767", "0.52641714", "0.52639645", "0.525847", "0.5257304", "0.5253045", "0.52485967" ]
0.7517804
1
Returns the resistance of the sensor in kOhms // -1 if no value is read from the pin
def get_resistance(self): adc = ADC(self.pin) value = adc.read() if value == 0: return -1 return (4095./value - 1.) * self.RLOAD
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_distance():\n \n GPIO.output(pinTrigger, False) # pulse off\n time.sleep(0.2)\n\n GPIO.output(pinTrigger,True) # send 10us pulse\n time.sleep(10e-6)\n GPIO.output(pinTrigger,False)\n\n StartTime = time.time() # start timer\n\n while GPIO.input(pinEcho)==0: # keep timer reset\n StartTime = time.time()\n\n while GPIO.input(pinEcho) == 1:\n StopTime = time.time()\n\n if StopTime - StartTime >= 0.04:\n print(\"Too close!!!\")\n StopTime = StartTime\n break\n\n ElapsedTime = StopTime - StartTime\n\n distance = (ElapsedTime * 34326)/2\n\n print('{:2.1f} cm'.format(distance))\n #dots = int(distance/2)\n #print('.'*dots)\n\n return(distance)", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def min_humidity(self):\n return 0", "def READ_PRESSURE_SENSOR():\n return 15.246", "def input_resistance(self):\n return None", "def gpio_read_analogue(self, pin: int) -> float:\n return randint(0, 500) / 100", "def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))", "def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def moisture(self):\n if self.moisture_sensor is None:\n return None\n else:\n return self.moisture_sensor.percent", "def set_resistance(self):\n\t\tself.resistance = int(input(\"Enter the Value from (0-256)= \"))\n\t\tif self.resistance > 256 :\n\t\t\tself.resistance = int(input(\"Enter the Value from (0-256)= \"))\n\t\t\n\t\treturn self.resistance", "def get_on_resistance(self):\n is_nchannel = True\n stack = 4\n is_cell = False\n return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)", "def set_resistance(self, value):\n self.write(\":RES {}OHM\".format(value))", "def get_psi(data):\n max_sensor_psi = 100 # Also 30\n psi = (data - 0.51) * (max_sensor_psi / 4)\n psi = round(psi, 0)\n return psi", "def nitrogen_dioxide(self) -> float | None:\n return round_state(self._get_sensor_value(API_NO2))", "def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def unit_of_measurement(self):\r\n return self._sensor_cfg[1]", "def unit_of_measurement(self):\r\n return self._sensor_cfg[1]", "def state(self):\n result = self.probe.get_data(SENSOR_TYPES[self.sensor][2])\n round_to = SENSOR_TYPES[self.sensor][3].get(\"round\")\n if round_to is not None:\n result = round(result, round_to)\n return result", "def distance_sensor(unit):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, 
data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Range\", Range, 2)\n\tdistance = data.range\n\t# transfer sensor data to target unit\n\tif unit == \"cm\":\n\t\tresult = distance / 10.0\n\telse:\n\t\tresult = distance\n\n\tdelete_sensor(sensor_name)\n\treturn result", "def get_analog(self,pin):\n try:\n cmd = protocol.GET_ANALOG.format(pin)\n response = self.__send_and_receive(cmd)\n values = response.split(' ')\n printf(values, type=DEBUG)\n val = values[1][1:]\n return int(float(val))\n except Exception as e:\n printf(\"Error {}\".format(e))\n return None", "def temperature() -> float:", "def task(node_dict):\n # always check that the sensor has been initialized\n if node_dict['pn'].heading == empty_value:\n # if sensor is not reading, return no motor command\n return 0\n # compute heading difference\n hdiff = heading_diff(r_target, node_dict['pn'].heading)\n # p-control\n hout = hdiff * P\n # limit output if necassary\n if abs(hout) > r_max:\n hout = copysign(r_max, hout)\n return hout", "def read_odometer(self):\n msg = f\"Car has {self.odometer_reading} miles on it.\"\n return msg", "def read_led(self, pin):\n value = 0 #Default to nowt\n if self.iface.connected:\n try:\n value = self.iface.get_PWM_dutycycle(pin)\n except (AttributeError, IOError, pigpio.error):\n logging.error(\" Cannot read PWM of pin #%s\" % (pin,))\n else:\n logging.error(\" Interface not connected. Cannot read PWM of pin #%s.\" % (pin,))\n return value", "def read_odometer(self):\r\n print(\"This car has \"+str(self.odometer_reading)+\" miles on it.\")", "def get_distance(self):\n\n # Activate trigger\n self.trigger()\n\n # Detect rising edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.RISING, timeout=2)\n if channel is None:\n # Timeout on wait of rising interrupt\n return None\n else:\n # Rising edge detected, save pulse start\n pulse_start = time.time()\n\n\n # Detect falling edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.FALLING, timeout=2)\n if channel is None:\n # Timeout on wait of falling interrupt\")\n return None\n else:\n # Falling edge detected, save pulse end\n pulse_end = time.time()\n\n # Calculated pulse width in microseconds (x1mln)\n pulse_width = (pulse_end - pulse_start)*1000000\n\n # Return distance in cm\n return pulse_width / 58", "def state(self):\n value = getattr(self.coordinator.senec, self._sensor)\n try:\n rounded_value = round(float(value), 2)\n return rounded_value\n except ValueError:\n return value", "def read_core_vbat(self) -> float:", "def get_speed(self) -> float: \r\n if self.distance < self.distance_stop:\r\n print(\"STOP: Obstacle detected ({} cm)\".format(self.distance))\r\n return 0\r\n elif self.distance < self.distance_slow: \r\n return self.speed * 0.8\r\n else:\r\n return self.speed", "def locked_temp_min_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_min_c\"))\r\n return kelvin_to_celsius(self._locked_temp_min)", "def sensed(self):\n return not self.pin.read() if self.pinishigh else self.pin.read()", "def read_temp(self, ctrl_pin):\n bytes_ = self.read_bytes(ctrl_pin)\n int_ = struct.unpack('>H', bytes_)[0]\n if int_ & 0x04 > 1:\n temp_celsius = -1\n else:\n temp_celsius = (int_ >> 3) * 0.25\n return temp_celsius", "async def get_distance() -> int:\n\n _initialize_sensor()\n pulse_start, pulse_end = await _get_echo_time(False), await _get_echo_time(True)\n signal_delay = pulse_end - pulse_start\n distance = _compute_distance(signal_delay)\n return 
int(distance)", "def sulphur_dioxide(self) -> float | None:\n return round_state(self._get_sensor_value(API_SO2))", "def level(self):\n return self.__pin.pwm", "def hp(self):\n if self.positive:\n return self.degree + (self.minute / 100) + (self.second / 10000)\n else:\n return -(self.degree + (self.minute / 100) + (self.second / 10000))", "def get_in_RSSI(self) -> int:\n if self.in_RSSI != None:\n return self.in_RSSI\n return 0", "def humidity(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"humidity\"))\r\n return round(self._humidity * 100)", "def distmeter_err(self):\n from astropy import units\n return self.distmpc_err * units.Mpc.in_units(\"m\")", "def read_sensor(self, maze):\n return maze.distance_to_nearest_beacon(*self.xy)", "def read_odometer(self):\n print(\"This car has \" + str(self.odometer_reading) + \" miles on it.\")", "def read_odometer(self):\n print(\"This car has \" + str(self.odometer_reading) + \" miles on it.\")", "def read_odometer(self):\n print(\"This car has \" + str(self.odometer_reading) + \" miles on it.\")", "def read_odometer(self):\n print(\"This car has \" + str(self.odometer_reading) + \" miles on it.\")", "def read_odometer(self):\n print(\"This car has \" + str(self.odometer_reading) + \" miles on it.\")", "def read_odometer(self):\n print(\"This car has \" + str(self.odometer_reading) + \" miles on it.\")", "async def analog_read(self, pin):\n return self.analog_pins[pin].current_value", "def temperature():\n snmp.temperature()\n return 0", "def posture_sensor(axis):\n\n\treturn 0.0", "def get_value(self):\n return self.sensor.get_value()", "def read_odometer(self):\n print(f\"This car has {self.odometer} miles on it.\")", "def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance", "def get_out_RSSI(self) -> int:\n if self.out_RSSI != None:\n return self.out_RSSI\n return 0", "def omtrek(self):\n x = pi*self.r**2\n return x", "def kilometres_available(self):\n return self.fuel / self.litres_per_kilometre", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def get_distance(self, sensor):\n if sensor not in self.distance_sensors:\n raise ValueError('sensor should be one of {}!'.format(self.distance_sensors))\n\n return 255 - self._io.last_state['distance'][sensor]", "def sensorStrength(self):\n # TODO: also return type of sensor\n radar = self._getAttribute(Attribute.scanRadarStrength)\n ladar = self._getAttribute(Attribute.scanLadarStrength)\n magnetometric = self._getAttribute(Attribute.scanMagnetometricStrength)\n gravimetric = self._getAttribute(Attribute.scanGravimetricStrength)\n\n return radar or ladar or magnetometric or gravimetric", "def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n 
tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)", "def molar_mass_dry_air():\n return 28.9647", "def get_power(self):\n #GPIO.setmode(GPIO.BOARD)\n #GPIO.setup(self.input_pin, GPIO.IN)\n return 0", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def true_value(self):\n return self.driver_station.getStickAxis(self.joystick_port, self.axis_number) * self.multiplier", "def resistance(stock):\n output= stock_max(stock)-(stock_max(stock)*.05)\n return output", "def raw_sensor_temp(self):\n\n # return the value in millicelsius\n return float(self.raw_sensor_strings[1].split(\"=\")[1])", "def GetKelvin(self):\n return self.GetCelcius() + 273.15", "def solar_meter(self):\n return self._solar_meter", "def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def read_odometer(self):\n print(f\"This car has {self.odometer_reading} miles on it.\")", "def read_odometer(self):\n print(f\"This car has {self.odometer_reading} miles on it.\")", "def temp(self):\n\t\ttemp_out = self.read16(MPU9250_ADDRESS, TEMP_DATA)\n\t\ttemp = temp_out / 333.87 + 21.0 # these are from the datasheets\n\t\treturn temp", "def determine_intensity_single_channel(pi, pin_light, i2c_multiplexer_handle, i2c_sensor_handle, channel_number):\n pi.write(pin_light, 1)\n i2c_multiplexer_select_channel(pi,\n i2c_multiplexer_handle, channel_number)\n intensity = i2c_sensor_handle.ch0_light\n timepoint = time.time()\n time.sleep(0.25)\n pi.write(pin_light, 0)\n return timepoint, intensity", "def calibrate_meter(self):\r\n print(\"meter calibrated\")", "def ventilation_rate_per_second(self):\n return self.volume * self.outdoor_air_ventilation * 1000 / 3600", "def GetCelcius(self):\n ADCMax = (self.ADDevice.ADSamples * 1023) /(2**self.ADDevice.ADBitshift)\n sample=self.Get()\n R = self.RefVoltage / ADCMax\n Volt = sample*R-.5 \n return Volt/self.VoltPerDegree", "def get_wind_sensor(self) -> int:\n self.serial.write(b\"V!\")\n wind_sensor = self.__extract_int(self.__read_response(1)[0], b\"!w\")\n\n return wind_sensor", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0", "def calc_anisotropy(self):\n ## no Anisotropic values, we have a spherical isotropic atom\n if self.U is None:\n return 1.0\n\n evals = linalg.eigenvalues(self.U)\n ansotropy = min(evals) / max(evals)\n return ansotropy", "def max_humidity(self):\n return 60", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def circumference(self, lat):\n return 2 * np.pi * self.rsphere 
* np.cos(np.deg2rad(lat))", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def target_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_high_c\"))\r\n return kelvin_to_celsius(self._target_temperature_high)", "def get_single_resistance_metering(self, res_type='float'):\n self.amplifier.get_discrete_acquisition()\n return self.get_resistance(res_type=res_type)", "def unit_of_measurement(self):\n return self.sensor_type[\"unit\"]", "def read_line(self):\n self.read_calibrated()\n\n avg = 0\n summ = 0\n online = False\n\n for i in range(0, self.NUM_SENSORS):\n val = self.sensorValues[i]\n if val > 500: online = True\n if val > 50:\n multiplier = i * 1000\n avg += val * multiplier\n summ += val\n\n if online == False:\n if self.lastValue < (self.NUM_SENSORS-1)*1000/2:\n return 0\n else:\n return (self.NUM_SENSORS-1)*1000\n\n self.lastValue = avg/summ\n return self.lastValue", "def get_servo_pct(pi, pin):\n return pulsewidth2pct(pi.get_servo_pulsewidth(pin))", "async def rain_rate(self, value):\n if not value:\n return 0\n return await self.rain(value * 60)", "def unit_of_measurement(self):\n return SENSOR_TYPES[self.sensor][1]", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def read_odometer(self):\n print(f\"This car has {self.odometer_reading} miles on it\")", "def test_str_kelvin_per_percent(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"kelvin_per_percent\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xFA,\n 0xBD,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -441384.96)\n self.assertEqual(sensor.unit_of_measurement(), \"K/%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def duty(self):\n if self._chanRMT < 0:\n return self._pin.duty()\n else:\n return self._dutyRMT", "def distmeter(self):\n return self._distance.to(\"m\").value", "def get_radius(self):\r\n return 1" ]
[ "0.7418249", "0.64756715", "0.63563746", "0.62588537", "0.6185685", "0.6180163", "0.61774236", "0.5992633", "0.59730357", "0.59586394", "0.5931926", "0.5876045", "0.58549595", "0.58542717", "0.5832019", "0.5829989", "0.58182263", "0.5806874", "0.5794576", "0.5794576", "0.576387", "0.5760282", "0.57326984", "0.5732564", "0.56976", "0.5657945", "0.561256", "0.56043583", "0.5603248", "0.55982035", "0.55925554", "0.5590518", "0.5580798", "0.55780387", "0.5565286", "0.5564631", "0.556097", "0.55606467", "0.5556879", "0.555253", "0.55509716", "0.5550691", "0.5540752", "0.55316013", "0.55316013", "0.55316013", "0.55316013", "0.55316013", "0.55316013", "0.55253494", "0.55185956", "0.5517135", "0.55160046", "0.5499441", "0.5497427", "0.54933643", "0.54922986", "0.5486906", "0.5484928", "0.54840636", "0.54826313", "0.5480086", "0.5477478", "0.5470184", "0.54683954", "0.5465385", "0.5461708", "0.5459199", "0.5446878", "0.5441304", "0.543684", "0.54356563", "0.5434271", "0.5434271", "0.5434029", "0.5433104", "0.54305613", "0.5421051", "0.5416801", "0.5412443", "0.54103154", "0.54086673", "0.5407677", "0.540331", "0.540331", "0.5400326", "0.5398548", "0.5389096", "0.5388524", "0.53809756", "0.5377479", "0.5374459", "0.53713804", "0.5365796", "0.5364831", "0.536206", "0.5356215", "0.53515506", "0.53514624", "0.5348078" ]
0.71054703
1
Gets the resistance of the sensor corrected for temperature/humidity
def get_corrected_resistance(self, temperature, humidity): return self.get_resistance()/ self.get_correction_factor(temperature, humidity)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)", "def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))", "def get_rel_humidity(\n self, sensitivity: Optional[str] = None, rhel_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or rhel_sensor is None:\n sensitivity, rhel_sensor = self.get_rel_humidity_sensor()\n if sensitivity == \"hh\":\n rh = rhel_sensor * 125 / 65536 - 6\n elif sensitivity == \"h\":\n rh = rhel_sensor * 125 / 100 - 6\n else:\n raise CloudWatcherException(f\"Unknown rhel sensor type {sensitivity}\")\n return rh", "def temperature() -> float:", "def _calculate_heater_resistance(self, target_temp):\n if target_temp > 400: #Maximum temperature\n target_temp = 400\n\n var1 = (calGH1 / 16.0) + 49.0\n var2 = ((calGH2 / 32768.0) * 0.0005) + 0.00235\n var3 = calGH3 / 1024.0\n var4 = var1 * (1.0 + (var2 * target_temp))\n var5 = var4 + (var3 * self.calAmbTemp)\n res_heat = 3.4 * ((var5 * (4 / (4 + calResHeatRange)) * (1 / (1 + (calResHeatVal * 0.002)))) - 25)\n\n return int(res_heat)", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def humidity(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"humidity\"))\r\n return round(self._humidity * 100)", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 
7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625", "def relative_humidity(self):\n humidity_string = self._current_observation['relative_humidity']\n return float(humidity_string.strip('%'))", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def humidity(self, update_temperature=True):\n if (self.t_fine is None) or update_temperature:\n self.temperature()\n\n adc_H = float(self.raw_humidity())\n var_H = self.t_fine - 76800.0\n var_H = (\n (adc_H - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * var_H)) *\n (self.dig_H2 / 65536.0 * (\n 1.0 + self.dig_H6 / 67108864.0 * var_H *\n (1.0 + self.dig_H3 / 67108864.0 * var_H)))\n )\n var_H = var_H * (1.0 - self.dig_H1 * var_H / 524288.0)\n\n if (var_H > 100.0):\n var_H = 100.0\n elif (var_H < 0.0):\n var_H = 0.0\n\n return round(var_H, 3)", "def humidity(self):\r\n self._read_temperature()\r\n hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)\r\n #print(\"Humidity data: \", hum)\r\n adc = float(hum[0] << 8 | hum[1])\r\n #print(\"adc:\", adc)\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n var1 = float(self._t_fine) - 76800.0\r\n #print(\"var1 \", var1)\r\n var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)\r\n #print(\"var2 \",var2)\r\n var3 = adc - var2\r\n #print(\"var3 \",var3)\r\n var4 = self._humidity_calib[1] / 65536.0\r\n #print(\"var4 \",var4)\r\n var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)\r\n #print(\"var5 \",var5)\r\n var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5\r\n #print(\"var6 \",var6)\r\n var6 = var3 * var4 * (var5 * var6)\r\n humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)\r\n\r\n if humidity > _BME280_HUMIDITY_MAX:\r\n return _BME280_HUMIDITY_MAX\r\n if humidity < _BME280_HUMIDITY_MIN:\r\n return _BME280_HUMIDITY_MIN\r\n # else...\r\n return humidity", "def get_rel_humidity_sensor(self) -> Tuple[str, int]:\n self.serial.write(b\"h!\")\n rhel_sensor = self.__read_response(1)[0]\n if rhel_sensor[0:3] == b\"!hh\":\n rhel_sensor = self.__extract_int(rhel_sensor, b\"!hh\")\n # if we get 65536, the sensor is not connected\n if rhel_sensor == 65535:\n raise CloudWatcherException(\n \"High precision RHEL/temp sensor not connected\"\n )\n return \"hh\", rhel_sensor\n else:\n rhel_sensor = self.__extract_int(rhel_sensor, b\"!h\")\n # if we get 100, the sensor is not connected\n if rhel_sensor == 100:\n raise CloudWatcherException(\n \"Low precision RHEL/temp sensor not connected\"\n )\n return \"h\", rhel_sensor", "def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - 
resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def READ_PRESSURE_SENSOR():\n return 15.246", "def humidity(self):\n names = ['anc_air_relative_humidity']\n return self.sensor.get_with_fallback('humidity', names)", "def get_temp(self):\n\t\traw_temp = self.read_i2c_word(self.TEMP_OUT0)\n\n\t\t# Get the actual temperature using the formule given in the\n\t\t# MPU-6050 Register Map and Descriptions revision 4.2, page 30\n\t\tactual_temp = (raw_temp / 340.0) + 36.53\n\n\t\treturn actual_temp", "def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def getHumidity(self):\n return self.humidity", "def humidity(self):\n return self._humidity", "def read_humidity(self):\n self._force_read(False)\n\n humADC = (self._read_register_1ubyte(self.BME680_HUM_MSB) << 8) | (self._read_register_1ubyte(self.BME680_HUM_LSB))\n\n return float(self._compensate_humidity(humADC))", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def humidity_sensor():\n\n\tsensor_name = \"humiture\"\n\treg_addr = 26\n\tdata_len = 4\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t# get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Humidity', RelativeHumidity, 2)\n\thumidity = data.relative_humidity\n\n\tdelete_sensor(sensor_name)\n\treturn humidity", "def get_single_resistance_metering(self, res_type='float'):\n self.amplifier.get_discrete_acquisition()\n return self.get_resistance(res_type=res_type)", "def raw_sensor_temp(self):\n\n # return the value in millicelsius\n return float(self.raw_sensor_strings[1].split(\"=\")[1])", "def get_humidity(self):\n return self._sense_hat.get_humidity()", "def get_wind_sensor(self) -> int:\n self.serial.write(b\"V!\")\n wind_sensor = self.__extract_int(self.__read_response(1)[0], b\"!w\")\n\n return wind_sensor", "def raw_to_calibrated_humidity(self, rawhumidity, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n\n adc_H = np.array(rawhumidity, dtype='int32')\n dig_H1 = self.calib_vals['dig_H1'].astype('int32')\n dig_H2 = self.calib_vals['dig_H2'].astype('int32')\n dig_H3 = self.calib_vals['dig_H3'].astype('int32')\n dig_H4 = self.calib_vals['dig_H4'].astype('int32')\n dig_H5 = self.calib_vals['dig_H5'].astype('int32')\n dig_H6 = self.calib_vals['dig_H6'].astype('int32')\n\n var = t_fine - 76800\n var = ((((adc_H << 14) - (dig_H4 << 20) - (dig_H5 * var)) + 16384) >> 15) * (((((((var * dig_H6) >> 10) * (((var *(dig_H3) >> 11) + 32768)) >> 10) + 2097152) * (dig_H2) + 8192) >> 14))\n var -= (((((var >> 15) * (var >> 15)) >> 7) * dig_H1) >> 4)\n var.ravel()[var.ravel()<0] = 0\n var.ravel()[var.ravel()>419430400] = 419430400\n return (var>>12)/1024.", "def get_temperature_sensor(self) -> Tuple[str, int]:\n self.serial.write(b\"t!\")\n temp_sensor = self.__read_response(1)[0]\n if temp_sensor[0:3] == b\"!th\":\n temp_sensor = self.__extract_int(temp_sensor, b\"!th\")\n # if we get 65536, the sensor is not connected\n if temp_sensor == 65535:\n raise CloudWatcherException(\n \"High precision RHEL/temp sensor not connected\"\n )\n return \"th\", temp_sensor\n else:\n temp_sensor 
= self.__extract_int(temp_sensor, b\"!t\")\n # if we get 100, the sensor is not connected\n if temp_sensor == 100:\n raise CloudWatcherException(\n \"Low precision RHEL/temp sensor not connected\"\n )\n return \"t\", temp_sensor", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def read_temp(self):\n return 19.0\n data = self.read(_TEMP_REG, 2)\n temp = ((data[0] * 256) + data[1]) / 16\n if temp > 2047:\n temp -= 4096\n return temp * 0.0625", "def get_temperature(self):\n \n # Get temp readings from both sensors\n humidity_temp = self._sense_hat.get_temperature_from_humidity()\n pressure_temp = self._sense_hat.get_temperature_from_pressure()\n \n # avg_temp becomes the average of the temperatures from both sensors\n # We need to check for pressure_temp value is not 0, to not ruin avg_temp calculation\n avg_temp = (humidity_temp + pressure_temp) / 2 if pressure_temp else humidity_temp\n \n # Get the CPU temperature\n cpu_temp = self._get_cpu_temp()\n \n # Calculate temperature compensating for CPU heating\n adj_temp = avg_temp - (cpu_temp - avg_temp) / 1.5\n \n # Average out value across the last three readings\n return self._get_smooth(adj_temp)", "def min_humidity(self):\n return 0", "def humidity(self):\r\n try:\r\n return str(self.connect()['main']['humidity'])\r\n except:\r\n return '@weather_humidity'", "def read_temperature(self):\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n\n return float(self._compensate_temperature(tempADC))", "def get_temperature(self, sensor: int = 0) -> float:\n\n return self.send(self.cmd.GET_HEATING_ACT)", "def get_chip_temperature(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_GET_CHIP_TEMPERATURE, (), '', 10, 'h')", "def hp34401a_read_voltage_rng_res(hp_meter , v_range, v_resolution):\n hp_meter.write(\"MEAS:VOLT:DC? 
\" + str(v_range) + \" , \" + str(v_resolution))\n return float(hp_meter.read())", "def temperature():\n snmp.temperature()\n return 0", "def get_relative_humidity(theta_ex: np.ndarray, x_ex: np.ndarray) -> np.ndarray:\n\n return read_conditions.get_relative_humidity(theta_ex, x_ex)", "def temp(self):\n\t\ttemp_out = self.read16(MPU9250_ADDRESS, TEMP_DATA)\n\t\ttemp = temp_out / 333.87 + 21.0 # these are from the datasheets\n\t\treturn temp", "def read_temperature(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 2)\n return lsm9ds1.to_int16(data)", "def calculate_dew_point(temp, hum):\n return temp - (100 - hum) / 5", "def resistance(stock):\n output= stock_max(stock)-(stock_max(stock)*.05)\n return output", "def get_temperature(\n self, sensitivity: Optional[str] = None, temp_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or temp_sensor is None:\n sensitivity, temp_sensor = self.get_temperature_sensor()\n if sensitivity == \"th\":\n temp = temp_sensor * 175.72 / 65536 - 46.85\n elif sensitivity == \"t\":\n temp = temp_sensor * 1.7572 - 46.85\n else:\n raise CloudWatcherException(\n f\"Unknown temperature sensor type {sensitivity}\"\n )\n\n return temp", "def target_temperature(self) -> int:\r\n # TODO: Find a better way to do this. This is ugly.\r\n if self._hvac_mode == \"cool\":\r\n return self.target_temperature_low\r\n elif self._hvac_mode == \"heat\":\r\n return self.target_temperature_high\r\n elif self._hvac_mode == \"heat-cool\":\r\n # TODO: Fix this so that heat or cool is chosen.\r\n if self._ambient_temperature >= self._target_temperature:\r\n return self.target_temperature_low\r\n elif self._ambient_temperature <= self._target_temperature:\r\n return self.target_temperature_high\r\n elif self._hvac_mode == \"eco\":\r\n if self._ambient_temperature >= self._target_temperature:\r\n return self.eco_temperature_low\r\n elif self._ambient_temperature <= self._target_temperature:\r\n return self.eco_temperature_high\r\n elif self._hvac_mode == \"off\":\r\n return self.ambient_temperature\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))", "def get_temperature(self):\n pass", "def temperature(altitude):\n if altitude <= 36152:\n t = 59-0.00356*altitude # deg F\n else:\n t = -70 # deg F\n t = t + 459.7 # R\n return t", "def read_humidity(self):\n hRaw = self._read_multiple_bytes_as_array(self.BME280_HUM_MSB, 2)\n\n return float(self._compensate_humidity((hRaw[0] << 8) + hRaw[1]))", "def absolute_humidity(temp, rh):\r\n e_s = temp_to_saturated_vapor_pressure(temp)\r\n e = e_s * rh / 100.\r\n rho = mbar_to_pa(e) / (R_s * degc_to_kelvin(temp))\r\n return kg_to_g(rho)", "def sensorStrength(self):\n # TODO: also return type of sensor\n radar = self._getAttribute(Attribute.scanRadarStrength)\n ladar = self._getAttribute(Attribute.scanLadarStrength)\n magnetometric = self._getAttribute(Attribute.scanMagnetometricStrength)\n gravimetric = self._getAttribute(Attribute.scanGravimetricStrength)\n\n return radar or ladar or magnetometric or gravimetric", "def dewpointf(tempf, humidity):\n return round(tempf - ((100-humidity) / 2.778), 2)", "def raw_humidity(self):\n data = self._bus.read_i2c_block_data(self.addr, self.HUM, 2)\n return (data[0] << 8) + data[1]", "def target_humidity(self):\n return self._client.hum_setpoint", "def read(self):\n # One method of getting a resource is calling get_resource from the client instance. get_resource\n # takes the lwm2m uri string as a parameter. 
The uri is the object id, then the instance id, then\n # the resource id.\n max_resource = lwm2m_client.get_resource(\"3323/1/5602\")\n # Resources can also be accessed using the index operator from the client instance.\n min_resource = lwm2m_client[3323][1][5601]\n \n pressure = self.pressure_sensor.read_psi()\n \n max_resource.value = max(max_resource.value, pressure)\n min_resource.value = min(min_resource.value, pressure)\n logger.debug(\"PressureValue read called: pressure = {}, max = {}, min = {}\".format(pressure, max_resource.value, min_resource.value))\n return pressure", "def getTemperature(self):\n return self.temperature", "def relative_humidity_from_dewpoint(dew, t2m):\n e_dew = water_vapour(dew)\n e_t2m = water_vapour(t2m)\n return e_dew / e_t2m", "def _calculate_gas_resistance(self, gasResADC, gasRange):\n var1 = (1340.0 + 5.0 * calRangeSwErr) * self.const_array1[gasRange]\n gasres = var1 * self.const_array2[gasRange] / (gasResADC - 512.0 + var1)\n\n return gasres", "def fRwTemperatureCorrected(Rw_Temp1, Temp1, Temp2):\n\treturn Rw_Temp1 * ((Temp1 + 21.5) / (Temp2 + 21.5))", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def get_sensors_data(self):\n\n temp_in_celsius = self.get_temperature()\n\n return (\n round(temp_in_celsius, 1), \n round(self.to_fahrenheit(temp_in_celsius), 1), \n round(self.get_humidity(), 0), \n round(self.get_pressure(), 1)\n )", "def get_resistance(self, res_type='float'):\n if res_type == 'text':\n # Output to be a string\n # Transform value (in Ohm) as a int string\n val = str(int(self.amplifier.res))\n\n # Compute displayable unit of the value\n unit = (len(val) - 1) // 3\n length = len(val) - unit * 3\n if unit <= 0:\n unit = ' Ohm'\n elif unit == 1:\n unit = ' kOhm'\n elif unit == 2:\n unit = ' MOhm'\n elif unit == 3:\n unit = ' GOhm'\n elif unit == 4:\n unit = ' TOhm'\n else:\n unit = ' 1E{} Ohm'.format(unit * 3)\n\n # Change the unit of the value\n if len(val) < length + 3:\n text_value = val[:length] + '.' + val[length:] + unit\n else:\n text_value = val[:length] + '.' + val[length:length + 2] + unit\n\n return text_value\n\n elif res_type == 'float':\n # Output to be a float\n return self.amplifier.res", "def get_humidity(self):\n return randint(25, 50)", "def readhumidity(self, cTemp):\r\n\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\twhile (data & 0x01) != 0 :\r\n\t\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\tdata1 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\tdata2 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\t\r\n\t\t# Convert the data to 12-bits\r\n\t\thumidity = ((data1 * 256 + (data2 & 0xF0)) / 16.0)\r\n\t\t\r\n\t\tif humidity < 0x180 :\r\n\t\t\thumidity = 0x180\r\n\t\telif humidity > 0x7C0 :\r\n\t\t\thumidity = 0x7C0\r\n\t\telse :\r\n\t\t\thumidity = humidity\r\n\t\t\r\n\t\thumidity = (humidity / 16.0) - 24.0\r\n\t\tlinearhumidity = humidity - (((humidity * humidity) * (-0.00393)) + (humidity * 0.4008) - 4.7844)\r\n\t\ttempcomphumidity = linearhumidity + ((cTemp - 30.00) * (linearhumidity * 0.00237 + 0.1973))\r\n\t\t\r\n\t\treturn {'h' : humidity, 'l' : linearhumidity, 't' : tempcomphumidity}", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) 
* self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def get_humidity(self, relative=True):\n data = self.get_data()\n\n if relative:\n return data['humidity']\n\n return calculate_abs_humidity(pressure=data['pressure'],\n temperature=data['temperature'],\n rel_humidity=data['humidity'])", "def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)", "def current_humidity(self):\n return self._client.get_indoor_humidity()", "def getTemperature(self):\n with self.lock:\n temp = self.temp\n return temp", "def get_machine_temp(self):\n try:\n self.boardcon.flushInput()\n self._write(chr(self.outgoing_temp))\n temp = ord(self._read(1)) + (0.1 * ord(self._read(1)))\n except:\n logger.error(\"Cannot read Temperature for /dev/ttyUSB{0}\"\n .format(self.dev_id)\n )\n temp = 0.0\n return temp", "def temperature_sensor():\n\n\tsensor_name = \"humiture\"\n\treg_addr = 26\n\tdata_len = 4\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\t# get sensor data\n\tdata = rospy.wait_for_message('MediumSize/SensorHub/Temperature', Temperature, 2)\n\ttemperature = data.temperature\n\n\tdelete_sensor(sensor_name)\n\treturn temperature", "def get_external_temp():\n baseurl = \"http://api.openweathermap.org/data/2.5/weather\"\n query = \"?q=salhouse&mode=xml\"\n url = baseurl + query\n r = requests.get(url)\n root = ET.fromstring(r.text)\n kelvin = float(root[1].attrib.get('value'))\n celcius = kelvin - 272.15\n return celcius", "def _pwr_std_temp(rpm, MP, altitude):\n # get the power at sea level (i.e. point B on the left side of the Lycoming power chart)\n \n # get pwr at two even hundreds of rpm, and then interpolate\n if rpm >= 2600:\n rpm1 = 2600\n elif rpm <= 1800:\n rpm1 = 1800\n else:\n rpm1 = rpm - rpm % 100\n\n rpm2 = rpm1 + 100\n \n pwr_SL1 = _pwr_sl(rpm1, MP)\n pwr_SL2 = _pwr_sl(rpm2, MP)\n # print \"SL Pwr 1=\", pwr_SL1\n # print \"SL Pwr 2=\", pwr_SL2\n \n # get power at full throttle at this rpm and MP at altitude (i.e. point A on the right side of the Lycoming power chart)\n # density ratio at point A on the right side of the Lycoming power chart)\n pwr_FT1, DR_FT1 = _hp_at_MP_and_altitude(rpm1, MP)\n pwr_FT2, DR_FT2 = _hp_at_MP_and_altitude(rpm2, MP)\n # print \"FT pwr 1=\", pwr_FT1\n # print \"FT pwr 2=\", pwr_FT2\n # print \"DR FT 1=\", DR_FT1\n # print \"DR FT 2=\", DR_FT2\n \n # density ratio at sea level\n DR_sl = 1\n \n # density ratio for the actual conditions (i.e. point D on the right side of the Lycoming power chart)\n DR_test = SA.alt2density_ratio(altitude)\n # print \"DR_test=\", DR_test\n \n # function is unstable if the DR at FT is close to 1. 
This sends the slope off to unpredictable values.\n slope1=(pwr_FT1 - pwr_SL1) / (DR_FT1 - DR_sl)\n slope2=(pwr_FT2 - pwr_SL2) / (DR_FT2 - DR_sl)\n \n if MP > 28:\n if slope1 < -80:\n slope1=-62\n elif slope1> -60:\n slope1=-62\n if slope2< -80:\n slope2 = -62\n elif slope2> -60:\n slope2=-62\n \n # print \"slope1=\", slope1\n # print \"slope2=\", slope2\n \n pwr_std_temp1 = pwr_SL1 + (DR_test - DR_sl) * slope1\n pwr_std_temp2 = pwr_SL2 + (DR_test - DR_sl) * slope2\n # print \"Pwr Std Temp 1=\", pwr_std_temp1\n # print \"Pwr Std Temp 2=\", pwr_std_temp2\n pwr_std_temp = pwr_std_temp1 + (rpm - rpm1) * (pwr_std_temp2 - pwr_std_temp1) / (rpm2 - rpm1)\n\n return pwr_std_temp", "def current_humidity(self):\n return self._current_humidity", "def get_absolute_humidity(region: int) -> np.ndarray:\n\n return read_conditions.read_absolute_humidity(region)", "def getDHT():\n dht_humidity, cels = Adafruit_DHT.read(DHT_TYPE, DHT_PIN)\n if cels and dht_humidity:\n dht_temp = cels_fahr(cels)\n else:\n checkDebug(\"*** Unable to get DHT values! ***\")\n dht_temp = 0\n dht_humidity = 0\n return dht_temp, dht_humidity", "async def feels_like(self, temperature, humidity, windspeed):\n if temperature is None or humidity is None or windspeed is None:\n return 0\n\n e_value = (\n humidity * 0.06105 * math.exp((17.27 * temperature) / (237.7 + temperature))\n )\n feelslike_c = temperature + 0.348 * e_value - 0.7 * windspeed - 4.25\n return await self.temperature(feelslike_c)", "def get_temp(self):\n lines = self._get_temp_raw()\n\n while not self._is_successful_read(lines):\n time.sleep(0.2)\n lines = self._get_temp_raw()\n \n try: \n temp_file_location = lines[1].find('t=')\n except: \n print(\"ERROR: w1_slave file corrupted. No t= found.\")\n \n if temp_file_location is not -1:\n temp_string = lines[1][temp_file_location+2:]\n temp = float(temp_string) / 1000.0\n return temp", "def get_temperature(self):\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_GET_TEMPERATURE, (), '', 'i')", "def temperature(self) -> SmartSsdTemperature:\n return self._temperature", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def input_resistance(self):\n return None", "def low_temperature(self):\r\n return self._yesterdays_weather.get_low_temperature()", "def get_humidity_tripped(self, sensor):\n if sensor >= self. num_humidities or sensor < 0:\n raise I2CException('Illegal sensor index {} specified'.format(sensor))\n\n return self.__humidity_trips[sensor]", "def __getRawTemperature(self):\n t1 = self.read_byte_data(self.address, 0x03)\n t2 = self.read_byte_data(self.address, 0x04)\n t3 = self.read_byte_data(self.address, 0x05)\n t = (t1 << 16) | (t2 << 8) | t3\n t = getTwosComplement(t, 24)\n return t", "def target_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_low_c)", "def get_humidity(self):\n\n svc = \"urn:micasaverde-com:serviceId:HumiditySensor1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentLevel\")", "def target_temperature(self):\n return self.atag.dhw_target_temperature", "def temperature():\n\tsensors = commands.getstatusoutput('sensors -u | grep -E temp[0-9]_input')\n\n\tif sensors[0] == 1:\n\t\traise Exception('lm-sensors is not setup. 
Run sensors-detect')\n\n\tif sensors[0] == 127:\n\t\traise Exception('lm-sensors is not installed')\n\n\ttemps = re.findall(r\"(\\d{2}.\\d+)\",sensors[1],re.M)\n\n\tif not temps:\n\t\traise Exception('No temperature sensors found')\n\n\tfor i,temp in enumerate(temps):\n\t\ttemps[i] = float(temp)\n\t\ttemps[i] = int(temps[i])\n\n\treturn max(temps)", "def _value_as_humidity(self):\n return int(round(float(self._value)))" ]
[ "0.7635503", "0.74764", "0.69839656", "0.68464166", "0.6839649", "0.67363864", "0.6672957", "0.663751", "0.65953994", "0.65851945", "0.6578019", "0.65570176", "0.65442514", "0.6527985", "0.64984643", "0.64984643", "0.6488228", "0.6481326", "0.6471587", "0.646373", "0.639092", "0.63794535", "0.63506013", "0.63406694", "0.63393354", "0.63317955", "0.632059", "0.630704", "0.6301418", "0.6294441", "0.6291499", "0.62868947", "0.62746954", "0.62614596", "0.62447035", "0.6243039", "0.6237848", "0.6231664", "0.6226976", "0.6213069", "0.6203158", "0.6187395", "0.6164825", "0.61322445", "0.61265254", "0.610064", "0.60970986", "0.6051532", "0.6029923", "0.60237354", "0.60109156", "0.6001813", "0.5996555", "0.59927064", "0.5981848", "0.59777737", "0.5977735", "0.5976528", "0.59726965", "0.596895", "0.5954706", "0.59466517", "0.5945293", "0.59402436", "0.5935744", "0.59315455", "0.5913501", "0.59103453", "0.5900541", "0.59001654", "0.5893804", "0.58864003", "0.5879651", "0.5879651", "0.5878992", "0.58717257", "0.5856716", "0.58553064", "0.5850455", "0.5848875", "0.5839419", "0.5834306", "0.58322823", "0.58291596", "0.582672", "0.582409", "0.58229214", "0.5822396", "0.5818623", "0.5800962", "0.57976663", "0.5791264", "0.57886785", "0.5780613", "0.5777327", "0.57743096", "0.57710177", "0.5764023", "0.5759413" ]
0.7932213
1
Returns the ppm of CO2 sensed (assuming only CO2 in the air)
def get_ppm(self):
    return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def p2(self) -> float:\n return self.distortion_coefficients[4]", "def read_ch2_pressure(self):\n sensor = self.ch2_index + 1\n return self.vgc.read_sensor(sensor)", "def m2(self):\n return self.mass[1]", "def Pp(nccd):\n return (128.1-56.9) * (nccd - 1) / (6-1) + 56.9", "def getP2(self):\n return self.points[1]", "def phaseChi2(self, pars):\n\t\treturn self.modeChi2(pars, PHASE)", "def _p_value(self):\n pval = chi2.sf(self.chi_square, self.degrees_of_freedom)\n\n return pval", "def p2_pits(self):\n return self.state[self.M + 1:-1]", "def eCO2(self) -> int:\n return self.iaq_measure()[0]", "def ppm(self):\n\t\treturns self.bottle.ppm", "def _ice_d2gdp2(temp,pres):\n # Reduced variables\n tn = temp/_TTP\n pn = pres/_PTPE\n _PI0 = _PATM/_PTPE\n g_pp = 0.\n \n # Power series and entropy components\n for (l,n) in enumerate(_GCOEFFS[0]):\n if l > 1:\n g_pp += n * l*(l-1) * (pn-_PI0)**(l-2) / _PTPE**2\n \n # Residual terms including complex numbers\n s = _GCOEFFS[2][2] * 2. / _PTPE**2\n tk = _GCOEFFS[3][1]\n term = (tk-tn)*numpy.log(tk-tn) + (tk+tn)*numpy.log(tk+tn)\n term -= 2*tk*numpy.log(tk) + tn**2/tk\n g_pp += _TTP * (s*term).real\n return g_pp", "def dispersion(self, p):\n return p**2 / (2*self.mass)", "def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value", "def point_to_ppm(point, procs, proc2s):\n \n # It seems that F1 is related to the Y axis, while F2 is related to the X axis\n \n begin = (float(proc2s[\"OFFSET\"]), float(procs[\"OFFSET\"]))\n # End is begin-sw_p/sf, so step is (end-begin)/si, which simplifies to\n # (-sw_p/sf+1)/si\n step = [(-float(p[\"SW_p\"])/float(p[\"SF\"]))/float(p[\"SI\"]) \n for p in [proc2s, procs] ]\n \n return [begin[i]+step[i]*point[i] for i in (0,1)]", "def k2(self) -> float:\n return self.distortion_coefficients[1]", "def ppm(self):\n if self._ppm is not None:\n return(self._ppm)\n if self.larmor is None:\n return(None)\n if self._f is not None:\n self._ppm = -self._f/(self.larmor)*1.0e6 + self._ppmshift\n return(self._ppm)\n\n return(None)", "def local_co2(self):\n return self.PV_CO2 * self.ratio_local_cons + self.grid_co2 * (1. 
- self.ratio_local_cons)", "def probabilities_2nu_matter_std(sth, Dm2, VCC, energy, L):\n x = 2.0*VCC*(energy*1.e9)/Dm2\n cth = sqrt(1.0-sth*sth)\n s2th = 2.0*sth*cth\n s2thsq = s2th*s2th\n c2th = sqrt(1.0-s2thsq)\n\n Dm2m = Dm2*sqrt(s2thsq+pow(c2th-x, 2.0))\n s2thmsq = s2thsq / (s2thsq+pow(c2th-x, 2.0))\n\n arg = 1.27*Dm2m*L/energy#/4.0\n\n Pem = s2thmsq * pow(sin(arg), 2.0)\n Pme = Pem\n Pee = 1.0-Pem\n Pmm = 1.0-Pme\n\n prob = [Pee, Pem, Pme, Pmm]\n\n return prob", "def distmpc(self):\n return self._distance.to(\"Mpc\").value", "def p(self) -> float:\n return self._pwr.real", "def get_corrected_ppm(self, temperature, humidity):\n return self.PARA * math.pow((self.get_corrected_resistance(temperature, humidity)/ self.RZERO), -self.PARB)", "def get_corrected_ppm(self, temperature, humidity):\n return self.PARA * math.pow((self.get_corrected_resistance(temperature, humidity)/ self.RZERO), -self.PARB)", "def pois_metric(pipe_diameter, delta_p, pipe_length):\n mu = 0.001 # water @ 25 degrees C\n pois = mu * 10\n flow_rate_lam = (math.pi * (pipe_diameter ** 4) * delta_p) / (128 * pois * pipe_length)\n\n return flow_rate_lam", "def phi2_coefficient(L):\r\n\r\n if 0 < L < 120:\r\n return L / 120\r\n if L >= 120:\r\n return 1", "def gals_per_arcmin2(self):\n return self._gals_per_arcmin2", "def findpc(self):\n u = -(-can.C.len() + self.P.len() + can.R)/self.V.len()\n if u >= 0:\n return self.P + self.V.scale(u), u\n else:\n u = (can.C.len() - self.P.len() + can.R)/self.V.len()\n return self.P + self.V.scale(u), u", "def calc_psd2d(self):\n print(\"Calculating 2D power spectral density ... \", end=\"\", flush=True)\n rows, cols = self.shape\n imgf = np.fft.fftshift(np.fft.fft2(self.image))\n # NOTE: normalize w.r.t. image size\n norm = rows * cols * self.pixel[0]**2\n self.psd2d = (np.abs(imgf) ** 2) / norm\n print(\"DONE\", flush=True)\n return self.psd2d", "def JC(lcsc1c2, Pc1, Pc2):\n\n JC = 1/(2*lcsc1c2 - (Pc1 + Pc2))\n return JC", "def _two_sided_p_value(t, df):\n return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)", "def pc_nproduced(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_nproduced(self)", "def get_pressure(self):\n value = self.synth.cbox.get_adcs()[self.id_]\n value = value / self.conf['PSICONV']\n log.debug(\"Current pressure on regulator %d = %f\",\n self.id_, value)\n return value", "def get_pressure(self):\n return self._sense_hat.get_pressure() * 0.0295300", "def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh", "def _fatality_prob(self, O2_conc):\n if O2_conc >= 0.18: # Lowest oxygen concentration above 18%\n Fi = 0\n elif O2_conc <= 0.088: # 8.8% of oxygen is assumed to be 100% fatal\n Fi = 1\n else:\n # Fi formula, reverse engineered using 8.8% and 18% thresholds\n Fi = 10**(6.5-76*O2_conc)\n return Fi", "def mp2(mol):\n E_SCF, C_a, C_b, ea, eb = hartree_fock.UHF(mol)\n S, T, V, g_ao = integrals.compute_integrals(mol)\n I_phys, C, eps = spin_orbital_setup.spin_orbital(C_a, C_b, ea, eb, g_ao)\n nocc = mol.ndocc * 2 + mol.nsocc\n\n gmo = np.einsum('pQRS, pP -> PQRS', \n np.einsum('pqRS, qQ -> pQRS', \n np.einsum('pqrS, rR -> pqRS', \n np.einsum('pqrs, sS -> pqrS', I_phys, C), C), C), C) \n \n # Form 4-index tensor of orbital energy denominators\n n = np.newaxis\n o = slice(None, nocc)\n v = slice(nocc, None)\n eps = 1 / (-eps[v, n, n, n] - eps[n, v, n, n] + eps[n, n, o, n] + eps[n, n, n, o])\n # Compute energy\n E_mp2 = (1 / 4) * np.einsum('ijab, abij ->', gmo[o, o, v, v], gmo[v, v, o, o] * eps)\n\n mp2_total_energy = E_mp2 + E_SCF\n print(\"MP2 
Correlation Energy: \" + str(E_mp2))\n print(\"MP2 Total Energy: \" + str(mp2_total_energy))\n return mp2_total_energy", "def radial2(self) -> float:\n return self.distortion_coefficients[0]", "def _calc_pval(self):\n t = self.beta / self.stderr_beta\n return (2. * (1. - stats.t.cdf(np.abs(t), self.n - 2)))[0]", "def P(self):\n return self.generic_getter(get_pressure, \"p\", \"convert_pressure\")", "def graphite_entropic_change_PeymanMPM(sto, c_s_max):\n\n du_dT = 10 ** (-3) * (\n 0.28\n - 1.56 * sto\n - 8.92 * sto ** (2)\n + 57.21 * sto ** (3)\n - 110.7 * sto ** (4)\n + 90.71 * sto ** (5)\n - 27.14 * sto ** (6)\n )\n\n return du_dT", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def calculateCoM(dpt):\n\n dc = dpt.copy()\n dc[dc < 0] = 0\n dc[dc > 10000] = 0\n cc = ndimage.measurements.center_of_mass(dc > 0)\n num = np.count_nonzero(dc)\n com = np.array((cc[1]*num, cc[0]*num, dc.sum()), np.float)\n\n if num == 0:\n return np.array((0, 0, 0), np.float)\n else:\n return com/num", "def calculateCoM(dpt):\n\n dc = dpt.copy()\n dc[dc < 0] = 0\n dc[dc > 10000] = 0\n cc = ndimage.measurements.center_of_mass(dc > 0)\n num = np.count_nonzero(dc)\n com = np.array((cc[1]*num, cc[0]*num, dc.sum()), np.float)\n\n if num == 0:\n return np.array((0, 0, 0), np.float)\n else:\n return com/num", "def get_digp2(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_DIGP2,\n 2,\n addrsize=16\n )\n val = 0\n val = val << 8 | byte_list[0]\n val = val << 8 | byte_list[1]\n # Unsigned > Signed integer\n val = _sign(val, 16)\n return val", "def part2(input):\n ps = PlanetSystem(input)\n c = ps.total_cycle_time()\n return c", "def cohensd2problarger(d):\n\n return stats.norm.cdf(d / np.sqrt(2))", "def a_to_P(M1, M2, a):\n mu = c.GGG * (M1 + M2) * c.Msun_to_g\n n = np.sqrt(mu/(a**3 * c.Rsun_to_cm**3))\n return 2.0*np.pi / n / c.day_to_sec", "def ps(image):\n\timage = image.astype(float)\n\tps_img = abs(pow(fft2(image), 2))\n\treturn ps_img", "def get_chamber_pressure(self):\n raise NotImplementedError", "def getDPSA2(ChargeSA):\n return getPPSA2(ChargeSA)-getPNSA2(ChargeSA)", "def carbon_prime(C,p,p0):\r\n \r\n if p > p0:\r\n return C\r\n else:\r\n return .03", "def ppm_to_point(ppm, procs, proc2s):\n \n # It seems that F1 is related to the Y axis, while F2 is related to the X axis\n \n begin = (float(proc2s[\"OFFSET\"]), float(procs[\"OFFSET\"]))\n # End is begin-sw_p/sf, so step is (end-begin)/si, which simplifies to\n # (-sw_p/sf+1)/si\n step = [(-float(p[\"SW_p\"])/float(p[\"SF\"]))/float(p[\"SI\"]) \n for p in [proc2s, procs] ]\n \n return [(ppm[i]-begin[i])/step[i] for i in (0,1)]", "def plot_co2perc(data_frame):\n figco2, axco2 = plot_var(\n data_frame=data_frame,\n x_var=\"flow\",\n y_var=\"CO2 %\",\n label_var=\"mpr\",\n pivot=\"distance\",\n x_label=\"Flow [veh/m]\",\n y_label=r\"Change in CO$_2$ [\\%]\",\n t_label=\"Distance [m]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n\n return figco2, axco2", "def ps2D2psxhat(ps2Di):\n\t\t\tpsrelproj3D = np.array([ps2Di[0], ps2Di[1], 0], float)\n\t\t\tpsrel = np.dot(psbasis, psrelproj3D)\n\t\t\tps = psrel + reqv\n\t\t\t#ps = np.array([ps3D[0], 0, ps3D[1], ps3D[2]], float)\n\t\t\treturn ps", "def mass(self):\n return self._P", "def sr_to_mpc2(z, cosmo):\n return cosmo.comoving_distance(z) / (1 * un.sr)", "def ndp2(self):\n if self._ndp2 is None:\n if self.nplan > 1:\n self._ndp2 = self.ndp3 // 2\n 
else:\n self._ndp2 = self.ndp3\n\n return self._ndp2", "def a_psilc02(self, psi_l):\n\t if psi_l < self.PSILA0:\n\t return 0.\n\t elif self.PSILA0 <= psi_l <= self.PSILA1 :\n\t return (psi_l - self.PSILA0)/(self.PSILA1 - self.PSILA0)\n\t else: \n\t return 1.", "def get_pressure(self): # This function implements the equations needed to convert the digital data into mbars\n self.digital_pressure_data()\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n temperature, dT=self.get_temperature()\n OFF = ((C_2 * (2**16)) + ((C_4 * dT)/2**7))\n SENS = (C_1 * (2**15)) + ((C_3 * dT)/(2**8))\n pressure=(((self.presadc*(SENS/(2**21)))-OFF)/(2**15))/100\n return pressure, temperature", "def p1(self) -> float:\n return self.distortion_coefficients[3]", "def pc_nproduced_avg(self):\n return _spacegrant_swig.general_burster_2_sptr_pc_nproduced_avg(self)", "def pc_nproduced_avg(self):\n return _spacegrant_swig.invert_bit_sptr_pc_nproduced_avg(self)", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calc_CO2_compensation_point(self, Tleaf):\n\n # Rubisco specificity for CO2 relative to O2\n tau = self.Q10_func(self.gamma25, self.Q10_gamma, Tleaf)\n gamma = self.Oa / (2.0 * tau)\n\n return gamma", "def _p_value(self):\n p_value = chi2.sf(self.test_statistic, 2)\n\n return p_value", "def poynting(t, dt):\r\n p1satbar, p2satbar, dp1satbar, dp2satbar = psat(t, dt)\r\n poy1 = math.exp(vl1 * (p - p1satbar)/(r * t))\r\n poy2 = math.exp(vl2 * (p - p2satbar)/(r * t))\t\r\n dpoy1 = math.exp(vl1 * (p - dp1satbar)/(r * dt))\r\n dpoy2 = math.exp(vl2 * (p - dp2satbar)/(r * dt))\r\n return poy1, poy2, dpoy1, dpoy2", "def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)", "def pc_nproduced(self):\n return _spacegrant_swig.general_burster_2_sptr_pc_nproduced(self)", "def convert_co2_to_miles_driven(co2_saved):\n pounds_in_metric_ton = 2204.62\n tons_co2_per_gallon = 0.0089\n avg_gas_mileage_us_fleet = 22\n mileage_equivalent = co2_saved / pounds_in_metric_ton / tons_co2_per_gallon * avg_gas_mileage_us_fleet\n \n return mileage_equivalent", "def pseudo_r2(self):\n y_reg = self.time_series(len(self.data))\n SSres = ((self.data - y_reg)**2).sum()\n SStot = ((self.data - self.data.mean())**2).sum()\n return 1 - SSres/SStot", "def get_utilization(self, node: int) -> float:\n return self.busy[node].pmf(1)", "def get_Pn(f, L, S_lp, S_ac): \r\n # single-link optical metrology noise (Hz^{-1}), Equation (10)\r\n P_oms = S_lp**2 \r\n # single test mass acceleration noise, Equation (11)\r\n P_acc = S_ac**2*(1. + 0.1e-3/f) \r\n # total noise in Michelson-style LISA data channel, Equation (12)\r\n Pn = (P_oms + 4.*P_acc/(2.*pi*f)**4.)/L**2. 
\r\n return Pn", "def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))", "def pss(self):\n return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \\\n ((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))", "def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5", "def probabilities_2nu_vacuum_std(sth, Dm2, energy, L):\n arg = 1.27*Dm2*L/energy#/4.0\n cth = sqrt(1.0-sth*sth)\n s2th = 2.0*sth*cth\n\n Pem = s2th*s2th * pow(sin(arg), 2.0)\n Pme = Pem\n Pee = 1.0-Pem\n Pmm = 1.0-Pme\n\n prob = [Pee, Pem, Pme, Pmm]\n\n return prob", "def mod(o2, po2=GUE_PO2):\n pctg = o2 / 100\n return math.floor(((po2 / pctg) * 10) - 10)", "def get_power(self):\r\n return self.p", "def DPc(R,Pc):\n return r2*(K2**B2/(K2**B2 + (A)**B2))*(S/(S + R*Pc + Pc)) \\\n *(R*M)/(K3 + R*M)*Pc - gc*Pc", "def ampm(self):\n return self._pm", "def I2_u2(self) -> complex:\n return self.I2_u1() * cmath.rect(1, -120 / 180 * cmath.pi)", "def sic_povm(d):\n return weyl_heisenberg_povm(load_fiducial(d))", "def readMol2TotalCharge(self, mol2File):\n charge = 0.0\n ll = []\n cmd = '%s -i %s -fi mol2 -o tmp -fo mol2 -c wc -cf tmp.crg -pf y' % \\\n (self.acExe, mol2File)\n if self.debug:\n self.printMess(\"Debugging...\")\n cmd = cmd.replace('-pf y', '-pf n')\n\n self.printDebug(cmd)\n\n log = getoutput(cmd)\n\n if log.isspace():\n tmpFile = open('tmp.crg', 'r')\n tmpData = tmpFile.readlines()\n for line in tmpData:\n ll += line.split()\n charge = sum(map(float,ll))\n elif self.debug:\n self.printQuoted(log)\n\n self.printDebug(\"readMol2TotalCharge: \" + str(charge))\n\n return charge", "def pc_nproduced_avg(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_nproduced_avg(self)", "def overpotential2(x, doh):\n # | - overpotential2\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, -x + 2.46, -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|", "def overpotential2(x, doh):\n # | - overpotential2\n dooh = ooh_oh_scaling(doh)\n dg14 = [doh, x, -x + 2.46, -dooh + 4.92]\n m = max(dg14)\n return(m - 1.23)\n #return doh*do\n #__|", "def molar_mass_dry_air():\n return 28.9647", "def co2_equivalent(self):\n return self.indoor_air_quality[0]", "def psd(self, frequency):\n #if frequency < 1 / self.obstime: return np.nan\n #if frequency > 1 / self.cadence: return np.nan\n outs = np.ones(len(frequency))\n outs[frequency < 1/self.obstime] = np.nan\n outs[frequency > 1/self.cadence] = np.nan\n return (2 * 1./self.cadence * self.rms**2)*outs", "def calc_o2_working_cap(isot_dict):\n\n out_dict = {}\n out_dict['is_porous'] = isot_dict['is_porous']\n if out_dict['is_porous']:\n\n ip5 = isot_dict['isotherm']['pressure'].index(5.0)\n ip140 = isot_dict['isotherm']['pressure'].index(140.0)\n\n # conversion factors form mol/kg to cm3STP/cm3 and wt%\n conv1 = isot_dict['conversion_factor_molec_uc_to_cm3stp_cm3'] / isot_dict['conversion_factor_molec_uc_to_mol_kg'] # pylint: disable=line-too-long\n conv2 = get_molec_uc_to_mg_g(isot_dict) / isot_dict['conversion_factor_molec_uc_to_mol_kg'] / 10\n\n wc_140bar_average = isot_dict['isotherm']['loading_absolute_average'][ip140] - isot_dict['isotherm'][\n 'loading_absolute_average'][ip5]\n wc_140bar_dev = sqrt(isot_dict['isotherm']['loading_absolute_dev'][ip5]**2 +\n isot_dict['isotherm']['loading_absolute_dev'][ip140]**2)\n wc_140bar_fract = wc_140bar_average / isot_dict['isotherm']['loading_absolute_average'][ip140]\n\n out_dict.update({\n 'enthalpy_of_adsorption_5bar_average': 
isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip5],\n 'enthalpy_of_adsorption_5bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip5],\n 'enthalpy_of_adsorption_5bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'enthalpy_of_adsorption_140bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip140],\n 'enthalpy_of_adsorption_140bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip140],\n 'enthalpy_of_adsorption_140bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'wc_140bar_cm3stp/cm3_average': wc_140bar_average * conv1,\n 'wc_140bar_cm3stp/cm3_dev': wc_140bar_dev * conv1,\n 'wc_140bar_cm3stp/cm3_unit': 'cm3 STP/cm3',\n 'wc_140bar_wt%_average': wc_140bar_average * conv2,\n 'wc_140bar_wt%_dev': wc_140bar_dev * conv2,\n 'wc_140bar_wt%_unit': 'g/g/100',\n 'wc_140bar_mol/kg_average': wc_140bar_average,\n 'wc_140bar_mol/kg_dev': wc_140bar_dev,\n 'wc_140bar_mol/kg_unit': 'mol/kg',\n 'wc_140bar_fraction': wc_140bar_fract,\n 'wc_140bar_fraction_unit': '-',\n })\n return Dict(dict=out_dict)", "def pc_nproduced_avg(self):\n return _spacegrant_swig.message_debug_sptr_pc_nproduced_avg(self)", "def npoin2(self):\n if self._npoin2 is None:\n if self.nplan > 1:\n self._npoin2 = self.npoin3//self.nplan\n else:\n self._npoin2 = self.npoin3\n\n return self._npoin2", "def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500", "def pc_nproduced_avg(self):\n return _spacegrant_swig.binary_sink_sptr_pc_nproduced_avg(self)", "def _cost_petrol(self):\n return self.distance * self.petrol_usage * self.petrol_cost", "def pressure(self):\r\n self._read_temperature()\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped\r\n var1 = float(self._t_fine) / 2.0 - 64000.0\r\n var2 = var1 * var1 * self._pressure_calib[5] / 32768.0\r\n var2 = var2 + var1 * self._pressure_calib[4] * 2.0\r\n var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0\r\n var3 = self._pressure_calib[2] * var1 * var1 / 524288.0\r\n var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0\r\n var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]\r\n if var1 == 0:\r\n return 0\r\n if var1:\r\n pressure = 1048576.0 - adc\r\n pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1\r\n var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0\r\n var2 = pressure * self._pressure_calib[7] / 32768.0\r\n pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0\r\n\r\n pressure /= 100\r\n if pressure < _BME280_PRESSURE_MIN_HPA:\r\n return _BME280_PRESSURE_MIN_HPA\r\n if pressure > _BME280_PRESSURE_MAX_HPA:\r\n return _BME280_PRESSURE_MAX_HPA\r\n return pressure\r\n else:\r\n return _BME280_PRESSURE_MIN_HPA", "def get_power(self) -> float:\n\n #:READ[n][:CHANnel[m]][:SCALar]: POWer[:DC]?\n return float(self._inst.query(\":READ:POW?\"))", "def pc_nproduced(self) -> \"float\":\n return _beamforming_swig.phasedarray_sptr_pc_nproduced(self)", "def getWPSA2(ChargeSA):\n temp=0.0\n for i in ChargeSA:\n temp=temp+i[2]\n if temp == 0.0:\n return 0.0\n return getPPSA2(ChargeSA)*temp/1000.0" ]
[ "0.69014174", "0.614412", "0.61084104", "0.59173745", "0.5897229", "0.5883357", "0.5818302", "0.5813648", "0.57905394", "0.572007", "0.5702351", "0.5689262", "0.5672273", "0.56600434", "0.5656385", "0.5643109", "0.5642826", "0.55892974", "0.55835027", "0.5540734", "0.5529631", "0.5529631", "0.5525461", "0.5516866", "0.5510978", "0.5488735", "0.5469861", "0.54663664", "0.54618293", "0.54588", "0.5453405", "0.544619", "0.5445543", "0.54073906", "0.5390489", "0.53807414", "0.5379204", "0.53568006", "0.53545713", "0.5353329", "0.53431237", "0.53431237", "0.5335648", "0.5330836", "0.53287506", "0.53260076", "0.5311737", "0.5310121", "0.53097606", "0.52742517", "0.52721155", "0.526847", "0.5266371", "0.5260024", "0.5258361", "0.52555454", "0.5255507", "0.52553546", "0.5251279", "0.52510387", "0.5224131", "0.5215335", "0.5215335", "0.5204202", "0.51965606", "0.51946557", "0.5188935", "0.51883584", "0.5185309", "0.5185102", "0.51836276", "0.5176269", "0.5156825", "0.5154128", "0.5148787", "0.51437646", "0.5143072", "0.51412404", "0.51380193", "0.5130979", "0.51205134", "0.5118488", "0.5109248", "0.5106821", "0.5106322", "0.5106322", "0.5105371", "0.5101587", "0.50988686", "0.50954884", "0.5094871", "0.5084819", "0.5083346", "0.5081576", "0.50812054", "0.50796163", "0.50697786", "0.50670207", "0.5055991" ]
0.6490881
2
Returns the ppm of CO2 sensed (assuming only CO2 in the air) corrected for temperature/humidity
def get_corrected_ppm(self, temperature, humidity):
    return self.PARA * math.pow((self.get_corrected_resistance(temperature, humidity)/ self.RZERO), -self.PARB)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_ch2_pressure(self):\n sensor = self.ch2_index + 1\n return self.vgc.read_sensor(sensor)", "def p2(self) -> float:\n return self.distortion_coefficients[4]", "def get_pressure(self): # This function implements the equations needed to convert the digital data into mbars\n self.digital_pressure_data()\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n temperature, dT=self.get_temperature()\n OFF = ((C_2 * (2**16)) + ((C_4 * dT)/2**7))\n SENS = (C_1 * (2**15)) + ((C_3 * dT)/(2**8))\n pressure=(((self.presadc*(SENS/(2**21)))-OFF)/(2**15))/100\n return pressure, temperature", "def pressure(self):\r\n self._read_temperature()\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped\r\n var1 = float(self._t_fine) / 2.0 - 64000.0\r\n var2 = var1 * var1 * self._pressure_calib[5] / 32768.0\r\n var2 = var2 + var1 * self._pressure_calib[4] * 2.0\r\n var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0\r\n var3 = self._pressure_calib[2] * var1 * var1 / 524288.0\r\n var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0\r\n var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]\r\n if var1 == 0:\r\n return 0\r\n if var1:\r\n pressure = 1048576.0 - adc\r\n pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1\r\n var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0\r\n var2 = pressure * self._pressure_calib[7] / 32768.0\r\n pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0\r\n\r\n pressure /= 100\r\n if pressure < _BME280_PRESSURE_MIN_HPA:\r\n return _BME280_PRESSURE_MIN_HPA\r\n if pressure > _BME280_PRESSURE_MAX_HPA:\r\n return _BME280_PRESSURE_MAX_HPA\r\n return pressure\r\n else:\r\n return _BME280_PRESSURE_MIN_HPA", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_pressure(self):\n return self._sense_hat.get_pressure() * 0.0295300", "def h2o_from_rh_and_temp(RH, TEMP):\n TC = TEMP - 273.15\n frh = RH / 100.\n svp_millibar = 6.11 * 10**((7.5 * TC)/(TC+237.3))\n svp_pa = svp_millibar * 100\n vp_pa = svp_pa * frh\n molecule_per_cubic_m = vp_pa * Avogadro / R / TEMP\n molecule_per_cubic_cm = molecule_per_cubic_m * centi**3\n #print RH, TEMP, molecule_per_cubic_cm\n return molecule_per_cubic_cm", "def __getRawPressure(self):\n p1 = self.read_byte_data(self.address, 0x00)\n p2 = self.read_byte_data(self.address, 0x01)\n p3 = self.read_byte_data(self.address, 0x02)\n p = (p1 << 16) | (p2 << 8) | p3\n p = getTwosComplement(p, 24)\n return p", "def get_pressure(self):\n value = self.synth.cbox.get_adcs()[self.id_]\n value = value / self.conf['PSICONV']\n log.debug(\"Current pressure on regulator %d = %f\",\n self.id_, value)\n return value", "def get_pressurelsb(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_PRESSURELSB,\n 1,\n addrsize=8\n )\n val = 0\n val = val << 8 | byte_list[0]\n return val", "def get_chamber_pressure(self):\n raise NotImplementedError", "def relative_humidity_from_dewpoint(dew, t2m):\n e_dew = water_vapour(dew)\n e_t2m = water_vapour(t2m)\n return e_dew / e_t2m", "def _p_value(self):\n pval = chi2.sf(self.chi_square, self.degrees_of_freedom)\n\n return pval", "def get_value(p, t, q, c, v):\n\n gas = chemistry.ConstituentProperties(c)\n Psat_gas = gas.Psat(t)\n\n if c.upper() == 
'H2S':\n if p < 43. and p * q * v > Psat_gas: # Pressure greater than saturation pressure\n return str(1.0)\n elif p < 43. and p * q * v < Psat_gas:\n return str(v)\n else:\n return str(0.8)\n else:\n return str(1.0)", "def pressureStatic(cls, address=0x77):\n bme280Tmp = adafruit_bme280.Adafruit_BME280_I2C(cls._i2c, address)\n return bme280Tmp.pressure", "def eCO2(self) -> int:\n return self.iaq_measure()[0]", "def read_pressure(self):\n self._force_read(False)\n\n presADC = (self._read_register_1ubyte(self.BME680_PRESS_MSB) << 12) | (self._read_register_1ubyte(self.BME680_PRESS_LSB) << 4) | (self._read_register_1ubyte(self.BME680_PRESS_XLSB) >> 4)\n\n return float(self._compensate_pressure(presADC))", "async def get_pressure(self) -> float: # type: ignore\n ...", "def probabilities_2nu_matter_std(sth, Dm2, VCC, energy, L):\n x = 2.0*VCC*(energy*1.e9)/Dm2\n cth = sqrt(1.0-sth*sth)\n s2th = 2.0*sth*cth\n s2thsq = s2th*s2th\n c2th = sqrt(1.0-s2thsq)\n\n Dm2m = Dm2*sqrt(s2thsq+pow(c2th-x, 2.0))\n s2thmsq = s2thsq / (s2thsq+pow(c2th-x, 2.0))\n\n arg = 1.27*Dm2m*L/energy#/4.0\n\n Pem = s2thmsq * pow(sin(arg), 2.0)\n Pme = Pem\n Pee = 1.0-Pem\n Pmm = 1.0-Pme\n\n prob = [Pee, Pem, Pme, Pmm]\n\n return prob", "def read_pressure(self):\n pRaw = self._read_multiple_bytes_as_array(self.BME280_PRESS_MSB, 3)\n\n return float(self._compensate_pressure((pRaw[0] << 12) + (pRaw[1] << 4) + (pRaw[2] >> 4)))", "def _fatality_prob(self, O2_conc):\n if O2_conc >= 0.18: # Lowest oxygen concentration above 18%\n Fi = 0\n elif O2_conc <= 0.088: # 8.8% of oxygen is assumed to be 100% fatal\n Fi = 1\n else:\n # Fi formula, reverse engineered using 8.8% and 18% thresholds\n Fi = 10**(6.5-76*O2_conc)\n return Fi", "def Pp(nccd):\n return (128.1-56.9) * (nccd - 1) / (6-1) + 56.9", "def partial_pressure(fraction=3, tem=283.15, pre=1.21325):\n pwater = np.exp(77.345 + 0.0057 * tem - 7235 / tem) / (tem ** 8.2) / 100000\n # partial pressure of H2O in air by relation, [Bar]\n p_hcl = fraction * 10 ** -5 * pre\n # firstly use 3ppm concentration to do estimation [Bar]\n return tem, pre, pwater, p_hcl", "def _ice_d2gdp2(temp,pres):\n # Reduced variables\n tn = temp/_TTP\n pn = pres/_PTPE\n _PI0 = _PATM/_PTPE\n g_pp = 0.\n \n # Power series and entropy components\n for (l,n) in enumerate(_GCOEFFS[0]):\n if l > 1:\n g_pp += n * l*(l-1) * (pn-_PI0)**(l-2) / _PTPE**2\n \n # Residual terms including complex numbers\n s = _GCOEFFS[2][2] * 2. 
/ _PTPE**2\n tk = _GCOEFFS[3][1]\n term = (tk-tn)*numpy.log(tk-tn) + (tk+tn)*numpy.log(tk+tn)\n term -= 2*tk*numpy.log(tk) + tn**2/tk\n g_pp += _TTP * (s*term).real\n return g_pp", "def humidity(self):\r\n self._read_temperature()\r\n hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)\r\n #print(\"Humidity data: \", hum)\r\n adc = float(hum[0] << 8 | hum[1])\r\n #print(\"adc:\", adc)\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n var1 = float(self._t_fine) - 76800.0\r\n #print(\"var1 \", var1)\r\n var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)\r\n #print(\"var2 \",var2)\r\n var3 = adc - var2\r\n #print(\"var3 \",var3)\r\n var4 = self._humidity_calib[1] / 65536.0\r\n #print(\"var4 \",var4)\r\n var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)\r\n #print(\"var5 \",var5)\r\n var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5\r\n #print(\"var6 \",var6)\r\n var6 = var3 * var4 * (var5 * var6)\r\n humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)\r\n\r\n if humidity > _BME280_HUMIDITY_MAX:\r\n return _BME280_HUMIDITY_MAX\r\n if humidity < _BME280_HUMIDITY_MIN:\r\n return _BME280_HUMIDITY_MIN\r\n # else...\r\n return humidity", "def specific_humidity_from_dewpoint(dew, ps):\n e = water_vapour(dew)\n return (0.622 * e) / (ps - 0.378 * e)", "def phaseChi2(self, pars):\n\t\treturn self.modeChi2(pars, PHASE)", "def get_digp2(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_DIGP2,\n 2,\n addrsize=16\n )\n val = 0\n val = val << 8 | byte_list[0]\n val = val << 8 | byte_list[1]\n # Unsigned > Signed integer\n val = _sign(val, 16)\n return val", "def local_co2(self):\n return self.PV_CO2 * self.ratio_local_cons + self.grid_co2 * (1. - self.ratio_local_cons)", "def read(self):\n # One method of getting a resource is calling get_resource from the client instance. get_resource\n # takes the lwm2m uri string as a parameter. 
The uri is the object id, then the instance id, then\n # the resource id.\n max_resource = lwm2m_client.get_resource(\"3323/1/5602\")\n # Resources can also be accessed using the index operator from the client instance.\n min_resource = lwm2m_client[3323][1][5601]\n \n pressure = self.pressure_sensor.read_psi()\n \n max_resource.value = max(max_resource.value, pressure)\n min_resource.value = min(min_resource.value, pressure)\n logger.debug(\"PressureValue read called: pressure = {}, max = {}, min = {}\".format(pressure, max_resource.value, min_resource.value))\n return pressure", "def raw_humidity(self):\n data = self._bus.read_i2c_block_data(self.addr, self.HUM, 2)\n return (data[0] << 8) + data[1]", "def P(self):\n return self.generic_getter(get_pressure, \"p\", \"convert_pressure\")", "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def _two_sided_p_value(t, df):\n return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def m2(self):\n return self.mass[1]", "def part2(input):\n ps = PlanetSystem(input)\n c = ps.total_cycle_time()\n return c", "def radial2(self) -> float:\n return self.distortion_coefficients[0]", "def poll_co2(self):\n response = req.get('https://api.co2signal.com/v1/latest?countryCode=DK-DK2', headers={'auth-token': os.environ['CO2_AUTH']})\n carbon_time = datetime.now()\n resp = response.json()\n try:\n data = resp['data']\n data['time'] = datetime.now()\n if 'carbonIntensity' in data:\n carbon = data['carbonIntensity']\n else:\n if hasattr(self, 'grid_co2'):\n print('Warning: using previous step grid CO2')\n carbon = self.grid_co2\n data['carbonIntensity'] = carbon\n else:\n print('Warning: using DEFAULT_GRID_CO2')\n carbon = DEFAULT_GRID_CO2\n data['carbonIntensity'] = carbon\n add_time(data, data['time'])\n #self.grid_co2 = carbon\n #co2_data = data\n except Exception as e:\n print('Error: couldnt retrieve current CO2 intensity')\n print(str(e))\n if hasattr(self, 'grid_co2'):\n print('Warning: using previous step grid CO2')\n carbon = self.grid_co2\n else:\n print('Warning: using DEFAULT_GRID_CO2')\n carbon = DEFAULT_GRID_CO2\n\n data = {}\n data['time'] = datetime.now()\n add_time(data, data['time'])\n data['carbonIntensity'] = carbon\n #self.grid_co2 = DEFAULT_GRID_CO2\n #co2_data = data\n return data['carbonIntensity'], data", "def plot_co2perc(data_frame):\n figco2, axco2 = plot_var(\n data_frame=data_frame,\n x_var=\"flow\",\n y_var=\"CO2 %\",\n label_var=\"mpr\",\n pivot=\"distance\",\n x_label=\"Flow [veh/m]\",\n y_label=r\"Change in CO$_2$ [\\%]\",\n t_label=\"Distance [m]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n\n return figco2, axco2", "def k2(self) -> float:\n return self.distortion_coefficients[1]", "def getP2(self):\n return self.points[1]", "def get_utilization(self, node: int) -> float:\n return self.busy[node].pmf(1)", "def readMol2TotalCharge(self, mol2File):\n charge = 0.0\n ll = []\n cmd = '%s -i %s -fi mol2 -o tmp -fo mol2 -c wc -cf tmp.crg -pf y' % \\\n (self.acExe, mol2File)\n if self.debug:\n 
self.printMess(\"Debugging...\")\n cmd = cmd.replace('-pf y', '-pf n')\n\n self.printDebug(cmd)\n\n log = getoutput(cmd)\n\n if log.isspace():\n tmpFile = open('tmp.crg', 'r')\n tmpData = tmpFile.readlines()\n for line in tmpData:\n ll += line.split()\n charge = sum(map(float,ll))\n elif self.debug:\n self.printQuoted(log)\n\n self.printDebug(\"readMol2TotalCharge: \" + str(charge))\n\n return charge", "def digital_pressure_data(self): # This function will give the initial digital format for pressure data\n self._bus.write_byte(self._addr,0x48) \n time.sleep(0.05) \n presadcbytes=self._bus.read_i2c_block_data(self._addr,0x00) \n time.sleep(0.05) \n self.presadc=(presadcbytes[0]<<16)+(presadcbytes[1]<<8)+presadcbytes[2]", "def phi2_coefficient(L):\r\n\r\n if 0 < L < 120:\r\n return L / 120\r\n if L >= 120:\r\n return 1", "def convert_co2_to_miles_driven(co2_saved):\n pounds_in_metric_ton = 2204.62\n tons_co2_per_gallon = 0.0089\n avg_gas_mileage_us_fleet = 22\n mileage_equivalent = co2_saved / pounds_in_metric_ton / tons_co2_per_gallon * avg_gas_mileage_us_fleet\n \n return mileage_equivalent", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)", "def read_ch1_pressure(self):\n sensor = self.ch1_index + 1\n return self.vgc.read_sensor(sensor)", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def mp2(mol):\n E_SCF, C_a, C_b, ea, eb = hartree_fock.UHF(mol)\n S, T, V, g_ao = integrals.compute_integrals(mol)\n I_phys, C, eps = spin_orbital_setup.spin_orbital(C_a, C_b, ea, eb, g_ao)\n nocc = mol.ndocc * 2 + mol.nsocc\n\n gmo = np.einsum('pQRS, pP -> PQRS', \n np.einsum('pqRS, qQ -> pQRS', \n np.einsum('pqrS, rR -> pqRS', \n np.einsum('pqrs, sS -> pqrS', I_phys, C), C), C), C) \n \n # Form 4-index tensor of orbital energy denominators\n n = np.newaxis\n o = slice(None, nocc)\n v = slice(nocc, None)\n eps = 1 / (-eps[v, n, n, n] - eps[n, v, n, n] + eps[n, n, o, n] + eps[n, n, n, o])\n # Compute energy\n E_mp2 = (1 / 4) * np.einsum('ijab, abij ->', gmo[o, o, v, v], gmo[v, v, o, o] * eps)\n\n mp2_total_energy = E_mp2 + E_SCF\n print(\"MP2 Correlation Energy: \" + str(E_mp2))\n print(\"MP2 Total Energy: \" + str(mp2_total_energy))\n return mp2_total_energy", "def get_brightnesstemperature(self, channel):\n K1 = {\n \"10\": 3040.136402, # Constant K1 [W m-2 um-1].\n \"11\": 2482.375199,\n \"12\": 1935.060183,\n \"13\": 866.468575,\n \"14\": 641.326517,\n }\n\n K2 = {\n \"10\": 1735.337945, # Constant K2 [K].\n \"11\": 1666.398761,\n \"12\": 1585.420044,\n \"13\": 1350.069147,\n \"14\": 1271.221673,\n }\n\n return K2[channel] / np.log((K1[channel] / self.get_radiance(channel)) + 1)", "def poynting(t, dt):\r\n p1satbar, p2satbar, dp1satbar, dp2satbar = psat(t, dt)\r\n poy1 = math.exp(vl1 * (p - p1satbar)/(r * t))\r\n poy2 = math.exp(vl2 * (p - p2satbar)/(r * t))\t\r\n dpoy1 = math.exp(vl1 * (p - dp1satbar)/(r * dt))\r\n dpoy2 = math.exp(vl2 * (p - dp2satbar)/(r * dt))\r\n return poy1, poy2, dpoy1, dpoy2", "def get_servo_pct(pi, pin):\n return pulsewidth2pct(pi.get_servo_pulsewidth(pin))", "def correct_main_pressure_differential(dP:pd.Series,\n P:pd.Series) -> pd.Series:\n 
\n denominator = 0.02 + 0.285*P*1e-6 - 0.004\n return 5972.16*((dP+1493)/373250 - 0.004)*1/denominator", "def calc_o2_working_cap(isot_dict):\n\n out_dict = {}\n out_dict['is_porous'] = isot_dict['is_porous']\n if out_dict['is_porous']:\n\n ip5 = isot_dict['isotherm']['pressure'].index(5.0)\n ip140 = isot_dict['isotherm']['pressure'].index(140.0)\n\n # conversion factors form mol/kg to cm3STP/cm3 and wt%\n conv1 = isot_dict['conversion_factor_molec_uc_to_cm3stp_cm3'] / isot_dict['conversion_factor_molec_uc_to_mol_kg'] # pylint: disable=line-too-long\n conv2 = get_molec_uc_to_mg_g(isot_dict) / isot_dict['conversion_factor_molec_uc_to_mol_kg'] / 10\n\n wc_140bar_average = isot_dict['isotherm']['loading_absolute_average'][ip140] - isot_dict['isotherm'][\n 'loading_absolute_average'][ip5]\n wc_140bar_dev = sqrt(isot_dict['isotherm']['loading_absolute_dev'][ip5]**2 +\n isot_dict['isotherm']['loading_absolute_dev'][ip140]**2)\n wc_140bar_fract = wc_140bar_average / isot_dict['isotherm']['loading_absolute_average'][ip140]\n\n out_dict.update({\n 'enthalpy_of_adsorption_5bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip5],\n 'enthalpy_of_adsorption_5bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip5],\n 'enthalpy_of_adsorption_5bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'enthalpy_of_adsorption_140bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip140],\n 'enthalpy_of_adsorption_140bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip140],\n 'enthalpy_of_adsorption_140bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'wc_140bar_cm3stp/cm3_average': wc_140bar_average * conv1,\n 'wc_140bar_cm3stp/cm3_dev': wc_140bar_dev * conv1,\n 'wc_140bar_cm3stp/cm3_unit': 'cm3 STP/cm3',\n 'wc_140bar_wt%_average': wc_140bar_average * conv2,\n 'wc_140bar_wt%_dev': wc_140bar_dev * conv2,\n 'wc_140bar_wt%_unit': 'g/g/100',\n 'wc_140bar_mol/kg_average': wc_140bar_average,\n 'wc_140bar_mol/kg_dev': wc_140bar_dev,\n 'wc_140bar_mol/kg_unit': 'mol/kg',\n 'wc_140bar_fraction': wc_140bar_fract,\n 'wc_140bar_fraction_unit': '-',\n })\n return Dict(dict=out_dict)", "def test_wet_psychrometric_rh():\n p = 1013.25 * units.mbar\n dry_bulb_temperature = 20. * units.degC\n wet_bulb_temperature = 18. * units.degC\n psychrometric_rh = relative_humidity_wet_psychrometric(p, dry_bulb_temperature,\n wet_bulb_temperature)\n assert_almost_equal(psychrometric_rh, 82.8747 * units.percent, 3)", "def get_pressurexlsb(self):\n byte_list = self.i2c.readfrom_mem(\n self.device_address,\n self.REGISTER_PRESSUREXLSB,\n 1,\n addrsize=8\n )\n val = 0\n val = val << 8 | byte_list[0]\n return val", "def hz2cents(pitchInHz, tonic=261.626):\n cents = 1200*np.log2(1.0*pitchInHz/tonic)\n return cents", "def pe_energy_dist(Ehere, Elow, Ehigh, y2):\n \n fE0 = 6.*(Ehere - Elow)*(Ehigh - Ehere) / (Ehigh - Elow)**3\n \n fE = fE0 / y2\n \n return fE", "def _calc_pval(self):\n t = self.beta / self.stderr_beta\n return (2. * (1. 
- stats.t.cdf(np.abs(t), self.n - 2)))[0]", "def getPureComponentVaporPressure(self,Temperature):\n\t\tA = self.Antoine_params[0]\n\t\tB = self.Antoine_params[1]\n\t\tC = self.Antoine_params[2]\n\t\t\n\t\t# Antoine's Equation\n\t\tPmmHg = 10**(A - B / (C + Temperature - 273.15))\n\t\treturn PmmHg * 133.322 # to get Pa", "def _get_pressure(self):\n assert self.serial_connection.isOpen()\n\n self.serial_connection.write('PR1' + self.CR + self.LF)\n acknowledgement = self.serial_connection.readline()\n self._check_acknowledgement(acknowledgement)\n\n self.serial_connection.write(self.ENQ)\n err_msg_and_pressure = self.serial_connection.readline().rstrip(self.LF).rstrip(self.CR)\n\n err_msg = err_msg_and_pressure[0]\n pressure = float(err_msg_and_pressure[3:])\n\n if err_msg != '0':\n message = 'Pressure query resulted in an error: ' + self.MEASUREMENT_STATUS[err_msg]\n raise IOError(message)\n\n self.serial_connection.write(self.CR + self.LF)\n return pressure", "def pseudo_r2(self):\n y_reg = self.time_series(len(self.data))\n SSres = ((self.data - y_reg)**2).sum()\n SStot = ((self.data - self.data.mean())**2).sum()\n return 1 - SSres/SStot", "def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625", "def pois_metric(pipe_diameter, delta_p, pipe_length):\n mu = 0.001 # water @ 25 degrees C\n pois = mu * 10\n flow_rate_lam = (math.pi * (pipe_diameter ** 4) * delta_p) / (128 * pois * pipe_length)\n\n return flow_rate_lam", "def get_heat_capacity_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n # There is a precision problem with \"-\" (minus) operator\n # We'll use asymptotic formula for high temperatures to avoid that problem\n y_low = y[y < THRESHOLD]\n vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD]\n tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD]\n # high temperatures - low numbers\n C_P_low = 5 * gbar * np.sqrt(2) / (36 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low\n C_P_low *= (\n 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2)\n - 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2\n )\n C_P_low *= _1d_call(_fdk, y_low, k=3 / 2) / _1d_call(_fdk, y_low, k=1 / 2) ** 2\n # low temperatures - high numbers\n C_P_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3)\n return np.concatenate((C_P_low, C_P_high)).reshape(y.shape)", "def ccdtemp(n=2):\n temp = camera.GetTemperature()\n camera.status.update()\n mesg, f1, f2, f3, f4 = 
camera.GetTemperatureStatus()\n print \"Sensor Temp=%6.1f, TargetTemp=%6.1f, AmbientTemp=%6.1f, CoolerVolts=%6.2f\" % (f1,f2,f3,f4)\n return temp", "def pressure(self):\n return float(self._current_observation['pressure_mb'])", "def humidity(self, update_temperature=True):\n if (self.t_fine is None) or update_temperature:\n self.temperature()\n\n adc_H = float(self.raw_humidity())\n var_H = self.t_fine - 76800.0\n var_H = (\n (adc_H - (self.dig_H4 * 64.0 + self.dig_H5 / 16384.0 * var_H)) *\n (self.dig_H2 / 65536.0 * (\n 1.0 + self.dig_H6 / 67108864.0 * var_H *\n (1.0 + self.dig_H3 / 67108864.0 * var_H)))\n )\n var_H = var_H * (1.0 - self.dig_H1 * var_H / 524288.0)\n\n if (var_H > 100.0):\n var_H = 100.0\n elif (var_H < 0.0):\n var_H = 0.0\n\n return round(var_H, 3)", "def lookup_effective_mass_area_factor(self, cm):\n\n if cm == 0.0:\n return 0.0\n elif 0.0 < cm <= 165000.0:\n return 2.5\n else:\n return 3.2", "def get_pressure(self):\n return randint(980, 1050)", "def calcPower(self, inputs):\n if self.getAtt('available', inputs):\n possible_charge_rate = self.getAtt('possible_charge_rate', inputs)\n Vm = self.getAtt('Vm', inputs)\n P = possible_charge_rate * Vm\n if not self.stayConnected:\n P = P * self.calculateVoltageIndex(Vm) * self.calculateTrafoIndex()\n return P\n return 0.0", "def _get_bpm_from_soundstretch(output):\n \n output = output.split(\"\\n\")\n for line in output:\n if 'Detected BPM rate ' in line:\n bpm = line[18:]\n return float(bpm)\n return None # Could not parse output", "def read_core_vbat(self) -> float:", "def thermal_velocity(charge, temperature, mass):\n return np.sqrt(2*abs(charge)*temperature/mass)", "def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. 
J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def calc_CO2_compensation_point(self, Tleaf):\n\n # Rubisco specificity for CO2 relative to O2\n tau = self.Q10_func(self.gamma25, self.Q10_gamma, Tleaf)\n gamma = self.Oa / (2.0 * tau)\n\n return gamma", "def block2_price(self):\n return self._safe_value(VAR_BLOCK2PRICE, float)", "def p(self) -> float:\n return self._pwr.real", "def raw_to_calibrated_pressure(self, rawpressure, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n\n adc_P = np.array(rawpressure, dtype='int64')\n dig_P1 = self.calib_vals['dig_P1'].astype('int64')\n dig_P2 = self.calib_vals['dig_P2'].astype('int64')\n dig_P3 = self.calib_vals['dig_P3'].astype('int64')\n dig_P4 = self.calib_vals['dig_P4'].astype('int64')\n dig_P5 = self.calib_vals['dig_P5'].astype('int64')\n dig_P6 = self.calib_vals['dig_P6'].astype('int64')\n dig_P7 = self.calib_vals['dig_P7'].astype('int64')\n dig_P8 = self.calib_vals['dig_P8'].astype('int64')\n dig_P9 = self.calib_vals['dig_P9'].astype('int64')\n\n var1 = t_fine - 128000\n var2 = var1 * var1 * dig_P6\n var2 += ((var1*dig_P5)<<17)\n var2 += ((dig_P4)<<35)\n var1 = ((var1 * var1 * dig_P3)>>8) + ((var1 * dig_P2)<<12)\n var1 = ((((1)<<47)+var1))*(dig_P1)>>33\n\n p = 1048576-adc_P\n p = (((p<<31)-var2)*3125)//var1\n var1 = (dig_P9 * (p>>13) * (p>>13)) >> 25\n var2 = (dig_P8 * p) >> 19\n p = ((p + var1 + var2) >> 8) + (dig_P7<<4)\n return p/256000.", "def get_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n pressure = (\n gbar * np.sqrt(2) / (3 * np.pi ** 2) * tt ** (2.5) * _1d_call(_fdk, y, k=1.5)\n )\n return pressure", "def read_temp(self):\n return 19.0\n data = self.read(_TEMP_REG, 2)\n temp = ((data[0] * 256) + data[1]) / 16\n if temp > 2047:\n temp -= 4096\n return temp * 0.0625", "def get_pH(self):\n rawline = self.f.readline()\n while rawline:\n rematch = self.solvphre.match(rawline)\n if rematch:\n return float(rematch.groups()[0])\n rawline = self.f.readline()", "def hp(self):\n minute_int, second = divmod(self.minute, 1)\n if self.positive:\n return self.degree + (minute_int / 100) + (second * 0.006)\n else:\n return -(self.degree + (minute_int / 100) + (second * 0.006))", "def plc_temp(coil_df):", "def ppm(self):\n if self._ppm is not None:\n return(self._ppm)\n if self.larmor is None:\n return(None)\n if self._f is not None:\n self._ppm = -self._f/(self.larmor)*1.0e6 + self._ppmshift\n return(self._ppm)\n\n return(None)", "def calc_humidity_ratio(rh_percent, dry_bulb_C, patm_mbar):\n patm_hPa = patm_mbar\n\n A, m, Tn = get_phycometric_constants(dry_bulb_C)\n T_dry = dry_bulb_C\n\n p_ws_hPa = A * 10 ** ((m * T_dry) / (T_dry + Tn))\n p_w_hPa = p_ws_hPa * rh_percent / 100\n B_kgperkg = 0.6219907\n x_kgperkg = B_kgperkg * p_w_hPa / (patm_hPa - p_w_hPa)\n return x_kgperkg", "def getWPSA2(ChargeSA):\n temp=0.0\n for i in ChargeSA:\n temp=temp+i[2]\n if temp == 0.0:\n return 0.0\n return getPPSA2(ChargeSA)*temp/1000.0", "def q_2_rh(temp, pressure, qair):\n mr = qair / (1-qair)\n e = mr * pressure / (0.62197 + mr)\n # convert temperature to saturated vapor pressure\n es = 611.2 * np.exp(17.67 * (temp - 273.15) / (temp - 29.65))\n rh = e / es\n rh[rh > 1] = 1\n rh[rh < 0] = 0\n return rh", "def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500", "def 
_p_value(self):\n p_value = chi2.sf(self.test_statistic, 2)\n\n return p_value", "def read_ch3_pressure(self):\n sensor = self.ch3_index + 1\n return self.vgc.read_sensor(sensor)", "def READ_PRESSURE_SENSOR():\n return 15.246", "def probabilities_2nu_vacuum_std(sth, Dm2, energy, L):\n arg = 1.27*Dm2*L/energy#/4.0\n cth = sqrt(1.0-sth*sth)\n s2th = 2.0*sth*cth\n\n Pem = s2th*s2th * pow(sin(arg), 2.0)\n Pme = Pem\n Pee = 1.0-Pem\n Pmm = 1.0-Pme\n\n prob = [Pee, Pem, Pme, Pmm]\n\n return prob", "def p2_pits(self):\n return self.state[self.M + 1:-1]" ]
[ "0.68633485", "0.64311475", "0.6035399", "0.601689", "0.5868461", "0.5868461", "0.58088636", "0.5721188", "0.56800145", "0.56064653", "0.55609304", "0.55468017", "0.5532125", "0.55313677", "0.5518863", "0.55117184", "0.54998475", "0.5495386", "0.5493003", "0.54853326", "0.54842556", "0.5464459", "0.5447632", "0.5423992", "0.541727", "0.54087204", "0.5404828", "0.5402177", "0.54013246", "0.5379466", "0.5373339", "0.5362243", "0.5358415", "0.53576565", "0.5345347", "0.5340364", "0.53249425", "0.53210217", "0.5306082", "0.52879834", "0.5287981", "0.5266486", "0.52638304", "0.5261937", "0.5246909", "0.52405316", "0.5239772", "0.5237249", "0.5236344", "0.52319765", "0.52293736", "0.52124274", "0.5182866", "0.5169387", "0.516636", "0.5161648", "0.51547366", "0.5152989", "0.5150324", "0.514445", "0.5130428", "0.5127084", "0.5122223", "0.51115733", "0.5102535", "0.5101299", "0.5099058", "0.509317", "0.5092918", "0.5091899", "0.50909793", "0.50906855", "0.50886726", "0.5087659", "0.5086449", "0.5086069", "0.5069823", "0.5066956", "0.50651926", "0.50609136", "0.50608486", "0.5058889", "0.5055034", "0.5052068", "0.50458604", "0.50403136", "0.5039668", "0.50391674", "0.5038711", "0.5038414", "0.503261", "0.50292283", "0.50287837", "0.50048715", "0.50036645", "0.49889913", "0.49818334", "0.49758977", "0.49757066" ]
0.6452072
2
Returns the resistance RZero of the sensor (in kOhms) for calibration purposes
def get_rzero(self): return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def r(self) -> float:\n return self._ohms.real", "def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def r0(self):\n return self.p[0] / self.p[1]", "def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def omtrek(self):\n x = pi*self.r**2\n return x", "def residual(us):\n return self.h_S(z0, us) - h_P", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def get_rel_humidity(\n self, sensitivity: Optional[str] = None, rhel_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or rhel_sensor is None:\n sensitivity, rhel_sensor = self.get_rel_humidity_sensor()\n if sensitivity == \"hh\":\n rh = rhel_sensor * 125 / 65536 - 6\n elif sensitivity == \"h\":\n rh = rhel_sensor * 125 / 100 - 6\n else:\n raise CloudWatcherException(f\"Unknown rhel sensor type {sensitivity}\")\n return rh", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))", "def Z_rms(self) -> np.float64:\n return np.sqrt(np.sum(self.ionic_fractions * self.charge_numbers**2))", "def get_nuclear_potential(self, r):\n\n return -self.nuclear_charge/r", "def get_R(self):\n return self.R_min * tf.exp(self.R_ * self.log_R_range)", "def rrint(self):\n if len(self.data.peaks):\n return (np.diff(self.data._masked) / self.data.fs).compressed()", "def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - 
resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def get_RM_K(vsini_kms, rp_Rearth, Rs_Rsun):\n D = (rp_Rearth * u.Rearth.to(u.m) / Rs_Rsun * u.Rsun.to(u.m)) ** 2\n return (vsini_kms * D / (1 - D)) * 1e3", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def input_resistance(self):\n return None", "def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def rv_from_r0v0(mu, R0, V0, t):\n #...Magnitudes of R0 and V0:\n r0 = norm(R0)\n v0 = norm(V0)\n #...Initial radial velocity:\n vr0 = np.dot(R0, V0)/r0\n #...Reciprocal of the semimajor axis (from the energy equation):\n alpha = 2/r0 - pow(v0,2)/mu\n #...Compute the universal anomaly:\n x = kepler_U(mu, t, r0, vr0, alpha)\n #...Compute the f and g functions:\n f, g = calc_f_g(mu, x, t, r0, alpha)\n #...Compute the final position vector:\n R = f*R0 + g*V0\n #...Compute the magnitude of R:\n r = norm(R)\n #...Compute the derivatives of f and g:\n fdot, gdot = calc_fdot_gdot(mu, x, r, r0, alpha)\n #...Compute the final velocity:\n V = fdot*R0 + gdot*V0\n return R, V", "def resistance(stock):\n output= stock_max(stock)-(stock_max(stock)*.05)\n return output", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def rsi(self) -> float:\n return self._rsi", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r", "def anisotropy_solution(r):\n return r**2", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s-1*cm-2*AA-1')", "def temperature() -> float:", "def min_humidity(self):\n return 0", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0", "def calibrate_high(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage <= 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n # if enough values are okay:\n else:\n # get \"high range\" coefficients\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n # calculate density\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def get_sensor_response(self, road_object_position) -> float: # reference to the current point in the road\n\n if self._have_position():\n\n response = (np.array([road_object_position[0], road_object_position[1]])\n - np.array([self._sensor_pos_vector3[0], 
self._sensor_pos_vector3[1]]))\n # difference and transform it\n # in a np array -> narray for further calculations (take in consideration only the x and y axis)\n response_mag = np.sum(response*response) ** 0.5 # magnitude\n mapped_responde = response_mag / sr_cnst.consts_obj.MAX_ROAD_MARGIN\n # how the distance > MAX_ROAD_MARGIN -> the value it's mapped between [0,1]\n # if the value is >1 it means the robot it's out of it's boundaries\n return mapped_responde\n else:\n raise Exception(\"Sensor position not initialized!\")", "def GetCelcius(self):\n ADCMax = (self.ADDevice.ADSamples * 1023) /(2**self.ADDevice.ADBitshift)\n sample=self.Get()\n R = self.RefVoltage / ADCMax\n Volt = sample*R-.5 \n return Volt/self.VoltPerDegree", "def compute_CRR(self, FAR):\r\n print('Computing CRR')\r\n return (np.ones((1, len(FAR))) - FAR)[0]", "def Rcoords(self):\n if self.radial > 0 and self.radial < len(self.ThRZmesh.getPositions(label=\"R\")):\n R = (self.radialInner() + self.radialOuter()) / 2.0\n else:\n # n = 0\n runLog.warning(\n \"Error: Radial Index ({}) location not INSIDE mesh \".format(self.radial)\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def estimate_R0(self, model, disease=None, **kwargs) -> ValueStd:\n return self._estimate_R(fit.estimate_R0, model, disease, **kwargs)", "def off_resamp(self):\n if self._resamp is None:\n # self._resamp = self.distributions.uniform(0., 1.) * units.deg\n # return self._resamp\n self._resamp = self.distributions.uniform(80., 160.) * units.deg\n return self._resamp", "def READ_PRESSURE_SENSOR():\n return 15.246", "def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)", "def resamp(self):\n if self._resamp is None:\n # self._resamp = self.distributions.uniform(0., 1.) * units.deg\n # return self._resamp\n # first make resamp appropriate for low-e orbits.\n amp_max = (-403.632 + 9.09917 * self.phi0.to('deg').value - 0.0442498 *\n self.phi0.to('deg').value ** 2 - 0.0883975 / self.phi0.to('deg').value) * units.deg\n amp_max[self.e < 0.05] = 15 * units.deg\n amp_min = (79.031 * numpy.exp(-(self.phi0.to('deg').value - 121.3435) ** 2 / (2 * 15.51349 ** 2))) * units.deg\n amp_min[self.e < 0.05] = 0 * units.deg\n self._resamp = amp_max - self.distributions.linear(0.25, 1) * (amp_max - amp_min)\n self._resamp[self.e < 0.05] = 15 * units.deg\n return self._resamp", "def Get_Meas_Res_RMS(self, mode, ch=1):\n rdStr = self.query(f':MEAS{ch}:RES:RMS? 
{mode}')\n return rdStr", "def ST_zero_mag(self):\n return 21.1", "def ST_zero_mag(self):\n return 21.1", "def get_on_resistance(self):\n is_nchannel = True\n stack = 4\n is_cell = False\n return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def hp(self):\n if self.positive:\n return self.degree + (self.minute / 100) + (self.second / 10000)\n else:\n return -(self.degree + (self.minute / 100) + (self.second / 10000))", "def relative_rmse(self) -> float:\n rrmse = self.rmse() / np.mean(self.true)\n return float(rrmse)", "def calibrate_decide(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage < 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n else:\n\n # get rho using full range coeffs\n XKw = coeffs['path_len'] * coeffs['Kwf']\n logV0 = np.log(coeffs['V0f'])\n rho_temp = (np.log(voltage) - logV0) / XKw\n\n # determine new coeffs based on the \"temporary\" values\n if np.mean(rho_temp) > 9:\n if verbose:\n print('high')\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n else:\n if verbose:\n print('low')\n XKw = coeffs['path_len'] * coeffs['Kwl']\n logV0 = np.log(coeffs['V0l'])\n # re-calculate rho with these coefficients\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)", "def get_rain_frequency(self) -> int:\n self.serial.write(b\"E!\")\n rain_freq = self.__extract_int(self.__read_response(1)[0], b\"!R\")\n\n return rain_freq", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def solar_meter(self):\n return self._solar_meter", "def rate_last(self):\n diff = (self.time - self.lasts[0][0]).total_seconds()\n try:\n return (self.pos - self.lasts[0][1]) / FAC / diff\n except ZeroDivisionError:\n return 0.0", "def rrse(self) -> float:\n return float(np.sqrt(self.rse()))", "def raw_sensor_temp(self):\n\n # return the value in millicelsius\n return float(self.raw_sensor_strings[1].split(\"=\")[1])", "def residual(S):\n rho = seawater.density(T, S, Pa)\n return (rho_1 - rho)", "def calculate_rh(self):\n # Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') 
is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)", "def calc_rhoenc(mass,r,rmax):\n idx = (r<rmax)\n return mass[idx].sum()/sphvol(rmax)", "def nitrogen_dioxide(self) -> float | None:\n return round_state(self._get_sensor_value(API_NO2))", "def mamajek08_logRpHK_Ro_edge():\n Ro_edge = 0.31935816876122064\n return Ro_edge", "def raw_value(self):\n if self.__trigger == gyro_trigger_mode.GET_RAW_VALUE_TRIGGER_READ:\n self.read_and_update_angle()\n return self.__value", "def RestEnergy(self):\n return (self.restMass * const.speed_of_light * const.speed_of_light)", "def get_rz(self):\n return self.rz", "def RNull(R):\n return -S*R*(r1*(K1**B1/(K1**B1 + (A/R)**B1)) - r2*(K2**B2/(K2**B2 + (A)**B2)) \\\n *(R*M)/(K3 + R*M))/((-gwt*A + R*gc)*(R+1)) + S/(R+1)", "def RA(self):\n return self.meta[\"header\"][\"OBJRA\"] * u.deg", "def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def _eventRs(self, phi, u):\n with np.errstate(all='ignore'):\n return 1/u[0] - self.Rs", "def moisture(self):\n if self.moisture_sensor is None:\n return None\n else:\n return self.moisture_sensor.percent", "def read_odometer(self):\r\n print(\"Este Carro Tem \" + str(self.odometer_reading) + \" Milhas Rodadas.\")", "def get_R(R_e,R_h):\n R = R_e + R_h\n return R", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def get_radius(self):\n return self.R", "def radial2(self) -> float:\n return self.distortion_coefficients[0]", "def read_line(self):\n self.read_calibrated()\n\n avg = 0\n summ = 0\n online = False\n\n for i in range(0, self.NUM_SENSORS):\n val = self.sensorValues[i]\n if val > 500: online = True\n if val > 50:\n multiplier = i * 1000\n avg += val * multiplier\n summ += val\n\n if online == False:\n if self.lastValue < (self.NUM_SENSORS-1)*1000/2:\n return 0\n else:\n return (self.NUM_SENSORS-1)*1000\n\n self.lastValue = avg/summ\n return self.lastValue", "def nfwPot(self, R, Rs, rho0, r200=1):\n x=R/Rs\n hx=self.h(x)\n return 2*rho0*Rs**3*hx", "def surface_resistance(self,freq):\n sigma = self.conductivity\n Rs = cmath.sqrt(2*np.pi*freq*self.mu/(2*sigma))\n return Rs", "def readout_measurement(self):\n if self.readout_qubit is None:\n raise ValueError('no readout qubit defined in the circuit')\n return cirq.Z(self.readout_qubit)", "def getRA(self):\n return self._ra", "def drift_score(self):\n if self.measured_val is None:\n return 0.0\n\n if self.rebalance_type == self.REBALANCE_TYPE_ABSOLUTE:\n return (self.measured_val - self.configured_val) / self.rebalance_thr\n else:\n return ((self.measured_val - self.configured_val) / self.configured_val) / self.rebalance_thr", "def get_resistance(self, res_type='float'):\n if res_type == 'text':\n # Output to be a string\n # Transform value (in Ohm) as a int string\n val = str(int(self.amplifier.res))\n\n # Compute displayable unit of the value\n unit = (len(val) - 1) // 3\n length = len(val) - unit * 3\n if unit <= 0:\n unit = ' Ohm'\n elif unit == 1:\n unit = ' kOhm'\n 
elif unit == 2:\n unit = ' MOhm'\n elif unit == 3:\n unit = ' GOhm'\n elif unit == 4:\n unit = ' TOhm'\n else:\n unit = ' 1E{} Ohm'.format(unit * 3)\n\n # Change the unit of the value\n if len(val) < length + 3:\n text_value = val[:length] + '.' + val[length:] + unit\n else:\n text_value = val[:length] + '.' + val[length:length + 2] + unit\n\n return text_value\n\n elif res_type == 'float':\n # Output to be a float\n return self.amplifier.res", "def access_resistance(self):\n return None", "def _R(self):\n return np.exp(self._log_R)", "def rad(tx,K,w,e,T0,Vo,P):\r\n\r\n M=2*np.pi*(tx-T0)/P #Mean anomaly\r\n E=np.pi\r\n for j in range(0,25):\r\n E=(M-e*(E*np.cos(E)-np.sin(E)))/(1-e*np.cos(E))\r\n th=2*np.arctan(((1+e)/(1-e))**0.5*np.tan(E/2))\r\n return K*(np.cos(th+w)+e*np.cos(w))+Vo", "def rrtime(self):\n if len(self.data.peaks):\n diff = ((self.data._masked[:-1] + self.data._masked[1:])\n / (2 * self.data.fs))\n return diff.compressed()", "def get_recfreq(self):\n sb = self.sb\n if self.ldat_type != \"xst-SEPTON\" and not self.septonconf:\n rcumode = self.rcumode[0]\n else:\n rcumode = 5\n nz = modeparms.rcumode2nyquistzone(rcumode)\n return modeparms.sb2freq(sb, nz)", "def get_radius(self):\n return self.r", "def _do_get_rate(self):\n rate = {\n 1: \"1 : Helium Probe in FAST rate\",\n 0: \"0 : Helium Probe in SLOW rate\"\n }\n result = self._execute('X')\n return rate.get(int(format(int(result[5:7]), '08b')[6]), \"Unknown\")" ]
[ "0.75763285", "0.75763285", "0.72106373", "0.70308226", "0.6785906", "0.6573991", "0.6407301", "0.62468076", "0.61387134", "0.60672534", "0.6046186", "0.6046186", "0.59931934", "0.5981607", "0.5975342", "0.5957084", "0.59491", "0.59407234", "0.5909753", "0.58624816", "0.5808946", "0.5779981", "0.5773813", "0.5759319", "0.5757578", "0.57565176", "0.57490623", "0.57175994", "0.5717317", "0.5715654", "0.57096267", "0.57041407", "0.5666802", "0.5659047", "0.5658419", "0.5647558", "0.56450397", "0.563906", "0.5635794", "0.56328076", "0.5624922", "0.5589358", "0.5582127", "0.5581066", "0.55681473", "0.5562602", "0.55588675", "0.5555658", "0.554699", "0.5537481", "0.5532513", "0.5530858", "0.55284095", "0.55284095", "0.5525055", "0.5522344", "0.5512175", "0.55078363", "0.5507359", "0.55071944", "0.5500685", "0.54998547", "0.5496776", "0.5470346", "0.5468171", "0.5465572", "0.54638565", "0.54568374", "0.54453826", "0.54425144", "0.54397696", "0.5432508", "0.5427982", "0.5419356", "0.54123735", "0.54090655", "0.540856", "0.5405756", "0.5397484", "0.53960586", "0.53943217", "0.53934646", "0.5392591", "0.5392018", "0.5390597", "0.53904474", "0.5386644", "0.5383598", "0.5380709", "0.53736085", "0.5368579", "0.53625023", "0.5359996", "0.5353933", "0.5353747", "0.5347609", "0.53467304", "0.5341401", "0.5340922" ]
0.7964137
1
Returns the resistance RZero of the sensor (in kOhms) for calibration purposes corrected for temperature/humidity
def get_corrected_rzero(self, temperature, humidity): return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)", "def r(self) -> float:\n return self._ohms.real", "def calibrate_high(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage <= 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n # if enough values are okay:\n else:\n # get \"high range\" coefficients\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n # calculate density\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def get_rel_humidity(\n self, sensitivity: Optional[str] = None, rhel_sensor: Optional[int] = None\n ) -> float:\n if sensitivity is None or rhel_sensor is None:\n sensitivity, rhel_sensor = self.get_rel_humidity_sensor()\n if sensitivity == \"hh\":\n rh = rhel_sensor * 125 / 65536 - 6\n elif sensitivity == \"h\":\n rh = rhel_sensor * 125 / 100 - 6\n else:\n raise CloudWatcherException(f\"Unknown rhel sensor type {sensitivity}\")\n return rh", "def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)", "def calibrate_decide(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage < 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n else:\n\n # get rho using full range coeffs\n 
XKw = coeffs['path_len'] * coeffs['Kwf']\n logV0 = np.log(coeffs['V0f'])\n rho_temp = (np.log(voltage) - logV0) / XKw\n\n # determine new coeffs based on the \"temporary\" values\n if np.mean(rho_temp) > 9:\n if verbose:\n print('high')\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n else:\n if verbose:\n print('low')\n XKw = coeffs['path_len'] * coeffs['Kwl']\n logV0 = np.log(coeffs['V0l'])\n # re-calculate rho with these coefficients\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def residual(us):\n return self.h_S(z0, us) - h_P", "def rv_from_r0v0(mu, R0, V0, t):\n #...Magnitudes of R0 and V0:\n r0 = norm(R0)\n v0 = norm(V0)\n #...Initial radial velocity:\n vr0 = np.dot(R0, V0)/r0\n #...Reciprocal of the semimajor axis (from the energy equation):\n alpha = 2/r0 - pow(v0,2)/mu\n #...Compute the universal anomaly:\n x = kepler_U(mu, t, r0, vr0, alpha)\n #...Compute the f and g functions:\n f, g = calc_f_g(mu, x, t, r0, alpha)\n #...Compute the final position vector:\n R = f*R0 + g*V0\n #...Compute the magnitude of R:\n r = norm(R)\n #...Compute the derivatives of f and g:\n fdot, gdot = calc_fdot_gdot(mu, x, r, r0, alpha)\n #...Compute the final velocity:\n V = fdot*R0 + gdot*V0\n return R, V", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def estimate_R0(self, model, disease=None, **kwargs) -> ValueStd:\n return self._estimate_R(fit.estimate_R0, model, disease, **kwargs)", "def get_rx_calibrate (self, rx_calibrate):\n\t\treturn self._rx_calibrate", "def get_meas_time_resistance(instrument):\n return float(instrument.query('SENSE:RESISTANCE:APER?'))", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def r0(self):\n return self.p[0] / self.p[1]", "def calibration(self) -> int:", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def min_humidity(self):\n return 0", "def get_resistance(self):\n\t\tif self.channel == 0 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)\n\t\telif self.channel == 1 :\n\t\t\tdata = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 1.0 + 0.075\n\t\tresistance_wa = 1.0 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def calibrate(self):\n super().calibrate()\n dataH1 = self._bus.read_i2c_block_data(self.addr,\n self.CALIBRATION_H1, 1)\n dataHX = self._bus.read_i2c_block_data(self.addr,\n self.CALIBRATION_HX, 7)\n\n self.dig_H1 = float(c_ubyte(dataH1[0]).value)\n self.dig_H2 = float(c_short((dataHX[1] << 8) + dataHX[0]).value)\n self.dig_H3 = float(c_ubyte(dataHX[2]).value)\n self.dig_H4 = float(c_short(\n (dataHX[3] << 4) + (dataHX[4] & 0xf)).value)\n self.dig_H5 = float(c_short(\n (dataHX[5] << 4) + ((dataHX[4] & 0xf0) >> 4)).value)\n self.dig_H6 = float(c_byte(dataHX[6]).value)", "def calculate_rh(self):\n # 
Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)", "def temperature() -> float:", "def rsr(self) -> float:\n return float(self.rmse() / np.std(self.true))", "def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def rrint(self):\n if len(self.data.peaks):\n return (np.diff(self.data._masked) / self.data.fs).compressed()", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def Z_rms(self) -> np.float64:\n return np.sqrt(np.sum(self.ionic_fractions * self.charge_numbers**2))", "def humidity(self):\r\n self._read_temperature()\r\n hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)\r\n #print(\"Humidity data: \", hum)\r\n adc = float(hum[0] << 8 | hum[1])\r\n #print(\"adc:\", adc)\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n var1 = float(self._t_fine) - 76800.0\r\n #print(\"var1 \", var1)\r\n var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)\r\n #print(\"var2 \",var2)\r\n var3 = adc - var2\r\n #print(\"var3 \",var3)\r\n var4 = self._humidity_calib[1] / 65536.0\r\n #print(\"var4 \",var4)\r\n var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)\r\n #print(\"var5 \",var5)\r\n var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5\r\n #print(\"var6 \",var6)\r\n var6 = var3 * var4 * (var5 * var6)\r\n humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)\r\n\r\n if humidity > _BME280_HUMIDITY_MAX:\r\n return _BME280_HUMIDITY_MAX\r\n if humidity < _BME280_HUMIDITY_MIN:\r\n return 
_BME280_HUMIDITY_MIN\r\n # else...\r\n return humidity", "def _calibrate_measurement(self):\n\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15,\n self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15,\n self.wbb.data.wavelength)\n\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n\n calibration_slope = ((warm_blackbody - cold_blackbody) /\n (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum))\n calibration_offset = warm_blackbody - (self.wbb.data.average_spectrum * \n calibration_slope)\n\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n\n plate_temperature = self.dwr.header.spare_f[0]\n if (self.plate == -1) :\n plate_emissivity = self.dwr.header.spare_f[1]\n\n plate_blackbody = bb_radiance(plate_temperature + 273.15,\n self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n\n self.dwr.data.average_spectrum = ((self.dwr.data.average_spectrum - \n plate_emission) / (1 - plate_emissivity))", "def READ_PRESSURE_SENSOR():\n return 15.246", "def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)", "def calcCaliCorrandR(constants, corr, data, outName):\n print(constants)\n perr=np.sqrt(np.diag(corr))\n print(perr)\n corrmat=np.zeros([len(constants),len(constants)])\n for i in range(len(corr)):\n for j in range(len(corr)):\n \n ele=corr[i,j]\n diele=ele/(perr[i]*perr[j])\n corrmat[i,j]=round(diele,3)\n print(corrmat)\n #calculate the r^2 value\n ss_res = 0\n ss_total = 0\n residuals = np.zeros([len(data[:,0]), 1])\n for i in range(len(data[:,0])):\n residuals[i] = (LangmuirCurve(data[i,0],constants[0],constants[1],constants[2],constants[3]) - data[i,1])\n ss_res += np.square(residuals[i])\n ss_total += np.square((data[i,1] - np.average(data[:,1])))\n print(ss_res)\n print(ss_total)\n r_sq = 1 - (ss_res/ss_total)\n print(r_sq)\n #write out the fit results\n f = open(outName + \"_cali_constants.txt\", 'w')\n f.write(\"B\\ta\\tN\\tK\\n\")\n for i in range(len(constants)):\n f.write('%.9f' %constants[i] + \"\\t\")\n f.write(\"\\n\\n\")\n for i in range(len(corr)):\n f.write('%.9f' %perr[i] + \"\\t\")\n f.write(\"\\n\\n\")\n f.write(\"Correlation matrix :\\n\\n\")\n for i in range(len(corr)):\n for j in range(len(corr)):\n f.write('%.9f' %corrmat[i,j]+'\\t')\n f.write(\"\\n\\n\")\n f.write(\"R^2 value : \\t\" + '%.9f' %r_sq)\n f.close()", "def absolute_humidity(temp, rh):\r\n e_s = temp_to_saturated_vapor_pressure(temp)\r\n e = e_s * rh / 100.\r\n rho = mbar_to_pa(e) / (R_s * degc_to_kelvin(temp))\r\n return kg_to_g(rho)", "def Get_Meas_Res_RMS(self, mode, ch=1):\n rdStr = self.query(f':MEAS{ch}:RES:RMS? 
{mode}')\n return rdStr", "def raw_to_calibrated_humidity(self, rawhumidity, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n\n adc_H = np.array(rawhumidity, dtype='int32')\n dig_H1 = self.calib_vals['dig_H1'].astype('int32')\n dig_H2 = self.calib_vals['dig_H2'].astype('int32')\n dig_H3 = self.calib_vals['dig_H3'].astype('int32')\n dig_H4 = self.calib_vals['dig_H4'].astype('int32')\n dig_H5 = self.calib_vals['dig_H5'].astype('int32')\n dig_H6 = self.calib_vals['dig_H6'].astype('int32')\n\n var = t_fine - 76800\n var = ((((adc_H << 14) - (dig_H4 << 20) - (dig_H5 * var)) + 16384) >> 15) * (((((((var * dig_H6) >> 10) * (((var *(dig_H3) >> 11) + 32768)) >> 10) + 2097152) * (dig_H2) + 8192) >> 14))\n var -= (((((var >> 15) * (var >> 15)) >> 7) * dig_H1) >> 4)\n var.ravel()[var.ravel()<0] = 0\n var.ravel()[var.ravel()>419430400] = 419430400\n return (var>>12)/1024.", "def get_capacitive_rain_sensor_temp(\n self, rain_sensor_temp: Optional[int] = None\n ) -> float:\n # TODO: these values were hardcoded but now are taken from the CW.\n # Check which way is the \"true\" way based on the sensor type (capacitive vs Hydredon)\n # rain_pull_up_resistance = 1\n # rain_res_at_25 = 1\n # rain_beta = 3450\n absolute_zero = 273.15\n\n if rain_sensor_temp is None:\n rain_sensor_temp = self.raw_rain_sensor_temp\n\n if rain_sensor_temp < 1:\n rain_sensor_temp = 1\n elif rain_sensor_temp > 1022:\n rain_sensor_temp = 1022\n\n r = self.rain_pull_up_resistance / ((1023 / rain_sensor_temp) - 1)\n r = math.log(r / self.rain_res_at_25)\n\n return 1 / (r / self.rain_beta + 1 / (absolute_zero + 25)) - absolute_zero", "def get_RM_K(vsini_kms, rp_Rearth, Rs_Rsun):\n D = (rp_Rearth * u.Rearth.to(u.m) / Rs_Rsun * u.Rsun.to(u.m)) ** 2\n return (vsini_kms * D / (1 - D)) * 1e3", "def input_resistance(self):\n return None", "def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}", "def compute_CRR(self, FAR):\r\n print('Computing CRR')\r\n return (np.ones((1, len(FAR))) - FAR)[0]", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def get_correction_factor(self, temperature, humidity):\n\n if temperature < 20:\n return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) 
* self.CORD\n\n return self.CORE * temperature + self.CORF * humidity + self.CORG", "def get_sensor_response(self, road_object_position) -> float: # reference to the current point in the road\n\n if self._have_position():\n\n response = (np.array([road_object_position[0], road_object_position[1]])\n - np.array([self._sensor_pos_vector3[0], self._sensor_pos_vector3[1]]))\n # difference and transform it\n # in a np array -> narray for further calculations (take in consideration only the x and y axis)\n response_mag = np.sum(response*response) ** 0.5 # magnitude\n mapped_responde = response_mag / sr_cnst.consts_obj.MAX_ROAD_MARGIN\n # how the distance > MAX_ROAD_MARGIN -> the value it's mapped between [0,1]\n # if the value is >1 it means the robot it's out of it's boundaries\n return mapped_responde\n else:\n raise Exception(\"Sensor position not initialized!\")", "def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))", "def get_temp(self):\n\t\traw_temp = self.read_i2c_word(self.TEMP_OUT0)\n\n\t\t# Get the actual temperature using the formule given in the\n\t\t# MPU-6050 Register Map and Descriptions revision 4.2, page 30\n\t\tactual_temp = (raw_temp / 340.0) + 36.53\n\n\t\treturn actual_temp", "def omtrek(self):\n x = pi*self.r**2\n return x", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s-1*cm-2*AA-1')", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def anisotropy_solution(r):\n return r**2", "def get_calibration(self):\n return GetCalibration(*self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_GET_CALIBRATION, (), '', 'i i'))", "def _calculate_heater_resistance(self, target_temp):\n if target_temp > 400: #Maximum temperature\n target_temp = 400\n\n var1 = (calGH1 / 16.0) + 49.0\n var2 = ((calGH2 / 32768.0) * 0.0005) + 0.00235\n var3 = calGH3 / 1024.0\n var4 = var1 * (1.0 + (var2 * target_temp))\n var5 = var4 + (var3 * self.calAmbTemp)\n res_heat = 3.4 * ((var5 * (4 / (4 + calResHeatRange)) * (1 / (1 + (calResHeatVal * 0.002)))) - 25)\n\n return int(res_heat)", "def ST_zero_flux(self):\n return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')", "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def residual(S):\n rho = 
seawater.density(T, S, Pa)\n return (rho_1 - rho)", "def raw_sensor_temp(self):\n\n # return the value in millicelsius\n return float(self.raw_sensor_strings[1].split(\"=\")[1])", "def read_temp(self, ctrl_pin):\n bytes_ = self.read_bytes(ctrl_pin)\n int_ = struct.unpack('>H', bytes_)[0]\n if int_ & 0x04 > 1:\n temp_celsius = -1\n else:\n temp_celsius = (int_ >> 3) * 0.25\n return temp_celsius", "def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT", "def calibH(self):\n # in case of errors\n self.flushInput()\n if (self.model == 'GDS'):\n # GDS includes the sampling rate data with the waveform\n # data. hstep obtained later.\n self.write(':TIM:DEL?\\n')\n # minus sign necessary to make hoff on two scopes congruous\n hoff = -float(self.readline())\n elif (self.model == 'TDS'):\n self.write('WFMPre:XZEro?\\n')\n hoff = float(self.readline())\n self.write('WFMPre:XINcr?\\n')\n hstep = float(self.readline())\n # in case of errors\n self.flushInput()\n return (hstep, hoff)", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r", "def get_R(self):\n return self.R_min * tf.exp(self.R_ * self.log_R_range)", "def calibrate_meter(self):\r\n print(\"meter calibrated\")", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def raw_to_calibrated_temp(self, rawtemp):\n t_fine = self._raw_to_t_fine(rawtemp)\n deg_C = ((t_fine * 5 + 128) >> 8)/100.\n return deg_C", "def Rcoords(self):\n if self.radial > 0 and self.radial < len(self.ThRZmesh.getPositions(label=\"R\")):\n R = (self.radialInner() + self.radialOuter()) / 2.0\n else:\n # n = 0\n runLog.warning(\n \"Error: Radial Index ({}) location not INSIDE mesh \".format(self.radial)\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def antenny_calibrate(self):\n if self.antenny_config.get(\"use_bno08x_rvc\"):\n t = .25\n d = .5\n us = 50\n else:\n t = .1\n d = .5\n us = 100\n self.platform.auto_calibrate_elevation_servo(us=us, t=t, d=d)\n self.platform.auto_calibrate_azimuth_servo(us=us, t=t, d=d)\n if self.antenny_config.get(\"use_bno055\"):\n self.platform.auto_calibrate_magnetometer()\n self.platform.auto_calibrate_gyroscope()\n self.platform.auto_calibrate_accelerometer()", "def Get_Meas_Res_Reliability(self, mode, ch=1):\n rdStr = self.query(f':MEAS{ch}:RES:REL? 
{mode}')\n return rdStr", "def get_rain_frequency(self) -> int:\n self.serial.write(b\"E!\")\n rain_freq = self.__extract_int(self.__read_response(1)[0], b\"!R\")\n\n return rain_freq", "def fRwTemperatureCorrected(Rw_Temp1, Temp1, Temp2):\n\treturn Rw_Temp1 * ((Temp1 + 21.5) / (Temp2 + 21.5))", "def GetCelcius(self):\n ADCMax = (self.ADDevice.ADSamples * 1023) /(2**self.ADDevice.ADBitshift)\n sample=self.Get()\n R = self.RefVoltage / ADCMax\n Volt = sample*R-.5 \n return Volt/self.VoltPerDegree", "def platform_auto_calibrate_magnetometer(self):\n self._platform_auto_calibrate_check()\n return self.platform.auto_calibrate_magnetometer()", "def resamp(self):\n if self._resamp is None:\n # self._resamp = self.distributions.uniform(0., 1.) * units.deg\n # return self._resamp\n # first make resamp appropriate for low-e orbits.\n amp_max = (-403.632 + 9.09917 * self.phi0.to('deg').value - 0.0442498 *\n self.phi0.to('deg').value ** 2 - 0.0883975 / self.phi0.to('deg').value) * units.deg\n amp_max[self.e < 0.05] = 15 * units.deg\n amp_min = (79.031 * numpy.exp(-(self.phi0.to('deg').value - 121.3435) ** 2 / (2 * 15.51349 ** 2))) * units.deg\n amp_min[self.e < 0.05] = 0 * units.deg\n self._resamp = amp_max - self.distributions.linear(0.25, 1) * (amp_max - amp_min)\n self._resamp[self.e < 0.05] = 15 * units.deg\n return self._resamp", "def get_sky_ir_temperature(self) -> float:\n self.serial.write(b\"S!\")\n sky_ir_temp = self.__extract_int(self.__read_response(1)[0], b\"!1\")\n\n return round(sky_ir_temp / 100, 2)", "def ft_sensor_set_zero(self):\r\n return self._arm.ft_sensor_set_zero()", "def __getTemperatureCalibrationCoefficients(self):\n src10 = self.read_byte_data(self.address, 0x10)\n src11 = self.read_byte_data(self.address, 0x11)\n src12 = self.read_byte_data(self.address, 0x12)\n c0 = (src10 << 4) | (src11 >> 4)\n c0 = getTwosComplement(c0, 12)\n c1 = ((src11 & 0x0F) << 8) | src12\n c1 = getTwosComplement(c1, 12)\n return c0, c1", "def read_temp(self):\n return 19.0\n data = self.read(_TEMP_REG, 2)\n temp = ((data[0] * 256) + data[1]) / 16\n if temp > 2047:\n temp -= 4096\n return temp * 0.0625", "def le_calibration_func(etr, kc, ts):\n return etr * kc * (2.501 - 2.361E-3 * (ts - 273)) * 2500 / 9", "def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)", "def get_nuclear_potential(self, r):\n\n return -self.nuclear_charge/r", "def eco_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_high_c)", "def calculateTemperature(self, R):\n 
rRuOx2005 = self._busToRuOx2005(R)\n T = self.RuOx2005.calculateTemperature(rRuOx2005)\n return T", "def temp(self):\n\t\ttemp_out = self.read16(MPU9250_ADDRESS, TEMP_DATA)\n\t\ttemp = temp_out / 333.87 + 21.0 # these are from the datasheets\n\t\treturn temp", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def off_resamp(self):\n if self._resamp is None:\n # self._resamp = self.distributions.uniform(0., 1.) * units.deg\n # return self._resamp\n self._resamp = self.distributions.uniform(80., 160.) * units.deg\n return self._resamp", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def _read_calibration_data(self):\n #Declare global variables.\n global calT1\n global calT2\n global calT3\n global calP1\n global calP2\n global calP3\n global calP4\n global calP5\n global calP6\n global calP7\n global calP8\n global calP9\n global calP10\n global calH1\n global calH2\n global calH3\n global calH4\n global calH5\n global calH6\n global calH7\n global calGH1\n global calGH2\n global calGH3\n global calResHeatRange\n global calResHeatVal\n global calRangeSwErr\n\n #Temperature calibration.\n calT1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_T1_LSB_REG)\n calT2 = self._read_2bytes_as_short_lsbfirst(self.BME680_T2_LSB_REG)\n calT3 = self._read_register_1sbyte(self.BME680_T3_REG)\n\n #Pressure calibration.\n calP1 = self._read_2bytes_as_ushort_lsbfirst(self.BME680_P1_LSB_REG)\n calP2 = self._read_2bytes_as_short_lsbfirst(self.BME680_P2_LSB_REG)\n calP3 = self._read_register_1sbyte(self.BME680_P3_REG)\n calP4 = self._read_2bytes_as_short_lsbfirst(self.BME680_P4_LSB_REG)\n calP5 = self._read_2bytes_as_short_lsbfirst(self.BME680_P5_LSB_REG)\n calP6 = self._read_register_1sbyte(self.BME680_P6_REG)\n calP7 = self._read_register_1sbyte(self.BME680_P7_REG)\n calP8 = self._read_2bytes_as_short_lsbfirst(self.BME680_P8_LSB_REG)\n calP9 = self._read_2bytes_as_short_lsbfirst(self.BME680_P9_LSB_REG)\n calP10 = self._read_register_1ubyte(self.BME680_P10_REG)\n\n #Humidity calibration.\n calH1 = self._read_register_1ubyte(self.BME680_H1_MSB_REG) << 4 | (self._read_register_1ubyte(self.BME680_H1_LSB_REG) & 0x0F)\n calH2 = self._read_register_1ubyte(self.BME680_H2_MSB_REG) << 4 | ((self._read_register_1ubyte(self.BME680_H2_LSB_REG)) >> 4)\n calH3 = self._read_register_1sbyte(self.BME680_H3_REG)\n calH4 = self._read_register_1sbyte(self.BME680_H4_REG)\n calH5 = self._read_register_1sbyte(self.BME680_H5_REG)\n calH6 = self._read_register_1ubyte(self.BME680_H6_REG)\n calH7 = self._read_register_1sbyte(self.BME680_H7_REG)\n\n #Gas calibration.\n calGH1 = self._read_register_1sbyte(self.BME680_GH1_REG)\n calGH2 = self._read_2bytes_as_short_lsbfirst(self.BME680_GH2_LSB_REG)\n calGH3 = self._read_register_1sbyte(self.BME680_GH3_REG)\n\n #Heat calibration.\n calResHeatRange = (self._read_register_1ubyte(self.BME680_RES_HEAT_RANGE) & 0x30) / 16\n calResHeatVal = self._read_register_1sbyte(self.BME680_RES_HEAT_VAL)\n calRangeSwErr = (self._read_register_1sbyte(self.BME680_RANGE_SW_ERR) & 0xF0) / 16", "def __getPressureCalibrationCoefficients(self):\n src13 = self.read_byte_data(self.address, 0x13)\n src14 = self.read_byte_data(self.address, 0x14)\n src15 = self.read_byte_data(self.address, 0x15)\n src16 = self.read_byte_data(self.address, 0x16)\n src17 = self.read_byte_data(self.address, 0x17)\n src18 = self.read_byte_data(self.address, 0x18)\n src19 = self.read_byte_data(self.address, 0x19)\n src1A = self.read_byte_data(self.address, 0x1A)\n src1B = self.read_byte_data(self.address, 
0x1B)\n src1C = self.read_byte_data(self.address, 0x1C)\n src1D = self.read_byte_data(self.address, 0x1D)\n src1E = self.read_byte_data(self.address, 0x1E)\n src1F = self.read_byte_data(self.address, 0x1F)\n src20 = self.read_byte_data(self.address, 0x20)\n src21 = self.read_byte_data(self.address, 0x21)\n c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)\n c00 = getTwosComplement(c00, 20)\n c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17\n c10 = getTwosComplement(c10, 20)\n c20 = (src1C << 8) | src1D\n c20 = getTwosComplement(c20, 16)\n c30 = (src20 << 8) | src21\n c30 = getTwosComplement(c30, 16)\n c01 = (src18 << 8) | src19\n c01 = getTwosComplement(c01, 16)\n c11 = (src1A << 8) | src1B\n c11 = getTwosComplement(c11, 16)\n c21 = (src1E < 8) | src1F\n c21 = getTwosComplement(c21, 16)\n return c00, c10, c20, c30, c01, c11, c21", "def ST_zero_mag(self):\n return 21.1", "def ST_zero_mag(self):\n return 21.1" ]
[ "0.7580876", "0.7580876", "0.6772141", "0.6595775", "0.627289", "0.627289", "0.6242239", "0.62241274", "0.5995406", "0.5957256", "0.59282094", "0.5875282", "0.58737624", "0.57592624", "0.5742674", "0.5665439", "0.56521314", "0.5646416", "0.5641885", "0.56370544", "0.56231505", "0.55898166", "0.5518144", "0.54900455", "0.5483646", "0.54810745", "0.547866", "0.547111", "0.5470423", "0.54554224", "0.54553354", "0.54529184", "0.5445317", "0.5442021", "0.5436363", "0.54326326", "0.5432147", "0.54152125", "0.537606", "0.53759086", "0.5371187", "0.5367082", "0.53571457", "0.5353022", "0.5348987", "0.5345842", "0.5327685", "0.53276485", "0.5322355", "0.53218037", "0.53218037", "0.53201157", "0.5310691", "0.5297984", "0.52948314", "0.52924174", "0.529231", "0.5281296", "0.5278991", "0.5278106", "0.5275966", "0.5274654", "0.5269337", "0.5269202", "0.52686363", "0.5257647", "0.525504", "0.52524906", "0.52317846", "0.5231596", "0.522376", "0.5207594", "0.51994026", "0.51981205", "0.51976097", "0.51939297", "0.51903147", "0.51898736", "0.51845837", "0.5180759", "0.51785797", "0.51726556", "0.516298", "0.5161797", "0.5155955", "0.51496917", "0.5146409", "0.51280695", "0.51271033", "0.512286", "0.5119318", "0.51131386", "0.5113012", "0.51113796", "0.51110214", "0.5110936", "0.5109076", "0.510889", "0.510889" ]
0.78341985
1
Find and create a configuration for Boost. prefix Where to find sofiasip, should sofiasip/sip.h.
def __init__(self, prefix = None): # Compute the search path. if prefix is None: test = [Path('/usr'), Path('/usr/local')] else: test = [Path(prefix)] self.__prefix = self._search_all('include/sofia-sip-1.12/sofia-sip/sip.h', test)[0] self.__config = drake.cxx.Config() self.__config.add_system_include_path(self.__prefix / 'include/sofia-sip-1.12') self.__config.lib_path(self.__prefix / 'lib')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def getapxs_location():\n return getconfigure_option(\"APXS\")", "def provide_felix_config(self):\n # First read the config values, so as to avoid unnecessary\n # writes.\n prefix = None\n ready = None\n iface_pfx_key = key_for_config('InterfacePrefix')\n try:\n prefix = self.client.read(iface_pfx_key).value\n ready = self.client.read(READY_KEY).value\n except etcd.EtcdKeyNotFound:\n LOG.info('%s values are missing', CONFIG_DIR)\n\n # Now write the values that need writing.\n if prefix != 'tap':\n LOG.info('%s -> tap', iface_pfx_key)\n self.client.write(iface_pfx_key, 'tap')\n if ready != 'true':\n # TODO Set this flag only once we're really ready!\n LOG.info('%s -> true', READY_KEY)\n self.client.write(READY_KEY, 'true')", "def test_replace_namespaced_build_config(self):\n pass", "def includeme(root):\n configure.scan(\"backend.services\")\n configure.scan(\"backend.content\")\n configure.scan(\"backend.install\")", "def config_locator():\n print(pkgrs.resource_filename('latools', 'latools.cfg'))\n return", "def compose_defines():\n return \"\"\"\nLIBPBDATA_INC ?=../pbdata\nLIBPBIHDF_INC ?=../hdf\nLIBBLASR_INC ?=../alignment\nLIBPBDATA_LIB ?=%(thisdir)s/pbdata/\nLIBPBIHDF_LIB ?=%(thisdir)s/hdf/\nLIBBLASR_LIB ?=%(thisdir)s/alignment/\nnohdf ?=1\n\"\"\"%(dict(thisdir=thisdir))", "def locate_config(confname, app_name, prefix='etc', verbose=False):\n candidates = []\n app_config_dir = ('%s_CONFIG_DIR' % app_name).upper()\n if app_config_dir in os.environ:\n candidate = os.path.normpath(\n os.path.join(os.environ[app_config_dir], confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n candidate = os.path.normpath(os.path.join(\n os.path.dirname(os.path.dirname(sys.executable)),\n prefix, app_name, confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n candidate = os.path.normpath('/%s/%s/%s' % (prefix, app_name, confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n candidate = os.path.normpath(os.path.join(os.getcwd(), confname))\n if os.path.isfile(candidate):\n candidates += [candidate]\n if candidates:\n if verbose:\n LOGGER.info(\"config loaded from '%s'\", candidates[0])\n return candidates[0]\n else:\n LOGGER.warning(\"config '%s' was not found.\", confname)\n return None", "def configure(conf):\n conf.find_program(\"doxygen\", var=\"DOXYGEN\")\n conf.find_program(\"dot\", var=\"DOT\")", "def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def base_install():\n # scwrl\n scwrl = {}\n print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\\n'\n 'All required input can use tab completion for paths.\\n'\n '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'.format(**text_colours))\n scwrl_path = get_user_path('Please provide a path to your SCWRL executable', required=False)\n scwrl['path'] = str(scwrl_path)\n 
pack_mode = get_user_option(\n 'Please choose your packing mode (flexible is significantly slower but is more accurate).',\n ['flexible', 'rigid'])\n if pack_mode == 'rigid':\n scwrl['rigid_rotamer_model'] = True\n else:\n scwrl['rigid_rotamer_model'] = False\n settings['scwrl'] = scwrl\n\n # dssp\n print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))\n dssp = {}\n dssp_path = get_user_path('Please provide a path to your DSSP executable.', required=False)\n dssp['path'] = str(dssp_path)\n settings['dssp'] = dssp\n\n # buff\n print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))\n buff = {}\n ffs = []\n ff_dir = isambard_path / 'buff' / 'force_fields'\n for ff_file in os.listdir(str(ff_dir)):\n ff = pathlib.Path(ff_file)\n ffs.append(ff.stem)\n force_field_choice = get_user_option(\n 'Please choose the default BUFF force field, this can be modified during runtime.',\n ffs)\n buff['default_force_field'] = force_field_choice\n settings['buff'] = buff\n return", "def includeme(config):", "def configure(self, spec, prefix):\n options = getattr(self, \"configure_flag_args\", [])\n options += [\"--prefix={0}\".format(prefix)]\n options += self.configure_args()\n\n with working_dir(self.build_directory, create=True):\n inspect.getmodule(self).configure(*options)", "def _configure_namespaces(api):\n\t#{{cookiecutter.app_name}}_namespace\n\tapi.add_namespace({{cookiecutter.app_name}}_namespace)", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def test_patch_namespaced_build_config(self):\n pass", "def configure(_workdir):\n\n global workdir\n workdir = _workdir\n\n from os.path import join\n from ConfigParser import ConfigParser\n config = ConfigParser(dict(here=workdir))\n config.read(join(workdir, 'rnaseqlyze.ini'))\n\n for name, value in config.items(\"rnaseqlyze\"):\n globals()[name] = value\n\n import Bio.Entrez\n Bio.Entrez.email = admin_email", "def test_create_namespaced_build_config(self):\n pass", "def pibooth_configure(cfg):", "def setup(conf, products, build=False):\n import distutils.sysconfig\n paths = {\n \"CPPPATH\": distutils.sysconfig.get_python_inc().split(),\n \"LIBPATH\": [],\n }\n libs = []\n dir = distutils.sysconfig.get_config_var(\"LIBPL\")\n if not dir in paths[\"LIBPATH\"]:\n paths[\"LIBPATH\"] += [dir]\n pylibrary = distutils.sysconfig.get_config_var(\"LIBRARY\")\n mat = re.search(\"(python.*)\\.(a|so|dylib)$\", pylibrary)\n if mat:\n libs.append(mat.group(1)) \n for w in (\" \".join([distutils.sysconfig.get_config_var(\"MODLIBS\"),\n distutils.sysconfig.get_config_var(\"SHLIBS\")])).split():\n mat = re.search(r\"^-([Ll])(.*)\", w)\n if mat:\n lL = mat.group(1)\n arg = mat.group(2)\n if lL == \"l\":\n if not arg in libs:\n libs.append(arg)\n else:\n if os.path.isdir(arg) and not arg in paths[\"LIBPATH\"]:\n paths[\"LIBPATH\"].append(arg)\n conf.env.PrependUnique(**paths)\n for lib in libs:\n if lib not in conf.env.libs[\"python\"]:\n conf.env.libs[\"python\"].append(lib)\n return {\"paths\": paths, \"libs\": {\"python\": libs}}", "def build_config(\n *, quiet: bool, release: str, sp_osi: str | None, tag_suffix: str | None\n) -> Config:\n\n def osi_version() -> str:\n \"\"\"Determine the sp-osi version to use; parse \"wip\" in a special way.\"\"\"\n if sp_osi is None:\n return find.find_sp_osi_version()\n\n if sp_osi == \"wip\":\n return find.find_sp_osi_version() + defs.VERSION_WIP_SUFFIX\n\n return sp_osi\n\n return Config(\n topdir=find.find_topdir(),\n release=release,\n 
sp_osi_version=osi_version(),\n tag_suffix=tag_suffix if tag_suffix is not None else _build_tag_suffix(),\n verbose=not quiet,\n )", "def phone_config(self, sip_server: str = \"\") -> None:", "def _setup_applications(self):\n if 'host_nfs_path' in self.config['settings'] and 'guest_nfs_path' in self.config['settings']:\n self.settings['nfs'] = NFSSettings(host_vm_nfs_path=self.config['settings']['host_nfs_path'],\n guest_vm_nfs_path=self.config['settings']['guest_nfs_path'])\n\n self._setup_printer()", "def configure(self) -> None:", "def extractBINS( configPy, var ):\n\n\t#TODO: Better a temporary file\n\ttry:\n\t\tshutil.copy( configPy, '_tmpPy.py')\n\texcept IOError:\n\t\tmessage = '\\033[1;31mError: There is no config File named %s\\033[1;m' % configPy\n\t\traise IOError, message\n\t# To be sure the first import is FWCore.ParameterSet.Config \n\t# in order to extract BINS\n\t_file = open('_tmpPy.py','r')\n\t_lines = _file.readlines()\n\t_file.close()\n\t_lines.insert(0,'import FWCore.ParameterSet.Config as cms\\n')\n\t_file = open('_tmpPy.py','w')\n\t_file.writelines(_lines)\n\t_file.close()\n\t# Append the working directory to do the import\n\tsys.path.append( os.getcwd() )\n\t#------------------------------------------------------------ \n\t\n\ttry:\n\t\tfrom _tmpPy import BINS\n\texcept ImportError:\n\t\tmessage = '\\033[1;31mError: There is no BINS in %s file. Are you sure this is a config python to do the fit?\\033[1;m' % configPy\n\t\tos.remove('_tmpPy.py')\n\t\traise ImportError, message\n\n\tvariables = BINS.parameterNames_()\n\t# Check if the variables introduced by the user are inside\n\t# the fit config python\n\tfor i in var:\n\t\tif i not in variables:\n\t\t\tos.remove('_tmpPy.py')\n\t\t\tmessage = \"\"\"\\033[1;31mError: The variable %s is not in the parameter BINS of the config python %s. \nCheck your config or change your input variable with --var option\\033[1;m \"\"\" % ( i, configPy)\n\t\t print message\n raise KeyError\n\n\t# All was fine. Remember: first variable is the pt-like (construct the weights respect it)\n\tPT = var[0]\n\tETA = var[1]\n\n\t#bins = BINS\n\ttry:\n\t\tos.remove( '_tmpPy.py' )\n\t\tos.remove( '_tmpPy.pyc' )\n\texcept OSError:\n\t\tpass\n\n\treturn BINS,PT,ETA", "def test_read_namespaced_build_config(self):\n pass", "def configuration():", "def sirsam_bs_conf(sirsam_bootstrap):\n return os.path.join(sirsam_bootstrap, 'bootstrapping.yaml')", "def configure_apispec(app):\n pass", "def build(self):\n cmakelist_prepend = '''\ninclude(${CMAKE_CURRENT_SOURCE_DIR}/../conanbuildinfo.cmake)\nCONAN_BASIC_SETUP()\noption(HPX_BUILD_EXAMPLES BOOL OFF)\noption(HPX_BUILD_TESTS BOOL OFF)\n'''\n \n replace_in_file(\"%s/CMakeLists.txt\" % self.folder, 'project(hpx CXX C)', 'project(hpx CXX C)\\n%s' % cmakelist_prepend)\n # Don't remove module path, keep the previous\n replace_in_file(\"%s/CMakeLists.txt\" % self.folder, 'set(CMAKE_MODULE_PATH', 'set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}')\n # replace_in_file(\"%s/src/CMakeLists.txt\" % self.folder, \"if(NOT MSVC)\", \"if(0)\") # Not handle boost Boost_SYSTEM_LIBRARY_DEBUG or Boost_SYSTEM_SERIALIZATION_DEBUG\n \n # Maybe make a PR providing a new option to disable autolink? 
link against libraries not directories\n replace_in_file(\"%s/cmake/HPX_SetupBoost.cmake\" % self.folder, \"hpx_library_dir(${Boost_LIBRARY_DIRS})\", \"hpx_libraries(${Boost_LIBRARIES})\") # No auto-linking\n \n replace_in_file(\"%s/src/CMakeLists.txt\" % self.folder, \"${hpx_MALLOC_LIBRARY}\", \"${hpx_MALLOC_LIBRARY} ${Boost_SERIALIZATION_LIBRARY}\") # Not append boost libs\n \n cmake = CMake(self.settings)\n \n # Build\n# \n# # NO build examples nor tests\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_EXAMPLES)\", \"if(FALSE)\")\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_DOCUMENTATION)\", \"if(FALSE)\")\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_TESTS)\", \"if(FALSE)\")\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_TOOLS)\", \"if(FALSE)\")\n# \n # CONFIGURE\n self.run(\"cd %s && mkdir _build\" % self.folder)\n configure_command = 'cd %s/_build && cmake .. %s ' % (self.folder, cmake.command_line)\n self.output.warn(\"Configure with: %s\" % configure_command)\n self.run(configure_command)\n # BUILD\n cores = \"-j3\" if self.settings.os != \"Windows\" else \"\"\n self.run(\"cd %s/_build && cmake --build . %s -- %s\" % (self.folder, cmake.build_config, cores))", "def create_conf_xml(self):\n path = os.path.join(\n self.buildout['buildout']['parts-directory'],\n self.name)\n if not os.path.isdir(path):\n os.makedirs(path)\n\n xml_path = os.path.join(path, 'uwsgi.xml')\n\n conf = \"\"\n for key, value in self.conf.items():\n if value.lower() in ('true', 'on', 'yes'):\n conf += \"<%s/>\\n\" % key\n elif value and value.lower() not in ('false', 'off', 'yes'):\n conf += \"<%s>%s</%s>\\n\" % (key, value, key)\n\n\n requirements, ws = self.egg.working_set()\n eggs_paths = [dist.location for dist in ws]\n eggs_paths.extend(self.get_extra_paths())\n # order preserving unique\n unique_egg_paths = []\n for p in eggs_paths:\n if p not in unique_egg_paths:\n unique_egg_paths.append(p)\n\n for path in map(realpath, unique_egg_paths):\n conf += \"<pythonpath>%s</pythonpath>\\n\" % path\n\n f = open(xml_path, 'w')\n f.write(\"<uwsgi>\\n%s</uwsgi>\" % conf)\n f.close()\n return xml_path", "def _find_config_root(self) -> str:\n location = [\"apache2.conf\", \"httpd.conf\", \"conf/httpd.conf\"]\n for name in location:\n if os.path.isfile(os.path.join(self.root, name)):\n return os.path.join(self.root, name)\n raise errors.NoInstallationError(\"Could not find configuration root\")", "def config():", "def config():", "def setup(base_path, root_module_name=\"caliper\"):\n if sys.modules.has_key(root_module_name):\n return\n _create_module_and_parents(root_module_name)\n imp.load_package(root_module_name, base_path)\n\n # allow locally installed third party packages to be found.\n sys.path.insert(0, os.path.join(base_path, \"site_packages\"))", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def initLibPath():\n libHash = {\n 'Framework': 1,\n 'UserControlleLib': 1,\n 'CaseLib': 1\n }\n\n binPath = os.path.split(os.path.realpath(__file__))[0]\n\n for key in libHash:\n sys.path.append(os.path.join(__getLibAbsPath(binPath, libHash[key]), key))", "def pfx2pp(p):\n if p in sys.path:\n return\n sys.path.insert(1, p)", "def find_service(iface, context, name):", "def build_configs():", "def test_ipam_prefixes_create(self):\n pass", "def load_configurations() :\n\n local_path = os.path.dirname(os.path.abspath(__file__))\n print(local_path)\n 
file_path = local_path + os.sep + 'conf.ini'\n parser = configparser.ConfigParser()\n\n if os.path.exists(file_path) :\n config = parser.read(file_path)\n else :\n parser['PATH'] = {}\n parser['PATH']['PATH_TO_DB'] = os.path.expanduser('~/inlusio_data/InlusioDB_Juni_2015.sqlite')\n parser['PATH']['PHYSIO_PATH'] = os.path.expanduser('~/inlusio_data')\n print('Creating new configuration file!!!')\n print('Please fit conf.ini to your local data path!')\n with open(file_path, 'w') as configfile:\n parser.write(configfile)\n\n return parser", "def meson(self, spec, prefix):\n configure(\"--prefix=\" + prefix, *self.configure_args())", "def configure(self, section):", "def configure_project():\n pass", "def get_pecan_config():\n filename = api_config.__file__.replace('.pyc', '.py')\n return filename", "def find_conf():\n path = os.path.abspath(os.path.expanduser(os.getcwd()))\n while path not in ('', '/'):\n conf_path = os.path.join(path, 'dataplicity.conf')\n if os.path.exists(conf_path):\n return conf_path\n path = os.path.dirname(path)\n return None", "def setup(app) -> Dict[str, Any]:\n app.add_config_value(\"uqbar_book_console_setup\", [], \"env\")\n app.add_config_value(\"uqbar_book_console_teardown\", [], \"env\")\n app.add_config_value(\n \"uqbar_book_extensions\", [\"uqbar.book.extensions.GraphExtension\"], \"env\"\n )\n app.add_config_value(\"uqbar_book_strict\", False, \"env\")\n app.add_config_value(\"uqbar_book_use_black\", False, \"env\")\n app.add_config_value(\"uqbar_book_use_cache\", True, \"env\")\n app.add_config_value(\"uqbar_book_block_options\", {}, \"env\")\n app.add_directive(\"book\", UqbarBookDirective)\n app.add_directive(\"book-defaults\", UqbarBookDefaultsDirective)\n app.add_directive(\"book-import\", UqbarBookImportDirective)\n\n for node_class in [uqbar_book_defaults_block, uqbar_book_import_block]:\n app.add_node(\n node_class,\n html=[skip_node, None],\n latex=[skip_node, None],\n text=[skip_node, None],\n )\n app.connect(\"builder-inited\", on_builder_inited)\n app.connect(\"config-inited\", on_config_inited)\n app.connect(\"doctree-read\", on_doctree_read)\n app.connect(\"build-finished\", on_build_finished)\n return {\n \"version\": uqbar.__version__,\n \"parallel_read_safe\": False,\n \"parallel_write_safe\": True,\n }", "def configure(obj):\n click.echo(\n \"Configuration through the `ali` command is currently unsupported. 
Please run `aliyun configure` instead.\"\n )", "def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")", "def base():\n print(CFG.base.path)", "def _salt_factories_config(request):\n log_server = request.config.pluginmanager.get_plugin(\"saltfactories-log-server\")\n return {\n \"code_dir\": saltfactories.CODE_ROOT_DIR.parent,\n \"inject_coverage\": True,\n \"inject_sitecustomize\": True,\n \"log_server_host\": log_server.log_host,\n \"log_server_port\": log_server.log_port,\n \"log_server_level\": log_server.log_level,\n \"system_install\": \"SALT_FACTORIES_SYSTEM_INSTALL\" in os.environ,\n }", "def home():\n if sys.prefix == sys.exec_prefix:\n return sys.prefix\n else:\n return ':'.join((sys.prefix, sys.exec_prefix))", "def _dump_prefix(guard: str) -> List[str]:\n\n return [\n f\"#ifndef {guard}\",\n f\"#define {guard}\",\n \"// <<< Use Configuration Wizard in Context Menu >>>\",\n \"#ifdef USE_APP_CONFIG\",\n '#include \"app_config.h\"',\n \"#endif\"\n ]", "def configure(self, options, conf):", "def test_ipam_prefixes_available_prefixes_create(self):\n pass", "def append_common(envin, content):\n # This is the original libconfig.h. However, in case somebody (like\n # pbdagcon) builds libpbdata in-place, we need to drop a copy of\n # libconfig.h wherever pbdata is actually built, which we will not\n # know until later. This can all be cleared up later, when we are\n # more clear about where things are built.\n libconfig_h = os.path.abspath(os.path.join(os.getcwd(), 'libconfig.h'))\n content += \"\"\"\nLIBCONFIG_H:=%s\n# Use PREFIX dir, if available.\nINCLUDES += ${PREFIX_INC}\nLIBS += ${PREFIX_LIB}\n\"\"\"%libconfig_h\n env = dict(envin)\n # Some extra defs.\n if 'PREFIX' in envin:\n PREFIX = envin['PREFIX']\n setenv(env, 'PREFIX_INC', os.path.join(PREFIX, 'include'))\n setenv(env, 'PREFIX_LIB', os.path.join(PREFIX, 'lib'))\n poss = [\n 'SH_LIB_EXT',\n 'EXTRA_LDFLAGS',\n 'PREFIX_LIB', 'PREFIX_INC',\n ]\n vals = ['%-20s := %s' %(k, v) for k,v in sorted(env.items()) if k in poss]\n return '\\n'.join([''] + vals + ['']) + content", "def include(name):\n env.configs = name.split(' ') + env.configs", "def config_file_setup(logger, cf_label, cf_from_cli=None):\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n\n logger.debug(\"config file paths: {}\".format(config_file_paths))\n\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from 
{}'.format(config_file_path))\n return config_file_path", "def update_config_windows(config): # pragma: windows\n out = []\n if not config.has_section('windows'):\n config.add_section('windows')\n # Find paths\n clibs = [('libzmq_include', 'zmq.h', 'The full path to the zmq.h header file.'),\n ('libzmq_static', 'zmq.lib', 'The full path to the zmq.lib static library.'),\n ('czmq_include', 'czmq.h', 'The full path to the czmq.h header file.'),\n ('czmq_static', 'czmq.lib', 'The full path to the czmq.lib static library.')]\n for opt, fname, desc in clibs:\n if not config.has_option('windows', opt):\n fpath = locate_file(fname)\n if fpath:\n print('located %s: %s' % (fname, fpath))\n config.set('windows', opt, fpath)\n else:\n out.append(('windows', opt, desc))\n return out", "def configure_nopbbam(envin):\n HDF5_INC = envin.get('HDF5_INC')\n if not HDF5_INC:\n HDF5_INC = envin['HDF5_INCLUDE']\n HDF5_LIB = envin['HDF5_LIB']\n content1 = compose_defines_with_hdf(HDF5_INC, HDF5_LIB)\n content1 = append_common(envin, content1)\n content2 = compose_libconfig(pbbam=False)\n update(content1, content2)", "def test_ipam_prefixes_update(self):\n pass", "def set_prefix(prefix):\n PLUGINS.set_prefix(prefix)", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew 
/etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def test_ipam_prefixes_available_ips_create(self):\n pass", "def configure(conf): # pylint: disable=too-many-branches,too-many-locals\n conf.env.VERSION = VERSION\n conf.env.APPNAME = APPNAME\n conf.msg(\"Project\", f\"{conf.env.APPNAME}-{conf.env.VERSION}\")\n conf.load(\"python\")\n conf.check_python_version((3, 6))\n conf.undefine(\"PYTHONDIR\")\n conf.undefine(\"PYTHONARCHDIR\")\n\n base_err_msg = (\n \"wscript's VERSION attribute ({}) and version information in file {} \"\n \"({}) do not match.\"\n )\n\n version_file = conf.path.find_node(\"VERSION\")\n version_info = version_file.read_json()\n version_file_ver = version_info[\"native Lua\"]\n if not VERSION == version_file_ver:\n conf.fatal(base_err_msg.format(VERSION, version_file, version_file_ver))\n\n conf.env.lua_src_version = version_info[\"lua\"]\n conf.env.lua_tests_version = version_info[\"tests\"]\n conf.msg(\"native Lua version\", VERSION)\n conf.msg(\"Lua version\", conf.env.lua_src_version)\n conf.msg(\"Lua tests version\", conf.env.lua_tests_version)\n conf.env.generic = conf.options.generic\n conf.msg(\"Platform\", conf.options.generic or PLATFORM)\n conf.load(\"gnu_dirs\")\n\n conf.env.WAF_CONFIG_H_PRELUDE = (\n conf.path.find_node(os.path.join(\"cfg\", \"prelude.h.template\"))\n .read()\n .replace(\"{{ VERSION }}\", VERSION)\n .replace(\"{{ REPO_URL }}\", REPO_URL)\n )\n conf.write_config_header(configfile=\"waf_build_config.h\")\n platform_configs = conf.path.find_node(\n os.path.join(\"cfg\", \"platforms.json\")\n ).read_json()\n is_known = platform_configs[\"known-platforms\"].get(PLATFORM, False)\n if not is_known:\n pass # TODO\n if conf.options.generic:\n pass # TODO\n\n schema_compiler_setup = conf.path.find_node(\n os.path.join(\"cfg\", \"compiler-cfg.schema.json\")\n ).read_json()\n cfgs = conf.path.ant_glob(\n \"cfg/**/*.json\",\n excl=[\"**/*.schema.json\", \"cfg/generic.json\", \"cfg/platforms.json\"],\n )\n Logs.debug(\", \".join(i.relpath() for i in cfgs))\n for i in cfgs:\n valid = validate_json_schema(i.read_json(), schema_compiler_setup)\n if not valid:\n Logs.warn(f\"{i.relpath()} is not a valid compiler setup.\")\n generic_build = conf.path.find_node(os.path.join(\"cfg\", \"generic.json\")).read_json()\n for _, v in generic_build.items():\n validate_json_schema(v, schema_compiler_setup)\n\n conf.load(\"compiler_c\")\n\n # load platform-compiler configuration\n cc_config_file = os.path.join(\n \"cfg\", PLATFORM, f\"{PLATFORM}_{conf.env.CC_NAME}.json\"\n )\n cc_config = conf.path.find_node(cc_config_file).read_json()\n for i, val in cc_config.items():\n if i.isupper() or \"_PATTERN\" in i:\n conf.env[i] = val\n # add the build directory to includes as it stores the configuration file\n conf.env.append_unique(\"INCLUDES\", [conf.path.get_bld().abspath()])\n\n # validate C standard setting\n conf.env.C_STD = cc_config[\"std\"][\"opt\"] + cc_config[\"std\"][\"val\"]\n if conf.options.c_std: # setting might be overwritten on commandline\n conf.env.C_STD = conf.options.c_std\n conf.env.append_unique(\"CFLAGS\", [conf.env.C_STD])\n if \"89\" in conf.env.C_STD:\n if PLATFORM == \"win32\" and conf.env.CC_NAME.lower() == \"msvc\":\n Logs.warn(\"This will NOT effect msvc-builds on win32.\")\n else:\n Logs.warn(\n \"C89 does not guarantee 64-bit integers for 
Lua.Adding define: LUA_USE_C89\"\n )\n Logs.warn(\"Adding define: LUA_USE_C89\")\n conf.define(\"LUA_USE_C89\", 1) # TODO check for waf update\n\n min_c = \"#include<stdio.h>\\nint main() {\\n return 0;\\n}\\n\"\n\n lib_tests = []\n for lib in cc_config.get(\"libs\", []):\n lib_tests.append(\n {\n \"lib\": lib,\n \"uselib_store\": lib.upper(),\n \"msg\": f\"Checking for library '{lib}'\",\n }\n )\n\n conf.multicheck(\n {\"fragment\": min_c, \"execute\": True, \"msg\": \"Minimal C program\"},\n {\n \"fragment\": min_c,\n \"execute\": True,\n \"cflags\": conf.env.C_STD,\n \"msg\": f\"Checking c-standard '{conf.env.C_STD}'\",\n },\n *lib_tests,\n {\n \"fragment\": min_c,\n \"execute\": True,\n \"cflags\": conf.env.C_STD,\n \"use\": [i.upper() for i in cc_config.get(\"libs\", [])],\n \"msg\": \"Checking for all libraries\",\n },\n msg=\"Validating compiler setup\",\n mandatory=True,\n run_all_tests=True,\n )\n if cc_config.get(\"libs\", []):\n conf.env.USE_LIBS = [i.upper() for i in cc_config[\"libs\"]]", "def pibooth_startup(cfg, app):", "def make_module_req_guess(self):\n guesses = super(EB_GROMACS, self).make_module_req_guess()\n guesses.update({\n 'LD_LIBRARY_PATH': [self.lib_subdir],\n 'LIBRARY_PATH': [self.lib_subdir],\n 'PKG_CONFIG_PATH': [os.path.join(self.lib_subdir, 'pkgconfig')],\n })\n return guesses", "def _expand_prefix(prefix, configs):\n return subst_vars(prefix, configs)", "def get_packages_with_prefixes():\n return get_resources('packages')", "def test_list_namespaced_build_config(self):\n pass", "def configure_step(self):\n\n binutils_root = get_software_root('binutils')\n gcc_root = get_software_root('GCCcore') or get_software_root('GCC')\n gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')\n\n # only patch Bazel scripts if binutils & GCC installation prefix could be determined\n if binutils_root and gcc_root:\n\n res = glob.glob(os.path.join(gcc_root, 'lib', 'gcc', '*', gcc_ver, 'include'))\n if res and len(res) == 1:\n gcc_lib_inc = res[0]\n else:\n raise EasyBuildError(\"Failed to pinpoint location of GCC include files: %s\", res)\n\n gcc_lib_inc_fixed = os.path.join(os.path.dirname(gcc_lib_inc), 'include-fixed')\n if not os.path.exists(gcc_lib_inc_fixed):\n raise EasyBuildError(\"Derived directory %s does not exist\", gcc_lib_inc_fixed)\n\n gcc_cplusplus_inc = os.path.join(gcc_root, 'include', 'c++', gcc_ver)\n if not os.path.exists(gcc_cplusplus_inc):\n raise EasyBuildError(\"Derived directory %s does not exist\", gcc_cplusplus_inc)\n\n # replace hardcoded paths in CROSSTOOL\n regex_subs = [\n (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')),\n (r'(cxx_builtin_include_directory:.*)/usr/lib/gcc', r'\\1%s' % gcc_lib_inc),\n (r'(cxx_builtin_include_directory:.*)/usr/local/include', r'\\1%s' % gcc_lib_inc_fixed),\n (r'(cxx_builtin_include_directory:.*)/usr/include', r'\\1%s' % gcc_cplusplus_inc),\n ]\n for tool in ['ar', 'cpp', 'dwp', 'gcc', 'ld']:\n path = which(tool)\n if path:\n regex_subs.append((os.path.join('/usr', 'bin', tool), path))\n else:\n raise EasyBuildError(\"Failed to determine path to '%s'\", tool)\n\n apply_regex_substitutions(os.path.join('tools', 'cpp', 'CROSSTOOL'), regex_subs)\n\n # replace hardcoded paths in (unix_)cc_configure.bzl\n regex_subs = [\n (r'-B/usr/bin', '-B%s' % os.path.join(binutils_root, 'bin')),\n (r'\"/usr/bin', '\"' + os.path.join(binutils_root, 'bin')),\n ]\n for conf_bzl in ['cc_configure.bzl', 'unix_cc_configure.bzl']:\n filepath = os.path.join('tools', 'cpp', conf_bzl)\n if 
os.path.exists(filepath):\n apply_regex_substitutions(filepath, regex_subs)\n else:\n self.log.info(\"Not patching Bazel build scripts, installation prefix for binutils/GCC not found\")\n\n # enable building in parallel\n env.setvar('EXTRA_BAZEL_ARGS', '--jobs=%d' % self.cfg['parallel'])", "def __init__(self):\n self.ext_folder = ckan_config.get('ckanext.needupdate.ext_folder', '/usr/lib/ckan/default/src')\n self.ext_prefix = ckan_config.get('ckanext.needupdate.ext_folder', 'ckanext-')\n self.ext_sufix = ckan_config.get('ckanext.needupdate.ext_folder', '')", "def test_find_config_cur_dir(self, in_tmp_path):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, \"\")", "def register_oslo_configs(conf):\n conf.register_opts(_get_oslo_configs())", "def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")", "def setup_confighelper(self):\n self.cfghelper = cfgmodule.MCfgModule()\n self.cfghelper.load_configfiles(self.configname, self.get_pkgdirimp_config())", "def findLibraryPath():\n path = os.path.split(os.path.abspath(__file__))[0]\n\n if os.path.exists(os.path.join(path, 'lib/header_primaryHDU.txt')):\n return os.path.join(path, 'lib')\n elif os.path.exists(os.path.join(path, 'header_primaryHDU.txt')):\n return path\n elif os.path.exists('header_primaryHDU.txt'):\n return './'\n else:\n raise IOError(\"Cannot find header files. Called from findLibraryPath() in sdfits.py\")", "def _get_MindtPy_config():\n CONFIG = ConfigBlock('MindtPy')\n\n _add_common_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_bound_configs(CONFIG)\n _add_roa_configs(CONFIG)\n return CONFIG", "def getsitepackages():\n # For now used only on Windows. 
Raise Exception for other platforms.\n if is_win:\n pths = [os.path.join(sys.prefix, 'Lib', 'site-packages')]\n # Include Real sys.prefix for virtualenv.\n if is_virtualenv:\n pths.append(os.path.join(base_prefix, 'Lib', 'site-packages'))\n return pths\n else:\n # TODO Implement for Python 2.6 on other platforms.\n raise NotImplementedError()", "def configure(self):\n pass", "def configure(self):\n pass", "def bootstrap_default():\n\treturn default_configuration", "def configure(self):\r\n pass", "def configure(self, conf):\n return", "def _read_uconf(self):\n fname = self.gen_conf.conf_file_name\n fdir = self.gen_conf.conf_file_path\n fpath = os.path.join(fdir, fname)\n \n from pathlib import Path\n cfile = Path(fpath) \n \n if cfile.exists() & cfile.is_file():\n\n self._load_uconf(fpath)\n \n else:\n if cfile.exists():\n raise Exception(\"Configuration file \"+fpath+\" seems to exist\"+\n \" but it is not a file\")\n else:\n print(\"Warning: Configuration file \"+fpath+\" does not exit\")\n print(\"Warning: Placing a default configuration are using it\")\n \n import pkg_resources\n\n resource_package = \"quantarhei\" # Could be any module/package name\n resource_path = '/'.join(('core', 'conf', 'qrhei.py')) \n content = pkg_resources.resource_string(resource_package,\n resource_path)\n\n with open(fpath, \"w\") as f:\n f.write(content.decode(\"utf-8\"))\n \n self._load_uconf(fpath)\n \n #printlog(\"Configuration file: \", fpath, \"loaded\", loglevel=9) ", "def setup(app):\r\n app.add_config_value('rosmsg_path_root', [], 'env')\r\n app.add_directive(\"ros_message\", MessageDirective)\r\n app.connect('config-inited', on_config_inited)\r\n return {\r\n 'version': __version__,\r\n }", "def get_bindir_options(previous=None) -> Dict[str, Tuple[Path, str]]:\n options = {} # key is an option name, value is (optpath, optinfo)\n if previous is not None and os.access(previous, os.W_OK):\n # Make previous bindir as the first option\n options[\":prev\"] = (previous, \"previously selected bindir\")\n if within_flopy: # don't check is_dir() or access yet\n options[\":flopy\"] = (flopy_appdata_path / \"bin\", \"used by FloPy\")\n # Python bin (same for standard or conda varieties)\n py_bin = Path(sys.prefix) / (\n \"Scripts\" if get_ostag().startswith(\"win\") else \"bin\"\n )\n if py_bin.is_dir() and os.access(py_bin, os.W_OK):\n options[\":python\"] = (py_bin, \"used by Python\")\n home_local_bin = Path.home() / \".local\" / \"bin\"\n if home_local_bin.is_dir() and os.access(home_local_bin, os.W_OK):\n options[\":home\"] = (home_local_bin, \"user-specific bindir\")\n local_bin = Path(\"/usr\") / \"local\" / \"bin\"\n if local_bin.is_dir() and os.access(local_bin, os.W_OK):\n options[\":system\"] = (local_bin, \"system local bindir\")\n # Windows user\n windowsapps_dir = Path(\n os.path.expandvars(r\"%LOCALAPPDATA%\\Microsoft\\WindowsApps\")\n )\n if windowsapps_dir.is_dir() and os.access(windowsapps_dir, os.W_OK):\n options[\":windowsapps\"] = (windowsapps_dir, \"User App path\")\n\n # any other possible OS-specific hard-coded locations?\n if not options:\n raise RuntimeError(\"could not find any installable folders\")\n\n return options", "def get_packages_by_prefix(prefix: str) -> Dict[str, str]:\n\n with request.urlopen(PYPI_SIMPLE_API_URL) as response:\n html = response.read().decode()\n\n return {\n name: PYPI_SIMPLE_API_URL + url\n for url, name in re.findall(\n f'<a href=\"/simple/(.+)\">({prefix}.*)</a>', html\n )\n }", "def test_find_config_way_up(self, in_tmp_path):\n with 
open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n\n subdirs = [\"foo\", \"bar\", \"snap\", \"crackle\", \"pop\"]\n\n for sd in subdirs:\n os.mkdir(sd)\n os.chdir(sd)\n\n # Verify our current working dir\n assert_paths_equal(os.getcwd(), in_tmp_path.joinpath(*subdirs))\n\n path, rel, _ = scuba.config.find_config()\n assert_paths_equal(path, in_tmp_path)\n assert_paths_equal(rel, join(*subdirs))", "def _generate_slave_config(project: PyfmuProject):\n return project.project_configuration", "def set_syspath(self, hasal_dir):\n library_path = os.path.join(hasal_dir, \"lib\", \"sikuli\")\n sys.path.append(library_path)\n return library_path", "def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path", "def bootstraps():\n \n requirejs = open(\"lib/vendor/require.js\").read()\n requirejs_node = open(\"lib/vendor/require.node.js\").read()\n \n namespace = open(\"lib/global.js\").read()\n \n adapters = os.listdir(\"lib/adapters/\")\n \n def listSources(adapterpath):\n sources = []\n for (path, dirs, files) in os.walk(\"lib/adapters/%s/\"%adapterpath):\n for f in files:\n if re.match('.*\\.js$', f):\n sources+=[os.path.join(path.replace(\"lib/adapters/%s/\"%adapterpath,\"\"),f)[0:-3]]\n \n sources.remove(\"global\")\n try:\n sources.remove(\"bootstrap\")\n except:\n pass\n \n return sources\n \n \n for c in adapters:\n # skip useless directories\n if( re.match('\\.DS_Store', c) ):\n continue\n sources = {}\n \n namespace_adapter = open(\"lib/adapters/%s/global.js\"%c).read()\n \n # todo replace by some jseval().\n adapter_deps = re.search(\"J(oshfire)?\\.adapterDeps\\s*\\=\\s*([^\\;]+)\\;\",namespace_adapter)\n \n deps = [c]\n if adapter_deps:\n deps += json.loads(adapter_deps.group(2).replace(\"'\",'\"'))\n \n for d in deps:\n sources[d] = listSources(d)\n \n patched_namespace = namespace\n patched_namespace = patched_namespace.replace(\"JOSHFIRE_REPLACEME_ADAPTER_MODULES\",json.dumps(sources))\n patched_namespace = patched_namespace.replace(\"JOSHFIRE_REPLACEME_ADAPTER_ID\",json.dumps(c))\n \n bootstrap = __getCopyrightHeader() + \"\\n\\n\"\n\n if c==\"node\":\n bootstrap += 
patched_namespace+namespace_adapter+requirejs+requirejs_node+open(\"lib/adapters/%s/global.exec.js\"%c).read()\n \n #patch needed in require.js\n bootstrap = bootstrap.replace(\"var require, define;\",\"\")\n \n else:\n bootstrap += requirejs+patched_namespace+namespace_adapter\n \n print \"Writing %s ...\" % (\"lib/adapters/%s/bootstrap.js\"%c)\n open(\"lib/adapters/%s/bootstrap.js\"%c,\"w\").write(bootstrap)\n open(\"lib/adapters/%s/modules.json\"%c,\"w\").write(json.dumps(sources))", "def compose_defines_pacbio(envin):\n env = dict()\n setenv(env, 'SHELL', 'bash')\n setifenvf(env, envin, 'OS_STRING', get_OS_STRING)\n setifenv(env, envin, 'LIBPBDATA_INC', '../pbdata')\n setifenv(env, envin, 'LIBPBIHDF_INC', '../hdf')\n setifenv(env, envin, 'LIBBLASR_INC', '../alignment')\n setifenv(env, envin, 'LIBPBDATA_LIB', '../pbdata/')\n setifenv(env, envin, 'LIBPBIHDF_LIB', '../hdf/')\n setifenv(env, envin, 'LIBBLASR_LIB', '../alignment/')\n if 'nohdf' in envin:\n env['nohdf'] = envin['nohdf']\n # Otherwise, do not define it at all. TODO(CD): Remove nohdf, as it is not used.\n nondefaults = set([\n 'CXX', 'AR',\n 'HDF5_INC', 'HDF5_LIB',\n 'PBBAM_INC', 'PBBAM_LIB',\n 'HTSLIB_INC', 'HTSLIB_LIB',\n 'BOOST_INC',\n 'ZLIB_LIB',\n 'GCC_LIB',\n 'GTEST_INC', 'GTEST_SRCDIR',\n ])\n update_env_if(env, envin, nondefaults)\n return compose_defs_env(env)", "def pkg_config(name):\n\n proc = subprocess.Popen(['pkg-config', '--cflags', '--libs', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n raw_config, err = proc.communicate()\n if proc.wait():\n return\n\n config = {}\n for chunk in raw_config.strip().split():\n if chunk.startswith('-I'):\n config.setdefault('include_dirs', []).append(chunk[2:])\n elif chunk.startswith('-L'):\n config.setdefault('library_dirs', []).append(chunk[2:])\n elif chunk.startswith('-l'):\n config.setdefault('libraries', []).append(chunk[2:])\n elif chunk.startswith('-D'):\n name = chunk[2:].split('=')[0]\n config.setdefault('define_macros', []).append((name, None))\n\n return config" ]
[ "0.5301707", "0.52438116", "0.51499075", "0.5118139", "0.50677866", "0.5059345", "0.5040578", "0.50159454", "0.49063885", "0.48804155", "0.48284978", "0.4827175", "0.4806287", "0.4769773", "0.47503495", "0.47349274", "0.47349274", "0.47349274", "0.47349274", "0.4719681", "0.47054222", "0.4700979", "0.46805796", "0.4662344", "0.46299243", "0.46288693", "0.46256593", "0.46201238", "0.46129155", "0.4596091", "0.45832852", "0.45809367", "0.45775273", "0.4575074", "0.45715857", "0.4570601", "0.4566786", "0.4566786", "0.45580277", "0.45489332", "0.45255023", "0.45244914", "0.45152855", "0.4504686", "0.44975504", "0.44814855", "0.44808146", "0.4475937", "0.44554934", "0.44519135", "0.44506428", "0.44501638", "0.44483238", "0.444482", "0.44444457", "0.44366693", "0.4435217", "0.44351667", "0.4433157", "0.44299895", "0.4429302", "0.44280216", "0.4412777", "0.44075474", "0.44057268", "0.4397153", "0.43919346", "0.43861264", "0.43721023", "0.43623656", "0.43594286", "0.4355373", "0.43512654", "0.4349747", "0.4348146", "0.43439308", "0.43419454", "0.4336927", "0.43364707", "0.43285853", "0.43178934", "0.43171614", "0.43157068", "0.43083987", "0.43049833", "0.43049833", "0.43049163", "0.43024987", "0.42980102", "0.42907983", "0.4285139", "0.4281467", "0.4276776", "0.42754474", "0.42710343", "0.42709386", "0.42624235", "0.4260456", "0.42596012", "0.42593753" ]
0.67315805
0
Transliterate and clean username by removing any unsupported character
def clean_username(value): if NO_ASCII_REGEX.search(value): value = unidecode(value) value = NO_ASCII_REGEX.sub('', value) value = NO_SPECIAL_REGEX.sub('', value) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_username(username):\n\n regex = compile(UnicodeUsernameValidator.regex)\n normalized_username = \"\"\n for char in username:\n if not regex.match(char):\n continue\n normalized_username += char\n return normalized_username", "def clean_username(self, username):\n return username.lower()", "def prepare_username(username):\n username = username.upper()\n\n if not username.startswith('\\-'):\n return username\n\n return username.replace('\\-', '-', 1)", "def normalize_username(value):\n return value.lower()", "def raw_username(username):\n sitewide_domain = settings.HQ_ACCOUNT_ROOT\n username = str(username or '')\n username = username.lower()\n try:\n u, d = username.split(\"@\")\n except Exception:\n return username\n if d.endswith('.' + sitewide_domain):\n return u\n else:\n return username", "def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name", "def normalize_name(word):\n return word.strip(\"0123456789!@#$%^&*_() +=\\/?<>,.`~;:\").lower().replace(\" \",\"_\")", "def _username_from_name(self, name):\r\n return name.replace(' ', '_')", "def clean_username(self):\n if self.edit_user is None:\n # checks for alnum and that this user doesn't already exist\n return super(RegisterUserForm, self).clean_username()\n # just checks for alnum\n if not self.cleaned_data['username'].isalnum():\n raise forms.ValidationError(_(u'Please enter a username containing only letters and numbers.'))\n return self.cleaned_data['username']", "def _sanitizeName(name):\n\n name = name.lower() # lower.\n name = name.replace('.','') # remove periods.\n name = name.replace('-','') # remove dashes.\n name = name.replace(\"'\",'') # remove apostrophies.\n # return it.\n return name", "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def make_name2(u):\n\treturn re.sub(r'\\s+', '', u).lower()", "def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()", "def clean_username(self):\n data = self.cleaned_data['username']\n if '@' in data or '|' in data or ' ' in data or '+' in data:\n raise forms.ValidationError(_(u'Usernames should not have special characters.'))\n try:\n user = User.objects.get(username__exact=self.cleaned_data['username'])\n except User.DoesNotExist:\n return self.cleaned_data['username']\n raise forms.ValidationError(_(u'This username is already taken. 
Please choose another.'))", "def _scrub(self, string):\n if not string.isalnum():\n raise ValueError(\"Table name cannot include non-alphanumerics.\")\n return string", "def sanitize(value):\n from re import sub\n from unicodedata import normalize\n value = normalize('NFKD', value).encode('ascii', 'ignore')\n value = sub('[^\\w\\s\\.-]', '', value.decode('utf-8')).strip().lower()\n return sub('[-_\\s]+', '_', value)", "def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()", "def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text", "def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())", "def processword(word):\n word = word.lower()\n word = word.strip('()?,!`.-:\\\"\\n \\'')\n return word", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def to_clean_str(s: str) -> str:\n return re.sub(\"[^a-zA-Z0-9]\", \"\", s).lower()", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def clean_string(value):\n\treturn re.sub(r'[^a-zA-Z0-9_.]', '', str(value))", "def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()", "def invalid_username(username):\n word_letters = re.sub('[^a-zA-Z-0-9]+', '', str(username))\n if any(item.isalpha() for item in word_letters):\n return False\n return True", "def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval", "def clean(s):\n punctuations = \"-,.?!;:\\n\\t()[]\\\"-\"\n return s.translate(None, string.punctuation).lower()", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def clean_text(txt):\n\n cleaned_txt = ''\n for character in txt:\n if character not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVQXWY ': #punctuation\n character = ''\n cleaned_txt += character\n elif character == character.upper(): #uppercase\n character = character.lower()\n cleaned_txt += character\n else:\n cleaned_txt += character\n return cleaned_txt", "def normalizeName(fullName):\n\n fullName = fullName.lower()\n\n fullName = fullName.split('\\n')[0].strip()\n fullName = re.sub(r'\\([^()]*\\)', '', fullName) # Remove parenthesis\n fullName = re.sub(r\"\\'[^'']*\\'\", '', fullName) # Remove commas\n fullName = unidecode.unidecode(fullName) # Remove diacritics\n fullName = fullName.replace(' ', '_')\n # Remove all non-alphanumerics characters (except _)\n fullName = re.sub(r'\\W+', '', fullName)\n fullName = re.sub(r\"[_]+\", '_', fullName)\n return fullName", "def clean_username (self):\n return self.instance.username", "def validate_username(username):\n if re.match(r\"^[a-zA-Z0-9åäöÅÄÖ]{3,20}$\", username):\n return True\n return False", "def clean_message(message: str) -> str:\n message = message.upper()\n new_message = ''\n\n for char in message:\n\n if char.isalpha():\n new_message += char\n message = new_message\n\n else:\n message = new_message\n\n return message", "def _clean_name(tweet):\n return tweet.split(u': ', 1)[1]", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def clean_str(cleaned_tweet):\n # string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", cleaned_tweet)\n string = re.sub(r'(.)\\1+', r'\\1\\1', cleaned_tweet)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! 
\", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"“”¨«»®´·º½¾¿¡§£₤‘’\", \"\", string)\n return string.strip().lower()", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def fix_characters(title):\n return re.sub('[^0-9a-zA-Z]+', ' ', title)", "def _format(string):\n return str(filter(str.isalnum, string)).lower()", "def _convertUselessWords(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n #Remove www.* or https?://* \n s = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',s) \n #Remove @username\n s = re.sub('@[^\\s]+','',s) \n #Replace #word with word \n s = re.sub(r'#([^\\s]+)', r'\\1', s) \n \n return s", "def sanitize(string):\n retval = string.lower()\n retval = re.sub(r\"[^\\w\\s]\", '', retval)\n retval = re.sub(r\"\\s+\", '_', retval)\n return retval", "def resolve_username(obj, _):\n return obj.username.decode()", "def clean_user_names(record):\n if 'first_name' in record and 'last_name' in record:\n #Remove all special characters from first_name/last name\n lower_first_name = record['first_name'].replace('-', '')\\\n .replace('_', '').replace('[', '')\\\n .replace(']', '').replace(' ', '')\\\n .lower()\n lower_last_name = record['last_name'].replace('-', '')\\\n .replace('_', '').replace('[', '')\\\n .replace(']', '').replace(' ', '')\\\n .lower()\n return lower_first_name, lower_last_name\n else:\n return None, None", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def clean_text(text):\n new_text = \"\"\n text = text.lower()\n for character in text:\n if character.isalpha():\n new_text = new_text + character\n return new_text", "def normalize_name(self, value):\n import unicodedata\n import re\n\n self.log('Converting string %s' % value)\n \n # Double try in name conversion\n try:\n value = unicodedata.normalize('NFKD', u'%s' % value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n except:\n self.log('Conversion error: \\n%s' % traceback.format_exc())\n\n value = unicode(value, 'ascii', errors='ignore')\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n value = re.sub('[-\\s]+', '-', value)\n\n\n self.log('Conversion finished to %s' % value)\n\n return value", "def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text", "def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = 
re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def normalize(sent):\n sent = re.sub(r'(?:@[\\w_]+)', \"user\", sent)\n sent = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', \"url\", sent)\n #sent = re.sub(r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", \"hashtag\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n return sent", "def safe_name(self, name):\n\n output = \"\"\n for char in name:\n if char not in '\\\\/<>:\"|?*':\n output += char\n\n return output", "def str_clean(string):\n\timport re\n\treturn re.sub(ur'[\\W_]+', u'', string, flags=re.UNICODE).lower()", "def clean_username(self):\n username = self.cleaned_data.get(\"username\")\n if username.lower() != slugify(username).lower():\n raise forms.ValidationError(\n ugettext(\"Username can only contain letters, numbers, dashes \"\n \"or underscores.\"))\n lookup = {\"username__iexact\": username}\n try:\n User.objects.exclude(id=self.instance.id).get(**lookup)\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(\n ugettext(\"This username is already registered\"))", "def sanitize_string(unclean_string: str) -> str:\n return unidecode(unclean_string)", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def normalize_text(text):\n punctuation = '!\"#$%&\\'()*+,./:;<=>?@[\\\\]^_`{|}~'\n return text.lower().strip().translate(None, punctuation)", "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n return string", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def python_safe_name(s):\n no_punctuation = re.compile(r'[\\W_]', re.MULTILINE).sub\n s = s.lower()\n s = no_punctuation(' ', s)\n s = '_'.join(s.split())\n if py2 and isinstance(s, unicode):\n s = s.encode('ascii', 'ignore')\n return s", "def prepare_for_hashing(text):\n if not text:\n return ''\n return text.translate(CHARS_TO_DELETE).lower()", "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "def clean_str(self,string):\r\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n string = re.sub(r\"\\'s\", \" \\'s\", string)\r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n string = re.sub(r\"\\'re\", \" \\'re\", string)\r\n string = re.sub(r\"\\'d\", \" \\'d\", string)\r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n string = re.sub(r\",\", \" , \", string)\r\n string = re.sub(r\"!\", \" ! \", string)\r\n string = re.sub(r\"\\(\", \" \\( \", string)\r\n string = re.sub(r\"\\)\", \" \\) \", string)\r\n string = re.sub(r\"\\?\", \" \\? 
\", string)\r\n string = re.sub(r\"\\s{2,}\", \" \", string)\r\n return string.strip().lower()", "def clean_message(message):\n \n new_message = ''\n for char in message:\n if char.isalpha():\n new_message = new_message + char.upper()\n return new_message", "def _removeDiacritics(self, text):\n norm_txt = unicodedata.normalize('NFD', text)\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n # remove accents and other diacritics, replace spaces with \"_\" because identifiers can't have spaces\n no_spaces = unicodedata.normalize(\n 'NFC', shaved).lower().replace(\" \", \"_\")\n final_text = no_spaces\n # only allow [a-z], [0-9] and _\n p = re.compile('[a-z0-9_]+')\n for i in range(0, len(no_spaces)):\n if not (p.match(no_spaces[i])):\n final_text = final_text[:i] + '_' + final_text[i+1:]\n # i the first char is not a-z then replaceit (all identifiers must start with a letter)\n p2 = re.compile('[a-z]+')\n if not p2.match(final_text[0]):\n final_text = 'a' + final_text[1:]\n return final_text", "def sanitize_name(name: str) -> str:\n return re.sub(r\"[^A-Za-z0-9_-]\", \"-\", name)[0:128]", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def normalize_string(string, logger_):\n regex = r\"[a-zA-Z]\"\n invalid_regex = r\"[\\W_\\d+]\"\n search_pattern = re.search(invalid_regex, string)\n if search_pattern:\n logger.log(\n level=\"warning\",\n message='\"{}\" String has invalid ' \"characters\".format(string),\n logger=logger_,\n )\n matches = re.finditer(regex, string, re.MULTILINE)\n return \"\".join(match.group() for match in matches)", "def clean_word(word: str) -> str:\n\n cleaned_word = ''\n for char in word.lower():\n if char.isalnum():\n cleaned_word = cleaned_word + char\n return cleaned_word", "def clean_str(s):\n s = re.sub(r\"[^\\\\p{L}\\\\s]\", \" \", s) # This removes accents, which we want.\n s = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", s) #This removes accents, which we want.\n s = re.sub(r\"\\'s\", \"\", s)\n s = re.sub(r\"\\'ve\", \"have\", s)\n s = re.sub(r\"n\\'t\", \" not\", s)\n s = re.sub(r\"\\'re\", \" are\", s)\n s = re.sub(r\"\\'d\", \" would\", s)\n s = re.sub(r\"\\'ll\", \" will\", s)\n s = re.sub(r\",\", \"\", s) #s = re.sub(r\",\", \" ,\", s)\n s = re.sub(r\"!\", \"\", s)\n # s = re.sub(r\"\\(\", \"\\(\", s)\n # s = re.sub(r\"\\)\", \"\\) \", s)\n s = re.sub(r\"\\?\", \"\", s)\n s = re.sub(r\"\\s{2,}\", \" \", s)\n s = re.sub(r\" \", \" \", s)\n return s.strip().lower()", "def removeAtUser(text):\n text = re.sub('@[^\\s]+','',text)\n return text", "def clean_str(string):\n words = string.split(' ')\n for idx, word in enumerate(words):\n if word == '@USER' or word == 'URL':\n continue\n elif len(word) > 0 and word[0] == '@':\n words[idx] = '@USER'\n continue\n\n word = re.sub(r'^https?:\\/\\/.*', 'URL', word)\n word = re.sub(r\"[^A-Za-z0-9()@,!?\\'\\`]\", \" \", word)\n word = re.sub(r\"\\'s\", \" \\'s\", word)\n word = re.sub(r\"\\'ve\", \" have\", word)\n word = re.sub(r\"n\\'t\", \" not\", word)\n word = re.sub(r\"\\'re\", \" are\", word)\n word = re.sub(r\"\\'d\", \" \\'d\", word)\n word = re.sub(r\"\\'ll\", \" will\", word)\n word = re.sub(r\",\", \" , \", word)\n word = re.sub(r\"!\", \" ! \", word)\n word = re.sub(r\"\\(\", \" \\( \", word)\n word = re.sub(r\"\\)\", \" \\) \", word)\n word = re.sub(r\"\\?\", \" \\? 
\", word)\n word = re.sub(r\"\\s{2,}\", \" \", word)\n words[idx] = word.strip().lower()\n return words", "def clean_username(self):\n\t\tusername = self.cleaned_data['username']\n\t\tif User.objects.filter(username=username):\n\t\t\traise forms.ValidationError('username de usuario ya registrado.')\n\t\treturn username", "def sanitize_txt(x):\n return '_'.join(smart_split(x.lower()))", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def cleanName(name):\n return ''.join(char for char in name.lower() if char in (string.digits + string.ascii_lowercase))", "def remove_uni(s):\n s2 = ''\n if s.__contains__(\"u'\"):\n s2 = s.replace(\"u'\", \"'\")\n elif s.__contains__('u\"'):\n s2 = s.replace('u\"', '\"')\n return s2", "def strip_other_charcter():\n pass", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def sanitize(instring):\r\n return instring.encode('ascii','replace')", "def cleanText_letters(text):\n text = re.sub(r\"emoji_(\\w+)\", r\" \", text)\n text = re.sub(r\"hashtag_(\\w+)\", r\" \", text)\n text = re.sub(r\"specialmentioned\", r\" \", text)\n text = re.sub(r\"specialurl\", r\" \", text)\n text = re.sub(\"\\s+\", \" \", text).lower().strip() \n\n if text == \" \" or text == \"\":\n return \"blank_comment\"\n else:\n return text \n \n return text", "def cleanASJP(word):\n word = re.sub(r\",\", \"-\", word)\n word = re.sub(r\"\\%\", \"\", word)\n word = re.sub(r\"\\*\", \"\", word)\n word = re.sub(r\"\\\"\", \"\", word)\n word = re.sub(r\".~\", \"\", word)\n word = re.sub(r\"(.)(.)(.)\\$\", r\"\\2\", word)\n word = re.sub(r\"\\$\", \"\", word)\n word = re.sub(r\"\\s+\", \"\", word)\n return word.replace('~', '')", "def clean_tweet(tweet):\n\n pattern = r'http\\S+|pic.\\S+|@[a-zA-Z0-9_]+|#[a-zA-Z0-9_]+|[‘’“”’–—…]|\\xa0'\n return re.sub(pattern, '', tweet)", "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def normalize(self, name):\n\n\t\t# label emojis, specifically :) and :( as @artist, then apply \n\t\t# base normalization\n\n\t\tname = super().normalize(re.sub(r'\\s*:[\\(\\)]\\s*',' @artist ', name))\n\t\t\n\t\t# if now name is ? it may be an artist, so label as @artist\n\t\tif name.strip() in {'?','...'}:\n\t\t\treturn '@artist'\n\t\t\n\t\t# fix ! 
- remove if at the end of a word, otherwise replace with i\n\t\tname = re.sub(r'\\!+$','', re.sub(r'\\!+(?=[^\\b\\w])','', name)).replace('!','i')\n\t\t\n\t\t# remove the and a\n\t\tname = re.sub(r'^(the|a)\\s+','', name)\n\t\t \n\t\t# remove multiple white spaces\n\t\tname = re.sub(r'\\s{2,}', ' ', name).strip()\n\t\t\n\t\treturn name", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def _only_letters(s):\r\n\treturn _regex_non_letters.sub('', s)", "def normalize_name(name):\n return PUNCT_RE.sub('-', name.lower()).strip('-')", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def disallow_username_substring(self) -> bool:\n return pulumi.get(self, \"disallow_username_substring\")", "def clean_word(word: str) -> str:\n return re.sub(\n r\"^[,.'\\\"()!]+\", \"\", re.sub(r\"[,.'\\\"()!]+$\", \"\", word.lower())\n )", "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"@\", \"\", string)\n return string.lower()", "def clean_field_title(title):\r\n return ''.join((c if c in ALLOWED_CHARS else '_') for c in title.lower())", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()" ]
[ "0.8276948", "0.7640557", "0.748814", "0.7462814", "0.6971219", "0.6806881", "0.6668218", "0.6659604", "0.6582828", "0.6548192", "0.6532645", "0.65318185", "0.65207565", "0.6505552", "0.6473751", "0.64688873", "0.6456", "0.64483285", "0.64419836", "0.64402777", "0.6427039", "0.6384738", "0.6369296", "0.6352192", "0.63462204", "0.63444954", "0.63430846", "0.6333575", "0.6319038", "0.62954724", "0.6293179", "0.6288356", "0.62853587", "0.62669545", "0.6265987", "0.62526953", "0.6250571", "0.62378997", "0.6225453", "0.6225453", "0.6219932", "0.62152606", "0.62127155", "0.6195517", "0.61904556", "0.61895365", "0.6185468", "0.6182481", "0.6168086", "0.6160188", "0.6156948", "0.61548376", "0.6145642", "0.61455554", "0.613871", "0.61383957", "0.6135482", "0.61156446", "0.61115515", "0.61111623", "0.61040264", "0.6102261", "0.60942376", "0.6092226", "0.60780394", "0.6069392", "0.6068221", "0.60590714", "0.6057158", "0.6055158", "0.6054807", "0.6050649", "0.60480356", "0.60380554", "0.60287535", "0.60260487", "0.60250413", "0.60242", "0.6024103", "0.60156465", "0.60049784", "0.6004909", "0.600486", "0.59999925", "0.59995574", "0.59948254", "0.5993997", "0.5993196", "0.5988349", "0.5987487", "0.5986082", "0.59774595", "0.5975277", "0.5973809", "0.5973809", "0.596557", "0.596456", "0.59584415", "0.5957586", "0.5952886" ]
0.8559825
0
Replacement of ore.alchemist.container.stringKey. The difference is that here the primary_key is not determined by sqlalchemy.orm.mapper.primary_key_from_instance(obj) but by doing the logically equivalent (but a little more laborious) [ getattr(instance, c.name) for c in mapper.primary_key ]. This is because, in some hard-to-debug cases, the previous was returning None for all pk values e.g. for objects on which checkPermission() has not been called. Using this version, the primary_key is correctly determined irrespective of whether checkPermission() had previously been called on the object.
def stringKey(obj):
    unproxied = proxy.removeSecurityProxy(obj)
    mapper = orm.object_mapper(unproxied)
    #primary_key = mapper.primary_key_from_instance(unproxied)
    identity_values = [ getattr(unproxied, c.name) for c in mapper.primary_key ]
    identity_key = "-".join(map(str, identity_values))
    return "obj-%s" % (identity_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def key(self):\n def validate(name):\n '''Compute the key if necessary and validate'''\n found = getattr(self, name)\n value = found() if callable(found) else found\n if value is None:\n raise BadKeyError(\"The key for %s cannot be None\" % self)\n return str(value) \n if self.__key is None:\n namespace, kind, key = Schema.Get(self)\n self.__id = key\n value = validate(key)\n self.__key = Key(namespace, kind, value)\n else:\n self.__key.id = validate(self.__id)\n return self.__key", "def _get_obj_pk(self, obj):\n if self.use_natural_keys and hasattr(obj, 'natural_key'):\n raw_nat_key = obj.natural_key()\n obj_pk = smart_text(NATURAL_KEY_JOINER.join(raw_nat_key))\n keytype = 'natural'\n else:\n obj_pk = obj._get_pk_val()\n keytype = 'pk'\n\n return obj_pk, keytype", "def primary_key(cls):\n has_multiple_pk = len(class_keys(cls)) > 1\n\n if has_multiple_pk:\n # guess the pk\n pk = cls.__name__.lower() + '_id'\n else:\n for key in class_keys(cls):\n pk = key\n break\n\n if not pk in cls.__dict__:\n # could not find pk field in class, now check\n # whether it has been explicitly specified\n if 'pk_field' in cls.__dict__:\n pk = cls.__dict__['pk_field']\n else:\n raise KeyNotFoundException(\"Could not figure out primary key field\"\n \"for %s model. Tried to first use %s as\"\n \" field name,and then looked for\"\n \" pk_field attr which was also missing\"\n % (cls.__name__, pk))\n\n return pk", "def get_primary_key(cls) -> str:\n return inspect(cls).primary_key[0].name", "def primary_key(cls):\n\n if cls.__from_class__:\n cls = cls.__from_class__\n return cls.__table__.primary_key.columns.values()[0].name", "def get_key_id(self):", "def parent_model_object_to_key(self, parent_object: 'Any') -> 'Any':\n key = tuple(getattr(parent_object, pk) for pk in self.parent_model_pks)\n return key", "def test_primary_key(self):\r\n\r\n # This should just work.\r\n class AutoFieldKey(models.Model):\r\n key = models.AutoField(primary_key=True)\r\n AutoFieldKey.objects.create()\r\n\r\n # This one can be exactly represented.\r\n class CharKey(models.Model):\r\n id = models.CharField(primary_key=True, max_length=10)\r\n CharKey.objects.create(id='a')\r\n\r\n # Some rely on unstable assumptions or have other quirks and\r\n # should warn.\r\n\r\n# # TODO: Warning with a range limitation.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class IntegerKey(models.Model):\r\n# id = models.IntegerField(primary_key=True)\r\n# IntegerKey.objects.create(id=1)\r\n\r\n# # TODO: date/times could be resonably encoded / decoded as\r\n# # strings (in a reversible manner) for key usage, but\r\n# # would need special handling and continue to raise an\r\n# # exception for now\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DateKey(models.Model):\r\n# id = models.DateField(primary_key=True, auto_now=True)\r\n# DateKey.objects.create()\r\n\r\n# # TODO: There is a db.Email field that would be better to\r\n# # store emails, but that may prevent them from being\r\n# # used as keys.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class EmailKey(models.Model):\r\n# id = models.EmailField(primary_key=True)\r\n# EmailKey.objects.create(id='aaa@example.com')\r\n\r\n# # TODO: Warn that changing field parameters breaks sorting.\r\n# # This applies to any DecimalField, so should belong to\r\n# # the docs.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DecimalKey(models.Model):\r\n# id = models.DecimalField(primary_key=True, decimal_places=2,\r\n# max_digits=5)\r\n# DecimalKey.objects.create(id=1)\r\n\r\n # Some cannot be 
reasonably represented (e.g. binary or string\r\n # encoding would prevent comparisons to work as expected).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class FloatKey(models.Model):\r\n id = models.FloatField(primary_key=True)\r\n FloatKey.objects.create(id=1.0)\r\n\r\n # TODO: Better fail during validation or creation than\r\n # sometimes when filtering (False = 0 is a wrong key value).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class BooleanKey(models.Model):\r\n id = models.BooleanField(primary_key=True)\r\n BooleanKey.objects.create(id=True)\r\n len(BooleanKey.objects.filter(id=False))", "def primary_key(self) -> str:\n return self.model._meta.pk.name # type: ignore", "def object_pk(self):\n\n if self._wrapped not in (None, empty):\n return str(self._wrapped.pk)\n\n if '_object_pk' in self.__dict__:\n return self.__dict__['_object_pk']\n\n identifier = self._get_identifier()\n if identifier:\n # noinspection PyBroadException\n try:\n object_pk = identifier.split('.', 2)[-1]\n if object_pk == 'None':\n object_pk = None\n self.__dict__['_object_pk'] = object_pk\n return object_pk\n except Exception:\n pass\n\n raise AttributeError()", "def primary(self):\n primary_k = self.__class__.get_primary()\n return getattr(self, primary_k)", "def pk(self):\n return getattr(self, self.schema.pk.name, None)", "def keyify(content_type_pk, pk):\n return '%s:%s' % (content_type_pk, pk)", "def get_datastore_key(model, pk):\n\n kind = get_top_concrete_parent(model)._meta.db_table\n return Key.from_path(kind, pk)", "def _primary_key_names(obj):\n return [key.name for key in _get_mapper(obj).primary_key]", "def keyify_obj(o):\n return keyify(o.content_type.pk, o.pk)", "def _get_key(key_or_id, key_cls):\n return (\n key_cls.from_string(key_or_id)\n if isinstance(key_or_id, str)\n else key_or_id\n )", "def get_primary_id(self):", "def persistent_cache_key_adapter(obj):\n # pylint: disable=protected-access\n if obj._p_oid:\n return hex(int.from_bytes(obj._p_oid, byteorder='big'))[2:]\n return hex(id(obj))[2:]", "def key(self):\n return self._key if self._key else self.factory().key", "def get_key(self, obj):\n if hasattr(obj, \"id\"):\n hashed_id = hashlib.md5(str(obj.id).encode(\"utf-8\")).hexdigest()\n return hashed_id\n else:\n return None", "def _cache_key(cls, pk, db):\r\n key_parts = ('o', cls._meta, pk, db)\r\n return ':'.join(map(encoding.smart_unicode, key_parts))", "def pk(self, ctx):\n\n #if (self._pk == False):\n if True:\n pk_cols = []\n for col in self.columns:\n if col.pk:\n pk_cols.append(col)\n\n if (len(pk_cols) > 1):\n raise Exception(\"Table %s has multiple primary keys: %s\" % (self.name, pk_cols))\n elif (len(pk_cols) == 1):\n self._pk = pk_cols[0]\n else:\n self._pk = None\n\n return self._pk", "def instance_key(model, instance_or_pk):\r\n\r\n return '%s.%s:%d' % (\r\n model._meta.app_label,\r\n model._meta.module_name,\r\n getattr(instance_or_pk, 'pk', instance_or_pk),\r\n )", "def _get_raw_key(self, key_id):", "def get_key(self):\n return self._determine_key()", "def get_object_id(self, key):\n try:\n return self.key_object[key]\n except KeyError:\n return None", "def get_pk(self):\n return getattr(self, self.get_pk_name(), None)", "def keyify(self):\n return keyify_obj(self)", "def test_primary_key_coercing(self):\r\n CharKey.objects.create(id=1)\r\n CharKey.objects.create(id='a')\r\n CharKey.objects.create(id=1.1)\r\n CharKey.objects.get(id='1')\r\n CharKey.objects.get(id='a')\r\n CharKey.objects.get(id='1.1')\r\n\r\n IntegerKey.objects.create(id=1)\r\n with 
self.assertRaises(ValueError):\r\n IntegerKey.objects.create(id='a')\r\n IntegerKey.objects.create(id=1.1)\r\n IntegerKey.objects.get(id='1')\r\n with self.assertRaises(ValueError):\r\n IntegerKey.objects.get(id='a')\r\n IntegerKey.objects.get(id=1.1)", "def natural_key(self):\n\t\tself_keys = {\n\t\t\t'name': self.name,\n\t\t\t'allow_negative': self.allow_negative,\n\t\t\t'currency': self.currency.natural_key(),\n\t\t\t'primary': self.primary,\n\t\t\t'type': self.type.natural_key()\n\t\t}\n\t\tnatural_keys = super(PaymentObject, self).natural_key(self_keys)\n\t\treturn natural_keys", "def pk_filter(cls, value=None):\n return {cls.PK_NAME: value}", "def get_key(self, object_id):\n try:\n info = self.storage.load(\"\", object_id)\n return info[\"object_key\"]\n except KeyError:\n return None", "def key(self):\n return self.key_for(self.id)", "def _load_key(client, entity_type, entity_id=None, parent_key=None):\n\n key = None\n if entity_id:\n key = client.key(entity_type, entity_id, parent=parent_key)\n else:\n # this will generate an ID\n key = client.key(entity_type)\n return key", "def primary_key(self) -> List[int]:\n return self.doc.get('primaryKey')", "def isPrimaryKey(self):\n return self._primaryKey", "def get_primary_key(self, table_name):\n return self.get_table_meta(table_name).get('primary_key')", "def key_object_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"key_object_name\")", "def pk(entity) -> str:\n for field in entity.modelfields.values():\n if field.primary: return field.name", "def _get_cache_key(cls, args, kwargs):\n result = None\n # Quick hack for my composites work for now.\n if hasattr(cls._meta, \"pks\"):\n pk = cls._meta.pks[0]\n else:\n pk = cls._meta.pk\n # get the index of the pk in the class fields. this should be calculated *once*, but isn't\n # atm\n pk_position = cls._meta.fields.index(pk)\n if len(args) > pk_position:\n # if it's in the args, we can get it easily by index\n result = args[pk_position]\n elif pk.attname in kwargs:\n # retrieve the pk value. 
Note that we use attname instead of name, to handle the case\n # where the pk is a a ForeignKey.\n result = kwargs[pk.attname]\n elif pk.name != pk.attname and pk.name in kwargs:\n # ok we couldn't find the value, but maybe it's a FK and we can find the corresponding\n # object instead\n result = kwargs[pk.name]\n\n if result is not None and isinstance(result, Model):\n # if the pk value happens to be a model instance (which can happen wich a FK), we'd\n # rather use its own pk as the key\n result = result._get_pk_val()\n return result", "def _pre_key_for(self, *objects):\n obj_type = objtype(objects[0])\n return \"{}/{}/%s/{}\".format(self.cache_prefix, obj_type, str(self.pk))", "def key_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"key_id\")", "def underlying_key(self):\n pass", "def primary_key(self) -> List[int]:\n return self._primary_key", "def key_id(self):\n return self._key_id", "def test_create_primary_key(self):\n assert self.tbl.primary_key == 'id'", "def to_key(self) -> TKey:\n\n key_type = ClassInfo.get_key_from_record(type(self))\n key = key_type()\n key.populate_from(self)\n return key", "def object_key(self) -> str:\n return self._values.get('object_key')", "def key(nullable=True):\n return sa.Column(\n \"key\",\n sa.Text().with_variant(mysql.VARCHAR(255), \"mysql\"),\n nullable=nullable,\n )", "def get_key(self) -> int:\n return self.__key", "def test_pk_attr_shortcut_method(self):\n m = self.Model({})\n self.assertEqual(m.pk_attr(), 'id')\n self.Model.set_primary_key('foo')\n self.assertEqual(m.pk_attr(), 'foo')", "def _GetKeyString(self):\n return self.__key_string", "def _get_parent_key(self):\n parent_kind = metadata_api.GetParentKind(self)\n if parent_kind:\n parent_key_field = metadata_api.GetParentKeyField(self)\n parent_key_name = getattr(self, parent_key_field, None)\n if parent_key_name:\n return ndb.Key(parent_kind, parent_key_name)\n return None", "def __GetKeyString(self):\n return self._GetKeyString()", "def _key(self):\n return None", "def resolve_key(obj, _):\n return obj.key.decode()", "def test_primary_key_class_methods(self):\n self.assertTrue(hasattr(self.Model, 'primary_key'))\n self.assertTrue(callable(self.Model.primary_key))\n self.assertTrue(hasattr(self.Model, 'set_primary_key'))\n self.assertTrue(callable(self.Model.set_primary_key))", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def key(self, key_id):\r\n return keys.RepoKey(self, key_id)", "def get_key(self) -> int:\n return self.key", "def string_cache_key_adapter(obj):\n return obj", "def _cache_key(self, pk=\"all\", **kwargs):\n q_filter = \"\".join(\"%s=%s\" % (k, v) for k, v in kwargs.items()) or self.pk\n return \"%s.%s[%s]\" % (self.model.__tablename__, q_filter, pk)", "def _get_identifier(self):\n\n if '_identifier' not in self.__dict__:\n\n object_or_string, args, kwargs = self._init_args\n\n # Get the identifier for the wrapped object, e.g. 'auth.user.1234'\n # If there is a lookup in the kwargs, then the following call\n # will figure out the object_pk. 
It caches these lookups.\n kwargs['_fail_silently'] = self._fail_silently\n self.__dict__['_identifier'] = get_identifier(object_or_string, *args, **kwargs)\n\n return self.__dict__['_identifier']", "def __getKey(self, keyNameRaw, extant=True):\n\t\tfullKeyName = self.__DELIMITER.join(\n\t\t\tself.__currentGroupNames + [str(keyNameRaw)])\n\t\tif extant and (fullKeyName not in self.__settings):\n\t\t\treturn None\n\t\treturn fullKeyName", "def safe_key(cls, db_key, transform_fn):\n cls._split_key(db_key.name())\n name = db_key.name().strip('()')\n unsafe_submission_key_name, unsafe_reviewer_id_or_name = name.split(\n ':', 1)[1].rsplit(':', 1)\n unsafe_reviewer_key = db.Key.from_path(\n models.Student.kind(), unsafe_reviewer_id_or_name)\n safe_reviewer_key = models.Student.safe_key(\n unsafe_reviewer_key, transform_fn)\n\n # Treating as module-protected. pylint: disable-msg=protected-access\n _, unit_id, unsafe_reviewee_key_name = (\n student_work.Submission._split_key(unsafe_submission_key_name))\n unsafe_reviewee_key = db.Key.from_path(\n models.Student.kind(), unsafe_reviewee_key_name)\n unsafe_submission_key = student_work.Submission.get_key(\n unit_id, unsafe_reviewee_key)\n safe_submission_key = student_work.Submission.safe_key(\n unsafe_submission_key, transform_fn)\n\n return db.Key.from_path(\n cls.kind(), cls.key_name(safe_submission_key, safe_reviewer_key))", "def key(self):\n return str(self._id)", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def test_set_primary_key_class_method(self):\n self.Model.set_primary_key('foo')\n self.assertEqual(self.Model.primary_key(), 'foo')", "def 
primary_keys(class_):\n for column in class_.__table__.c:\n if column.primary_key:\n yield column", "def primary_key(table_name: str) -> str:\n\n return f\"\"\"\n SELECT\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type\n FROM\n pg_index i\n JOIN\n pg_attribute a\n ON\n a.attrelid = i.indrelid AND\n a.attnum = ANY(i.indkey)\n WHERE\n i.indrelid = '{table_name}'::regclass AND\n i.indisprimary\n \"\"\"", "def prepare_key (self, key, for_seq):\n r_key = \"%s:%d:%s\" % (self.classkey, for_seq, key)\n return r_key" ]
[ "0.69380665", "0.67975974", "0.6636642", "0.6627335", "0.64218795", "0.6354907", "0.6309344", "0.62846273", "0.622991", "0.62281317", "0.61777174", "0.61190313", "0.6101327", "0.6091566", "0.6083226", "0.6061366", "0.6051213", "0.6004258", "0.5950747", "0.5949102", "0.59490883", "0.59411365", "0.5940988", "0.59337777", "0.5926337", "0.59218293", "0.5921642", "0.59214526", "0.5879429", "0.5867949", "0.58555555", "0.58397985", "0.583902", "0.5836159", "0.5824136", "0.5815574", "0.5803467", "0.5773028", "0.576903", "0.57420164", "0.57372475", "0.57264495", "0.5712196", "0.5711593", "0.5707571", "0.5706659", "0.5700665", "0.5699684", "0.56982815", "0.56887597", "0.5668643", "0.56606334", "0.5660062", "0.5653539", "0.56530786", "0.5634418", "0.5615004", "0.5613896", "0.5613059", "0.5613059", "0.55908704", "0.55904496", "0.5585126", "0.5576697", "0.556116", "0.5556793", "0.5556358", "0.5547691", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.5545873", "0.55376625", "0.5519822", "0.5516176", "0.5512856" ]
0.7004091
0
Generator of all [zope.schema] fields that will be displayed in a container listing. Redefines alchemist.ui.container.getFields, making use of the listing_columns property of the ModelDescriptor class.
def getFields(context, interface=None, annotation=None):
    if interface is None:
        domain_model = proxy.removeSecurityProxy(context.domain_model)
        interface = utils.get_derived_table_schema(domain_model)
    if annotation is None:
        annotation = utils.get_descriptor(interface)
    for field_name in annotation.listing_columns:
        yield interface[field_name]
        # !+FIELD_KEYERROR(mr, jul-2012) throws a KeyError when field_name is
        # not part of the interface e.g. if we use a "field property" that is
        # implemented as a domain_model.{property}.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fields(self, doclet):\n FIELD_TYPES = OrderedDict([('params', _params_formatter),\n ('properties', _params_formatter),\n ('exceptions', _exceptions_formatter),\n ('returns', _returns_formatter)])\n for field_name, callback in iteritems(FIELD_TYPES):\n for field in doclet.get(field_name, []):\n description = field.get('description', '')\n unwrapped = sub(r'[ \\t]*[\\r\\n]+[ \\t]*', ' ', description)\n yield callback(field, unwrapped)", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def get_fields(self) -> Iterable[fields.Field]:\n for attr_name in dir(self):\n attr = getattr(self, attr_name)\n if isinstance(attr, fields.Field):\n yield attr", "def get_fields(self):\r\n return self.fields", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def _get_fields(self):\n return self._fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_fields(self):\n\n\t\treturn self.__fields", "def fieldsIterator(self):\n for name, field in self.fields.items():\n renderer = self.renderers.get(name)\n if renderer:\n value = renderer(self.instance)\n else:\n value = getattr(self.instance, name)\n yield field.verbose_name, value", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def get_all_fields(context):\n\n schema = zope.component.getUtility(\n IDexterityFTI, name=context.portal_type).lookupSchema()\n fields = dict((fieldname, schema[fieldname]) for fieldname in schema)\n\n assignable = IBehaviorAssignable(context)\n for behavior in assignable.enumerateBehaviors():\n behavior_schema = behavior.interface\n fields.update((name, behavior_schema[name])\n for name in behavior_schema)\n\n return fields", "def fields(self):\n if self._fields is None:\n self._init_fields()\n return self._fields", "def listFields(self):\n return self.get_json('/field')", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def Fields(self):\n return self._fields", "def define_fields(cls, dbmanager):\n return []", "def fields(cls):\n if not hasattr(cls, '_fields'):\n cls.parse_attributes()\n return cls._fields", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def get_fields(self):\n for field in self.fields_box.children:\n if isinstance(field, MyTextField):\n yield field", "def fields(self):", "def get_fields(self):\n return list(self.metadata.keys())", "def fields(self):\r\n pass", "def getFields(iface):\n return getFieldsInOrder(iface)", "def fields(self):\n ...", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def fields(self):\n _fields = {\n i: 
attrgetter(i) for i in ('pf_type', 'label',)\n }\n _fields['host'] = self.get_safely_instance_partial(Host, 'host')\n return _fields", "def get_fieldlist(cls):\n return cls.fieldlist", "def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list", "def get_fields(self):\n \n return self.metadata.keys()", "def get_fields(cls):\n return cls.fields.values()", "def fields(self):\r\n return self._by_name.iteritems()", "def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]", "def _get_all_field_lines(self):\n for field in self._all_fields.values():\n if field.hide:\n continue\n if field.showname:\n field_repr = field.showname\n elif field.show:\n field_repr = field.show\n else:\n continue\n yield '\\t' + field_repr + os.linesep", "def fields(self) -> List[SingleField]:\n return self._fields", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def _get_fields(self):\n tables = [self.sell_table, self.buy_table, self.pending_table]\n for table in tables:\n sql = f'SHOW COLUMNS in {table}'\n self.fields[table] = list(tb.Database().read(sql).Field)", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def objectFields(self):\n raise NotImplementedError", "def get_fields(self):\n for child in self.children:\n if isinstance(child, MyTextField):\n yield child", "def items(self):\n for name in self.fields:\n yield name, getattr(self, name)", "def _get_fields(self):\n table = self.ui.tableFields\n rows = table.rowCount()\n cols = table.columnCount()\n fields = []\n for i in range(rows):\n fields.append(\n tuple(map(lambda x: table.item(i, x).text(), range(cols)))\n )\n return fields", "def inspect_model_fields(self, model: ModelRepresentation) -> None:\n c = model.count()\n title(f\"{model.name} ({c})\")\n print(model.fields_info())", "def model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n model_fields = {}\n for field in self.model._meta.fields:\n name, column = field.get_attname_column()\n model_fields[converter(column)] = field\n return model_fields", "def fields(proto):\n return [x[0].name for x in proto.ListFields()]", "async def get_fields(self) -> List[Field]:\n schema = await 
self.get_schema()\n fields = []\n if schema:\n # The faust-avro parser expects a json-parsed avro schema\n # https://github.com/masterysystems/faust-avro/blob/master/faust_avro/parsers/avro.py#L20\n parsed_schema = self._parse(json.loads(schema))\n for field in parsed_schema.fields:\n fields.append(Field(field.name, field.type.python_type))\n\n return fields", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields_container(self):\n return self._fields_container", "def fields(cls):\n return cls._nameToValue", "def show_database_structure(self):\n self.analyze()\n items = []\n for model in get_models():\n names = []\n # for f, m in model._meta.get_fields_with_model():\n for f in model._meta.concrete_fields:\n names.append(f.name)\n items.append(\n \"{0} : {1}\".format(fmn(model), ', '.join(names)))\n\n items = sorted(items)\n return rstgen.ul(items)", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def extract(self):\n self.field_list = []\n \n try:\n self.mfields = self.getModel()._meta.fields\n if(self.mfields):\n try:\n for model_fields in self.mfields:\n if(model_fields.name == \"id\"):\n pass \n \n elif(model_fields.name == \"pci\"):\n pass \n elif(model_fields.name == \"sci\"):\n pass \n elif(model_fields.name == \"validated\"):\n pass \n else:\n self.field_list.append(model_fields.name)\n return self.field_list\n except:\n raise \n else:\n return None \n except:\n raise", "def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))", "def listMetaDataFields(self, exclude=True):\n #tool = getToolByName(self, ATCT_TOOLNAME)\n #original_list = tool.getMetadataDisplay(exclude)\n\n return DisplayList((\n ('getAnalysisCategory', _p('Analysis Category')),\n ('getAnalysisService', _p('Analysis Service')),\n ('getAnalysts', _('Analyst')),\n ('getClientOrderNumber', _('Client Order')),\n ('getClientReference', _('Client Reference')),\n ('getClientSampleID', _('Client Sample ID')),\n ('getClientTitle', _('Client')),\n ('getContactTitle', _('Contact')),\n ('Creator', _p('Creator')),\n ('created', _('Date Created')),\n ('getDatePublished', _('Date Published')),\n ('getDateReceived', _('Date Received')),\n ('getDateSampled', _('Date Sampled')),\n ('getProfilesTitle', _('Analysis Profiles')),\n ('getRequestID', _('Request ID')),\n ('getSampleID', _('Sample ID')),\n ('getSamplePointTitle', _('Sample Point')),\n ('getSampleTypeTitle', _('Sample Type')),\n ('review_state', _p('Review state')),\n ))", "def describe(self):\r\n return dict((field.name, dict(\r\n id=field.name,\r\n name=field.label,\r\n validators=ValidatorSerializer(\r\n field.requires if isSequenceType(field.requires) else [field.requires])(),\r\n comment=field.comment,\r\n readable=field.readable,\r\n writable=field.writable,\r\n type=getattr(field, 'wtype',\r\n field.type.type if isinstance(field.type, SQLCustomType) else field.type.split('(')[0]),\r\n # w2pwidget=field.widget,\r\n )) for field in self.descibe_columns)", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "async def get_schema_info(self, collection):\n await self.ensure_collection(collection)\n try:\n # Luke 
handler is not supported in API v2 yet.\n # /v2/collections/<COLLECTION>/schema/fields doesn't show dynamically\n # created fields.\n # So using old API (/solr/...).\n response = await self.get(\n '/solr/{}/admin/luke?numTerms=0'.format(collection)\n )\n return json.loads(response.body.decode('utf-8'))\n except SolrError:\n logger.warning('Failed to fetch fields list for collection {}'\n .format(collection))\n raise", "def fields(self):\r\n if not hasattr(self, '_fields'):\r\n self._fields = dict((clean_field_title(pc.title), pc.get_content())\r\n for pc in self.pagecontent_set.all())\r\n\r\n return self._fields", "def define_fields(cls, dbmanager):\n\n # ATTN: UNFINISHED\n fieldlist = [\n # standard primary id number field\n mdbfield.DbfPrimaryId('id', {\n 'label': \"The primary key and id# for this group\"\n }),\n # globally unique resource reference\n mdbmixins.dbfmixin_gobselfreference(),\n ]\n\n return fieldlist", "def get_model_fields(self):\n fields = []\n\n for field in self.model._meta.get_fields():\n fields.append(field.name)\n\n return fields", "def f(self):\r\n return self.fields()", "def list_meta_fields():\n ret = {}\n status, result = _query(action=\"meta\", command=\"fields\")\n root = ET.fromstring(result)\n for field in root:\n field_id = None\n field_ret = {\"name\": field.text}\n for item in field.items():\n field_ret[item[0]] = item[1]\n if item[0] == \"id\":\n field_id = item[1]\n ret[field_id] = field_ret\n return ret", "def fields(self, forge, values):\n\n values[\"forge\"] = forge['id']\n\n fields = opengui.Fields(\n values=values,\n fields=FIELDS,\n ready=True\n )\n\n fields[\"forge\"].description = forge[\"description\"]\n\n if os.path.exists(\"/opt/service/forge/fields.yaml\"):\n with open(\"/opt/service/forge/fields.yaml\", \"r\") as fields_file:\n fields.extend(yaml.safe_load(fields_file).get(\"fields\", []))\n\n for field in forge.get(\"input\", {}).get(\"fields\", []):\n if field[\"name\"] in RESERVED:\n raise Exception(f\"field name '{field['name']}' is reserved\")\n self.field(fields, field)\n\n return fields", "def make_fields(self):\n #Let's first get fields in material_information printer_information\n metadata = GUI.api.get_metadata()\n field_correct_form = filter(lambda field: field['form_name']=='material_information' or field['form_name'] == 'printer_information', metadata)\n rows_w_fields = []\n for field in field_correct_form:\n #make label\n row = []\n key = field['field_name']\n type = field['field_type']\n row.append(sg.Text(text = field['field_label'], key=key+\"_label\"))#keys for labels are key_label (ex. 
record_id_label)\n if(type == 'radio' or type == \"dropdown\"):\n options = utils.get_options(field)\n row.append(sg.Combo(options, key=key, disabled= True, metadata=True, enable_events=True))\n elif(type == \"yesno\"):\n options = [\"Yes\", \"No\"]\n row.append(sg.Combo(options, key=key, disabled= True, metadata=True, enable_events=True))\n elif(type == \"text\"):\n row.append(sg.Input(key=key, disabled=True, metadata=True))\n else:#descirptive\n row[0] = sg.Text(text = field['field_label'], key=key, metadata=True)#we only need text in this case\n rows_w_fields.append(row)\n return rows_w_fields", "def modelfields(entity) -> Dict[str, Field]:\n return entity.__modelfields__", "def getVirtualFields(self):\n result = []\n objects_cls = self.getClass()\n if objects_cls:\n from cdb.platform.mom import entities, fields\n cls = entities.Class.ByKeys(objects_cls._getClassname())\n result = [f.field_name for f in cls.DDAllFields if isinstance(f, fields.DDVirtualField)]\n return result", "def get_columns(self, request, cl):\n columns = []\n for field_name in cl.model_admin.list_display:\n text, _ = label_for_field(field_name, cl.model, model_admin=cl.model_admin, return_attr=True)\n columns.append({field_name: text})\n return columns", "def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()", "def db_fields(self):", "def describe_fields(self):\n opened_file = self.data\n description = []\n\n if not opened_file:\n opened_file = self.open()\n\n for n in range(0, opened_file.GetLayerCount()):\n layer = opened_file.GetLayer(n)\n layer_description = {'name': layer.GetName(),\n 'feature_count': layer.GetFeatureCount(),\n 'fields': [],\n 'index': n,\n 'geom_type': self.geometry_type(layer.GetGeomType())\n }\n\n layer_definition = layer.GetLayerDefn()\n for i in range(layer_definition.GetFieldCount()):\n field_desc = {}\n field = layer_definition.GetFieldDefn(i)\n field_desc['name'] = field.GetName()\n field_desc['type'] = field.GetFieldTypeName(i)\n layer_description['fields'].append(field_desc)\n\n description.append(layer_description)\n\n return description", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def fields(self) -> Dict[str, Field]:\n return self._fields", "def all_fields(cls):\n return cls.__by_name.values()", "def get_fields(schema: BaseModel, exclude_dump_only: bool = False) -> dict:\n if hasattr(schema, \"fields\"):\n fields = schema.fields\n elif hasattr(schema, \"_declared_fields\"):\n fields = copy.deepcopy(schema._declared_fields)\n else:\n raise ValueError(\n \"{!r} doesn't have either `fields` or `_declared_fields`.\".format(schema)\n )\n Meta = getattr(schema, \"Meta\", None)\n return filter_excluded_fields(fields, Meta, exclude_dump_only)", "def all_fields(item):\n return scom.all_fields(item)", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def field_names(self):\n ...", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def model_fields(cls):\n members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a)))\n return [m for m in members if issubclass(m[1].__class__, fields.Field)]", "def get_dashmanager_field_components(doctype):\n\tfields_list, fields_component_list = get_fields_component_list(doctype)\n\treturn {\n\t\t\"fields\" : json.dumps(fields_list),\n\t\t\"fields_components\" : json.dumps(fields_component_list)\n\t}", "def 
person_fields(self):\r\n return persons.PersonFields(self)", "def get_fields():\n return jsonify(result=Tree.fields())", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .extension_schema_property import ExtensionSchemaProperty\n\n from .entity import Entity\n from .extension_schema_property import ExtensionSchemaProperty\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"description\": lambda n : setattr(self, 'description', n.get_str_value()),\n \"owner\": lambda n : setattr(self, 'owner', n.get_str_value()),\n \"properties\": lambda n : setattr(self, 'properties', n.get_collection_of_object_values(ExtensionSchemaProperty)),\n \"status\": lambda n : setattr(self, 'status', n.get_str_value()),\n \"targetTypes\": lambda n : setattr(self, 'target_types', n.get_collection_of_primitive_values(str)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_fields(self):\n \n fields = []\n for order in self.order_lst:\n fields += order.get_fields()\n \n fields = list(set(fields))\n \n out_fields = self.eod.sort_fields(fields)\n \n return out_fields", "def meta_fields(item):\n return scom.meta_fields(item)", "def get_fields(self):\n \n fields = []\n for img in self.img_lst:\n fields += img.get_fields()\n \n fields = list(set(fields))\n \n return fields", "def get_show_columns(self, model):\n return [\n getattr(field, 'di_display_name', False) or field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def deal_fields(self):\r\n return deals.DealFields(self)", "def get_all_fields(\n schema: Union[Schema, Config]\n) -> List[Tuple[str, Schema, BaseField]]:\n if isinstance(schema, Config):\n schema = schema._schema\n\n ret = []\n prefix = schema._key + \".\" if schema._key else \"\"\n for key, field in schema._fields.items():\n ret.append((prefix + key, schema, field))\n if isinstance(field, Schema):\n ret.extend(\n [\n (prefix + subkey, schema, subfield)\n for subkey, schema, subfield in get_all_fields(field)\n ]\n )\n return ret", "def get_fields_in_model(instance):\n assert isinstance(instance, Document)\n return instance._fields", "def _create_fields(property_, alias_dictionary):\n fields = []\n # Only generate properties that have a field template\n if property_['field_template'] is not None:\n # If the property is independent, add the single-bit sized isInherited flag\n # to the list of Fields as well.\n if property_['independent']:\n fields.append(_create_inherited_flag_field(property_))\n\n fields.append(_create_property_field(property_, alias_dictionary))\n\n return fields", "def get_fields(self):\n fields = {}\n allowed_types = (\n SerializerMethodField,\n Field,\n Serializer,\n )\n for attr in dir(self):\n if attr == 'data':\n continue\n\n if isinstance(getattr(self, attr), allowed_types):\n fields[attr] = getattr(self, attr)\n\n return fields", "def retrieve_db_fields():\n\n columns = Column._retrieve_db_columns()\n\n fields = set()\n for c in columns:\n if 'dbField' in c.keys() and c['dbField']:\n fields.add(c['name'])\n\n return list(fields)", "def GetAllFields(self, run_unsafe=False):\n\n if not (self.inherited_fields_expanded or run_unsafe):\n raise RuntimeError(f'Type {self.typename} has not been expanded')\n if self._all_fields is None:\n tmp = self.local_field_names.copy()\n tmp.update(self.inherited_field_names)\n if run_unsafe:\n return tmp\n self._all_fields = tmp\n return self._all_fields" ]
[ "0.6986266", "0.6902756", "0.6768921", "0.6752611", "0.6708141", "0.66911685", "0.66597337", "0.66597337", "0.66573167", "0.6641873", "0.65973717", "0.65927196", "0.6563586", "0.65375286", "0.6519489", "0.6499071", "0.64926445", "0.6473653", "0.646271", "0.64609843", "0.64545995", "0.6449818", "0.6442649", "0.640742", "0.64040315", "0.63837314", "0.63810277", "0.6365576", "0.63620055", "0.63458866", "0.63454306", "0.63401043", "0.6329875", "0.63255364", "0.63231385", "0.62960273", "0.6294183", "0.62895817", "0.62807536", "0.62520623", "0.6245181", "0.62315565", "0.6203232", "0.61895", "0.6186681", "0.61773056", "0.6173546", "0.6167707", "0.61648935", "0.61648935", "0.61648935", "0.61648935", "0.61648935", "0.61648935", "0.61647016", "0.6154563", "0.61525387", "0.61522824", "0.614619", "0.61403173", "0.61332375", "0.610559", "0.61022675", "0.6089804", "0.6087416", "0.60829544", "0.60759574", "0.607014", "0.6056887", "0.605565", "0.6054289", "0.6050362", "0.6047118", "0.59833294", "0.59825206", "0.59740716", "0.5967749", "0.59663475", "0.595933", "0.5952969", "0.5935123", "0.5926774", "0.5925113", "0.5919776", "0.5912796", "0.59088355", "0.5886402", "0.58740276", "0.58687776", "0.58447284", "0.5833675", "0.5829228", "0.5820229", "0.5814589", "0.58052874", "0.58030593", "0.5793562", "0.57895213", "0.57871747", "0.5769316" ]
0.67437035
4
An implementation of zope.app.container.contained.contained that doesn't generate events, for internal use. Copied from SQLOS / z3c.zalchemy (via ore.alchemist.container)
def contained(obj, parent, name=None):
    if (parent is None):
        raise TypeError("Must provide a parent")
    if not IContained.providedBy(obj):
        if ILocation.providedBy(obj):
            interface.directlyProvides(obj, IContained,
                interface.directlyProvidedBy(obj))
        else:
            obj = ContainedProxy(obj)
    oldparent = obj.__parent__
    oldname = obj.__name__
    if ((oldparent is None) or
        not (oldparent is parent or sameProxiedObjects(oldparent, parent))
    ):
        obj.__parent__ = parent
    if oldname != name and name is not None:
        obj.__name__ = name
    return obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _containment_onAdd( self, item, container ):\n # Not calling base class's methods from here avoids reinitialization\n # of all the content objects after product version change.\n # Setup is carried by generator anyway.\n\n # need to realize same as Scheduler schema to provide non-conflict database backup\n # if more than one ExpressSuiteCore in ZODB is presented.\n loop_app = self.getPhysicalRoot()\n if not hasattr( loop_app, 'ExpressSuiteBackup' ):\n try:\n b = BackupFSRoot.BackupFSRoot()\n loop_app._setObject( b.id, b )\n except:\n pass", "def Embedded(self) -> bool:", "def _contained_op(spec):", "def ship_container(self):", "def __call__(container, name, object):", "def process(self, container):\n pass;", "def _announceContainers(self):\n pass", "def _execute_container(self):\n pass", "def _containment_onDelete( self, item, container ):\n root = self.getPhysicalRoot()\n backupFSRoot = getattr(root, 'ExpressSuiteBackup', None)\n if backupFSRoot is not None:\n backupFSRoot.unregistryAppBackup( joinpath( item.getPhysicalPath() ) )\n\n PortalObjectBase.manage_beforeDelete( self, item, container )", "def compartment(self):\n return \"_embedded\"", "def test_container_on_container_html(self):\r\n published_container = ItemFactory.create(\r\n parent_location=self.child_container.location,\r\n category=\"wrapper\", display_name=\"Wrapper\"\r\n )\r\n ItemFactory.create(\r\n parent_location=published_container.location,\r\n category=\"html\", display_name=\"Child HTML\"\r\n )\r\n\r\n def test_container_html(xblock):\r\n self._test_html_content(\r\n xblock,\r\n expected_section_tag=(\r\n '<section class=\"wrapper-xblock level-page is-hidden studio-xblock-wrapper\" '\r\n 'data-locator=\"{0}\" data-course-key=\"{0.course_key}\">'.format(published_container.location)\r\n ),\r\n expected_breadcrumbs=(\r\n r'<a href=\"/unit/{unit}\"\\s*'\r\n r'class=\"navigation-link navigation-parent\">Unit</a>\\s*'\r\n r'<a href=\"/container/{split_test}\"\\s*'\r\n r'class=\"navigation-link navigation-parent\">Split Test</a>\\s*'\r\n r'<a href=\"#\" class=\"navigation-link navigation-current\">Wrapper</a>'\r\n ).format(\r\n unit=re.escape(unicode(self.vertical.location)),\r\n split_test=re.escape(unicode(self.child_container.location))\r\n )\r\n )\r\n\r\n # Test the published version of the container\r\n test_container_html(published_container)\r\n\r\n # Now make the unit and its children into a draft and validate the container again\r\n modulestore('draft').convert_to_draft(self.vertical.location)\r\n modulestore('draft').convert_to_draft(self.child_vertical.location)\r\n draft_container = modulestore('draft').convert_to_draft(published_container.location)\r\n test_container_html(draft_container)", "def __init__(self, container):\r\n self.container = container", "def __init__(self, container):\r\n self.container = container", "def build_container(self, outname: str = 'META-INF/container.xml') -> None:\n logger.info(__('writing META-INF/container.xml file...'))\n outdir = path.join(self.outdir, 'META-INF')\n ensuredir(outdir)\n copy_asset_file(path.join(self.template_dir, 'container.xml'), outdir)", "def inside(self, _, on):\n return on", "def contained(name, data): # noqa: N805", "def container (self):\n return self.__container", "def daemonize(self):\n raise NotImplementedError()", "def test_update_container(self):\n pass", "def _newcontainer(self, siginfo):\n pass", "def onMaster(self):", "def containments(self):\n return self.edges_to_contained + self.edges_to_containers", "def getRootIsolatedObjects():\n 
return frozenset([id for id, obj in getSite().aq_parent.objectItems() if IObjectToIsolate.providedBy(obj)])", "def create_container(self, **kwargs):\n story_host = self.get_client(delegate_login=True).story.api_client.external_root_url\n source_url = \"{0}/cache/{1}\".format(story_host, self.nonce)\n empty_parent_div = lxml.html.Element(\"div\", {\n 'class': 'empty-parent bg-light',\n 'style': 'height: 100%; width: 100%, display: block; text-align: left;'\n })\n frame = lxml.html.Element(\"iframe\", {\n 'src': source_url,\n 'frameborder': \"0\",\n 'scrolling': \"auto\",\n 'class': 'd3-responsive-frame',\n 'style': 'max-height: none; max-width: none; height:100%; width: 100%;',\n 'sandbox': 'allow-scripts allow-same-origin'\n })\n empty_parent_div.append(frame)\n return lxml.html.tostring(empty_parent_div).decode('utf-8')", "def containers(self):\n return self.__containers", "def test_rackspace_uploader_creates_container(self, mock, mock2):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n mycf.get_container.side_effect = NoSuchContainer\r\n mycf.create_container.return_value = True\r\n mycf.make_container_public.return_value = True\r\n u = RackspaceUploader()\r\n res = u.init_app(self.flask_app)\r\n err_msg = \"Init app should return the container.\"\r\n assert res is True, err_msg", "def create_embedded():\n from .server import create_application\n return create_application()", "def getContainedObjects(self, folder):\n\n objlist = list()\n\n if folder.containedObjects:\n\n for obj in folder.containedObjects:\n basic = vsdModels.APIBase(**obj)\n o = self.getObject(basic.selfUrl)\n objlist.append(o)\n return objlist\n else:\n print('the folder does not have any contained objects')\n return None", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def is_container(value: object) -> TypeGuard[AnyContainer]:\n if isinstance(value, Container):\n return True\n if hasattr(value, \"__pt_container__\"):\n return is_container(cast(\"MagicContainer\", value).__pt_container__())\n return False", "def wake(self) -> None:\n self.debug(\n f\"Waking up container '{self.container}'.\", level=2, shift=0)", "def test_destroy_container(self):\n pass", "def test_get_container(self):\n pass", "def test_show_container(self):\n pass", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.eS)\n nwbfile.add_acquisition(self.container)", "def test_container_on_container_html(self):\n draft_container = self._create_item(self.child_container.location, \"wrapper\", \"Wrapper\")\n self._create_item(draft_container.location, \"html\", \"Child HTML\")\n\n def test_container_html(xblock):\n self._test_html_content(\n xblock,\n expected_section_tag=(\n '<section class=\"wrapper-xblock level-page is-hidden studio-xblock-wrapper\" '\n 'data-locator=\"{0}\" data-course-key=\"{0.course_key}\">'.format(draft_container.location)\n ),\n expected_breadcrumbs=(\n '<a href=\"/course/{course}{subsection_parameters}\">Lesson 1</a>.*'\n '<a href=\"/container/{unit_parameters}\">Unit</a>.*'\n ).format(\n course=re.escape(str(self.course.id)),\n unit_parameters=re.escape(str(self.vertical.location)),\n subsection_parameters=re.escape('?show={}'.format(http.urlquote(\n str(self.sequential.location).encode()\n ))),\n ),\n )\n\n # Test the draft version 
of the container\n test_container_html(draft_container)\n\n # Now publish the unit and validate again\n self.store.publish(self.vertical.location, self.user.id)\n draft_container = self.store.get_item(draft_container.location)\n test_container_html(draft_container)", "def SyncRoot(self) -> object:", "def __init__(self, *args, **kwargs):\n super(ContainerBox, self).__init__(*args, **kwargs)\n if not hasattr(self, 'local_cache'):\n self.local_cache = {}", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.group)\n nwbfile.set_electrode_table(self.table)\n nwbfile.add_acquisition(self.container)", "def Finalize(self):\n self.local_namespace.QualifyParentNames()", "def __init__(self):\n self.container = list() # All items will be added to the container when put_on_stack method invoked\n self.changed_last = False # This attribute changes to True when item is being put on the stack", "def test_rackspace_uploader_get_container(self, mock1):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n cdn_enabled_mock = PropertyMock(return_value=False)\r\n type(fake_container).cdn_enabled = cdn_enabled_mock\r\n mycf.get_container.side_effect = NoSuchContainer\r\n\r\n calls = [call.get_container('user_3'),\r\n call.create_container('user_3'),\r\n call.make_container_public('user_3')\r\n ]\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n assert u.get_container('user_3')\r\n mycf.assert_has_calls(calls, any_order=True)", "def attach(self):\n assert(False) #No implementation", "def expose_data(self):\r\n return _ExposedFarmData(self._platforms, self._awaiting, self._channels)", "def addContainer(self, nwbfile):\n nwbfile.add_device(self.dev1)\n nwbfile.add_electrode_group(self.container)", "def test_create_container(self):\n pass", "def run(self, container_config: ContainerConfig) -> Container:", "def application(self):", "def contained(container: Container) -> Predicate[object]:\n\n def compare(value: object, /) -> bool:\n return value in container\n\n return compare", "def addContainer(self, nwbfile):\n nwbfile.add_acquisition(self.clustering)\n nwbfile.add_acquisition(self.container)", "def _any_containers_opened():\n if len(Container._construction_stack) > 0:\n return True\n return False", "def onMasterLost(self):", "def get_app():\n return ApplicationContainer()", "def pop_application(self):\n raise NotImplementedError()", "def get_container(self) -> CNT:\n raise NotImplementedError()", "def extension (self):\n assert False, \"To be implemented by child\"", "def push_application(self):\n raise NotImplementedError()", "def get_app_main_container(app_title_txt, center_area):\n return dbc.Container(\n [\n html.Br(),\n html.H2(app_title_txt), # page title\n html.Hr(),\n dbc.Row(\n [\n dbc.Col(center_area),\n ],\n align=\"center\",\n ),\n html.Br(),\n html.Hr(),\n HTML_RETURN_BUTTON,\n ],\n fluid=False, # False indicates there is a center box which will 
not expand when the screen goes too wide\n )", "def __init__(self):\n # try to load the container\n # cf will be global... \n # self.cf = pyrax.cloudfiles\n logging.debug(\"Opening cloudfiles container '%s'\" % self.container_name)\n notify(\"Reading environment configuration\")\n \n # check if our container exists; if not create it\n all_containers = cf.list_containers()\n \n if self.container_name in all_containers:\n logging.debug(\"Container exists, opening\")\n mycontainer = cf.get_container(self.container_name)\n else:\n logging.warn(\"Container doesn't exist, creating...\")\n mycontainer = cf.create_container(self.container_name)\n \n self.container = mycontainer\n \n if not self.load_footprints():\n logging.warn(\"No footprints loaded\")\n notify(\"No footprints found.\")", "def __init__(self, client, nr, uid, data):\r\n self._client = client\r\n self._nr = nr\r\n self._name = name = 'C{0}'.format(nr)\r\n self._terminating = None\r\n\r\n # Additional container parameters to use\r\n # TODO: At the moment not used; currently data also does not contain\r\n # these information\r\n# self._size = data.get('size', 1)\r\n# self._cpu = data.get('cpu', 0)\r\n# self._memory = data.get('memory', 0)\r\n# self._bandwidth = data.get('bandwidth', 0)\r\n# self._specialFeatures = data.get('specialFeatures', [])\r\n\r\n client.registerContainer(self)\r\n\r\n # Create the directories for the container\r\n self._confDir = confDir = pjoin(client.confDir, name)\r\n self._dataDir = dataDir = pjoin(client.dataDir, name)\r\n\r\n if os.path.isdir(confDir):\r\n raise ValueError('There is already a configuration directory for '\r\n \"'{0}' \\n Please remove it manually if the engine \"\r\n 'did not shut down correctly on last execution and '\r\n 'you are sure it is not in use. \\n dir: {1}.'.format(name, confDir))\r\n\r\n if os.path.isdir(dataDir):\r\n raise ValueError('There is already a data directory for '\r\n \"'{0}' \\n Please remove it manually if the engine \"\r\n 'did not shut down correctly on last execution and '\r\n 'you are sure it is not in use. 
\\n dir: {1}.'.format(name, dataDir))\r\n os.mkdir(confDir)\r\n os.mkdir(dataDir)\r\n\r\n # Create additional folders for the container\r\n rceDir = pjoin(dataDir, 'rce')\r\n rosDir = pjoin(dataDir, 'ros')\r\n\r\n os.mkdir(rceDir)\r\n os.mkdir(rosDir)\r\n\r\n if client.rosRel > 'fuerte':\r\n # TODO: Switch to user 'ros' when the launcher is used again\r\n shutil.copytree(pjoin(client.rootfs, 'root/.ros/rosdep'),\r\n pjoin(rceDir, '.ros/rosdep'))\r\n\r\n # Create network variables\r\n bridgeIP = client.bridgeIP\r\n ip = '{0}.{1}'.format(bridgeIP.rsplit('.', 1)[0], nr)\r\n self._address = '{0}:{1}'.format(ip, client.envPort)\r\n self._rosproxyAddress = '{0}:{1}'.format(ip, client.rosproxyPort)\r\n self._fwdPort = str(nr + 8700)\r\n self._rosproxyFwdPort = str(nr + 10700)\r\n\r\n ovsname = data.get('name')\r\n ovsip = data.get('ip')\r\n\r\n if ovsname and ovsip:\r\n ovsif = 'eth1'\r\n ovsup = pjoin(confDir, 'ovsup')\r\n\r\n if client.ubuntuRel > 'quantal':\r\n ovsdown = pjoin(confDir, 'ovsdown')\r\n else:\r\n ovsdown = None\r\n else:\r\n ovsif = ovsup = ovsdown = None\r\n\r\n # Construct password\r\n passwd = encodeAES(cipher(client.masterPassword),\r\n salter(uid, client.infraPassword))\r\n\r\n # Create upstart scripts\r\n upComm = pjoin(confDir, 'upstartComm')\r\n with open(upComm, 'w') as f:\r\n f.write(_UPSTART_COMM.format(masterIP=client.masterIP,\r\n masterPort=client.masterPort,\r\n internalPort=client.envPort,\r\n uid=uid, passwd=passwd))\r\n\r\n upRosapi = pjoin(confDir, 'upstartRosapi')\r\n with open(upRosapi, 'w') as f:\r\n f.write(_UPSTART_ROSAPI.format(proxyPort=client.rosproxyPort))\r\n\r\n # TODO: For the moment there is no upstart script for the launcher.\r\n# upLauncher = pjoin(confDir, 'upstartLauncher')\r\n# with open(upLauncher, 'w') as f:\r\n# f.write(_UPSTART_LAUNCHER)\r\n\r\n # Setup network\r\n networkIF = pjoin(confDir, 'networkInterfaces')\r\n with open(networkIF, 'w') as f:\r\n f.write('auto lo\\n')\r\n f.write('iface lo inet loopback\\n')\r\n f.write('\\n')\r\n f.write('auto eth0\\n')\r\n f.write('iface eth0 inet static\\n')\r\n f.write(' address {0}\\n'.format(ip))\r\n f.write(' gateway {0}\\n'.format(bridgeIP))\r\n f.write(' dns-nameservers {0} 127.0.0.1\\n'.format(bridgeIP))\r\n\r\n if ovsif:\r\n f.write('\\n')\r\n f.write('auto {0}\\n'.format(ovsif))\r\n f.write('iface {0} inet static\\n'.format(ovsif))\r\n f.write(' address {0}\\n'.format(ovsip))\r\n\r\n # Create up/down script for virtual network interface if necessary\r\n if ovsup:\r\n with open(ovsup, 'w') as f:\r\n f.write(_LXC_NETWORK_SCRIPT.format(if_op='up', ovs_op='add',\r\n name=ovsname))\r\n\r\n os.chmod(ovsup, stat.S_IRWXU)\r\n\r\n if ovsdown:\r\n with open(ovsdown, 'w') as f:\r\n f.write(_LXC_NETWORK_SCRIPT.format(if_op='down', ovs_op='del',\r\n name=ovsname))\r\n\r\n os.chmod(ovsdown, stat.S_IRWXU)\r\n\r\n # TODO: SSL stuff\r\n# if self._USE_SSL:\r\n# # Create a new certificate and key for environment node\r\n# caCertPath = pjoin(self._SSL_DIR, 'Container.cert')\r\n# caCert = loadCertFile(caCertPath)\r\n# caKey = loadKeyFile(pjoin(self._SSL_DIR, 'container/env.key'))\r\n# (cert, key) = createKeyCertPair(commID, caCert, caKey)\r\n#\r\n# # Copy/save file to data directory\r\n# shutil.copyfile(caCertPath, os.path.join(rceDir, 'ca.pem'))\r\n# writeCertToFile(cert, os.path.join(rceDir, 'cert.pem'))\r\n# writeKeyToFile(key, os.path.join(rceDir, 'key.pem'))\r\n\r\n # Create the container\r\n self._container = container = Container(client.reactor, client.rootfs,\r\n confDir, name)\r\n\r\n # Add lxc 
bridge\r\n container.addNetworkInterface('eth0', client.bridgeIF, ip)\r\n\r\n # Add the virtual network bridge if necessary\r\n if ovsname and ovsip:\r\n container.addNetworkInterface(ovsif, None, ovsip, ovsup, ovsdown)\r\n\r\n # Add additional lines to fstab file of container\r\n container.extendFstab(rosDir, 'home/ros', False)\r\n container.extendFstab(rceDir, 'opt/rce/data', False)\r\n container.extendFstab(upComm, 'etc/init/rceComm.conf', True)\r\n # TODO: For the moment there is no upstart script for the launcher.\r\n# container.extendFstab(upLauncher, 'etc/init/rceLauncher.conf', True)\r\n container.extendFstab(upRosapi, 'etc/init/rceRosapi.conf', True)\r\n container.extendFstab(networkIF, 'etc/network/interfaces', True)\r\n\r\n for srcPath, destPath in client.pkgDirIter:\r\n container.extendFstab(srcPath, destPath, True)", "def associated_coroot(self):", "def root(self):\n raise NotImplementedError('must be implemented by subclass')", "def root(self):\n raise NotImplementedError('must be implemented by subclass')", "def remote_createContainer(self, uid, data):\r\n try:\r\n nr = self._nrs.pop()\r\n except KeyError:\r\n raise MaxNumberExceeded('Can not manage any additional container.')\r\n\r\n container = RCEContainer(self, nr, uid, data)\r\n return container.start().addCallback(lambda _: container)", "def add_infra (self):\n raise NotImplementedError", "def dockControl(*args, allowedArea: Union[AnyStr, List[AnyStr], bool]=\"all\", annotation:\n Union[AnyStr, bool]=\"\", area: Union[AnyStr, bool]=\"\", backgroundColor:\n Union[List[float, float, float], bool]=None, closeCommand: Script=None,\n content: Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", docTag:\n Union[AnyStr, bool]=\"\", dockStation: AnyStr=\"\", dragCallback: Script=None,\n dropCallback: Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, enablePopupOption: bool=True, exists: bool=True,\n fixedHeight: bool=True, fixedWidth: bool=True, floatChangeCommand: Script=None,\n floating: bool=True, fullPathName: bool=True, height: Union[int, bool]=0,\n highlightColor: Union[List[float, float, float], bool]=None, isObscured:\n bool=True, label: Union[AnyStr, bool]=\"\", manage: bool=True, moveable:\n bool=True, noBackground: bool=True, numberOfPopupMenus: bool=True, parent:\n Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride: bool=True,\n r: bool=True, retain: bool=True, sizeable: bool=True, splitLayout: AnyStr=\"\",\n state: Union[AnyStr, bool]=\"\", statusBarMessage: AnyStr=\"\", useTemplate:\n AnyStr=\"\", visible: bool=True, visibleChangeCommand: Union[Script, bool]=None,\n width: Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def container(app, container=None):\n if container is None:\n # Print containers\n table = Table([\n (\"NAME\", 30),\n ])\n table.print_header()\n for container in sorted(app.containers, key=lambda c: c.name):\n table.print_row([\n container.name,\n ])\n else:\n # Container name\n click.echo(CYAN(\"Name: \") + container.name)\n # Build parent\n click.echo(\n CYAN(\"Build ancestry: \") +\n \", \".join(other.name for other in app.containers.build_ancestry(container))\n )\n # Runtime dependencies\n dependencies = app.containers.dependencies(container)\n if dependencies:\n click.echo(CYAN(\"Depends on: \") + \", \".join(sorted(other.name for other in dependencies)))\n else:\n click.echo(CYAN(\"Depends on: \") + \"(nothing)\")\n # Dependents\n dependents = 
app.containers.dependents(container)\n if dependents:\n click.echo(CYAN(\"Depended on by: \") + \", \".join(sorted(other.name for other in dependents)))\n else:\n click.echo(CYAN(\"Depended on by: \") + \"(nothing)\")\n # Volumes\n click.echo(CYAN(\"Named volumes:\"))\n for mount_point, volume in container.named_volumes.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))\n click.echo(CYAN(\"Bind-mounted volumes:\"))\n for mount_point, volume in container.bound_volumes.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))\n # Devmodes\n click.echo(CYAN(\"Mounts (devmodes):\"))\n for name, mounts in container.devmodes.items():\n click.echo(\" {}:\".format(name))\n for mount_point, volume in mounts.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))", "def __create__container(self):\n self.__used_containers.append(contenedor.Arena(self.__blocks_size))", "def handle_container_change(instance, **kwargs):\n old_containers = getattr(instance, \"_old_containers\", set())\n new_containers = set(instance.containers)\n removed_from = old_containers - new_containers\n added_to = new_containers - old_containers\n\n # Handle possible permission change (CREATE/DELETE) on the object itself. Do\n # not send notifications on the containers, this is handled bellow.\n handle_permission_change(instance, observe_containers=False)\n # Handle notifications (CREATE/DELETE) on the containers.\n Observer.observe_instance_container(instance, ChangeType.DELETE, removed_from)\n Observer.observe_instance_container(instance, ChangeType.CREATE, added_to)\n # Handle UPDATE notifications on the instance (container property has changed).\n Observer.observe_instance_changes(instance, ChangeType.UPDATE)", "def is_still_owner(self):\n raise tooz.NotImplemented", "def _onChildrenChanged(e):\n\n desktop = pyatspi.Registry.getDesktop(0)\n if e.source == desktop:\n\n # If the desktop is empty, the user has logged out-- shutdown Orca\n #\n try:\n if desktop.childCount == 0:\n speech.speak(_(\"Goodbye.\"))\n shutdown()\n return\n except: # could be a CORBA.COMM_FAILURE\n debug.printException(debug.LEVEL_FINEST)\n shutdown()\n return", "def run_vespa_engine_container(self, disk_folder: str, container_memory: str):\n client = docker.from_env()\n if self.container is None:\n try:\n self.container = client.containers.get(self.application_package.name)\n except docker.errors.NotFound:\n self.container = client.containers.run(\n \"vespaengine/vespa\",\n detach=True,\n mem_limit=container_memory,\n name=self.application_package.name,\n hostname=self.application_package.name,\n privileged=True,\n volumes={disk_folder: {\"bind\": \"/app\", \"mode\": \"rw\"}},\n ports={self.local_port: self.local_port, 19112: 19112},\n )", "def OnRender(self, event):\r\n\r\n # if the frame is about to be deleted, don't bother\r\n if not self._frame or self._frame.IsBeingDeleted():\r\n return\r\n \r\n if not self._frame.GetSizer():\r\n return\r\n\r\n mouse = wx.GetMouseState()\r\n mousePos = wx.Point(mouse.GetX(), mouse.GetY())\r\n point = self._frame.ScreenToClient(mousePos)\r\n art = self._art\r\n\r\n dc = event.GetDC()\r\n \r\n for part in self._uiparts:\r\n \r\n # don't draw hidden pane items or items that aren't windows\r\n if part.sizer_item and ((not part.sizer_item.IsWindow() and \\\r\n not part.sizer_item.IsSpacer() and \\\r\n not part.sizer_item.IsSizer()) or \\\r\n not part.sizer_item.IsShown()):\r\n \r\n continue\r\n \r\n ptype = part.type\r\n \r\n if ptype in [AuiDockUIPart.typeDockSizer, 
AuiDockUIPart.typePaneSizer]:\r\n art.DrawSash(dc, self._frame, part.orientation, part.rect)\r\n\r\n elif ptype == AuiDockUIPart.typeBackground:\r\n art.DrawBackground(dc, self._frame, part.orientation, part.rect)\r\n\r\n elif ptype == AuiDockUIPart.typeCaption:\r\n art.DrawCaption(dc, self._frame, part.pane.caption, part.rect, part.pane)\r\n\r\n elif ptype == AuiDockUIPart.typeGripper:\r\n art.DrawGripper(dc, self._frame, part.rect, part.pane)\r\n\r\n elif ptype == AuiDockUIPart.typePaneBorder:\r\n art.DrawBorder(dc, self._frame, part.rect, part.pane)\r\n\r\n elif ptype == AuiDockUIPart.typePaneButton: \r\n self.DrawPaneButton(dc, part, point)", "def test_virtual(self):\n self.assertEqual(containerd.__virtual__(), \"containerd\")", "def DirEV():\n\n target.BoundarySync()", "def root(self):\n raise NotImplemented(\"must be implemented by subclass\")", "def embedded(self, embedded):\n self._embedded = embedded", "def test_no_sideeffects(self):\n c = EventLoop(\n lambda: None,\n lambda f, g: 1 / 0,\n lambda *args: 1 / 0,\n watchdog_thread=object(),\n reapAllProcesses=lambda: 1 / 0)\n del c", "def present(self):", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def __init__(self, owner):\n self.bag_of_holding = []\n self.owner = owner", "def embed():", "def isolated(self, root_dir):\n return Isolated(self.m, root_dir)", "def pre_deploy(self) -> Any:\n raise NotImplementedError", "def __init__(self):\n self.container = set()", "def execute(self, outer_instance):\n raise NotImplementedError", "def _build_impl(self):", "def root(self):\n raise NotImplementedError('must be implemented by subclass')", "def root(self):\n raise NotImplementedError('must be implemented by subclass')", "def root(self):\n raise NotImplementedError( must be implemented by subclass )", "def container_interface(self):\r\n return self._container_if", "def run(self, root):\r\n pass", "def _newcontainer(self, siginfo):\n self.logger.log('creating a new %s with siginfo %r' % (self.cname, (siginfo, )))\n if self.ourcontainer:\n self.logger.log('freeing previously loaded %s' % (self.cname))\n self.announcequeue.append(self.ourcontainer)\n self.ourcontainer.save()\n self.ourcontainer.complete = True\n self.session.add(self.ourcontainer)\n\n self.ourcontainer = self.container(self.config.owner, self.config, siginfo=siginfo)\n self.ourcontainer.create(int(self.config.container_manager.maxcapacity))\n self.ourcontainer.save()\n self.logger.log(\"New %s's filename: %s\" % (self.cname, self.ourcontainer.filename))" ]
[ "0.5968703", "0.5593695", "0.5585771", "0.5165978", "0.50236136", "0.49452823", "0.49268574", "0.48931044", "0.4875816", "0.478532", "0.47574094", "0.46905568", "0.46905568", "0.4670254", "0.46697664", "0.4660797", "0.4616566", "0.46158904", "0.46091568", "0.45892322", "0.45810974", "0.45715466", "0.45386732", "0.45349154", "0.45147657", "0.45121464", "0.44859204", "0.4410704", "0.44081795", "0.44081795", "0.44081795", "0.44081795", "0.44081795", "0.43915337", "0.4389206", "0.43757597", "0.4372881", "0.43501312", "0.4345683", "0.4324483", "0.43176523", "0.43101853", "0.430465", "0.430465", "0.430465", "0.430465", "0.4302954", "0.4301937", "0.43010464", "0.4294474", "0.42940512", "0.42938057", "0.4290587", "0.42881367", "0.4284624", "0.42764154", "0.4266064", "0.42628425", "0.42596895", "0.42560992", "0.4253924", "0.42532095", "0.42471108", "0.42467728", "0.4245284", "0.42418998", "0.42404875", "0.4238681", "0.4237164", "0.4235318", "0.4229008", "0.42246047", "0.42224663", "0.42215928", "0.42011347", "0.4192183", "0.41906875", "0.41856384", "0.4179902", "0.41713944", "0.41690946", "0.41645035", "0.41614", "0.41610807", "0.4154797", "0.4152763", "0.41477588", "0.41459823", "0.41454095", "0.4139275", "0.41386268", "0.41382888", "0.41349867", "0.4133533", "0.41269964", "0.41269964", "0.4126876", "0.4126776", "0.4124352", "0.41239178" ]
0.5526841
3
This method pulls a subset/batch of values for paging through a container.
def batch(self, order_by=(), offset=0, limit=20, filter=None):
    query = self._query
    if filter:
        query = query.filter(filter)
    if order_by:
        query = query.order_by(order_by)
    # limit and offset must be applied after filter and order_by
    query = query.limit(limit).offset(offset)
    for ob in query:
        ob = contained(ob, self, stringKey(ob))
        yield ob
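A minimal usage sketch of how a generator like this could drive paging, assuming a container object that exposes the batch method above; the iter_pages helper name is hypothetical:

def iter_pages(container, page_size=20):
    # Walk the container one page at a time until an empty page comes back.
    offset = 0
    while True:
        page = list(container.batch(offset=offset, limit=page_size))
        if not page:
            break
        yield page
        offset += page_size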
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_full_container_list(container_name, **kwargs):\n limit = 10000\n kwargs['limit'] = limit\n page = []\n seed = []\n _, page = get_conn().get_container(container_name, **kwargs)\n seed.extend(page)\n\n while len(page) == limit:\n # keep getting pages..\n kwargs['marker'] = seed[-1]['name']\n _, page = get_conn().get_container(container_name, **kwargs)\n seed.extend(page)\n\n return seed", "def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n assert isinstance(page, int) and page > 0\n assert isinstance(page_size, int) and page_size > 0\n self.dataset()\n index_tuple: Tuple = index_range(page, page_size)\n start_index: int = index_tuple[0]\n end_index: int = index_tuple[1]\n return self.__dataset[start_index:end_index]", "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def get_all(self, start_at, limit, order=None):", "def get_items(self, start, stop, next_position=None):", "def _get_pages(self,url,params,section):\n if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results", "def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n assert isinstance(page, int) and page > 0\n assert isinstance(page_size, int) and page_size > 0\n\n range = index_range(page, page_size)\n self.dataset()\n return self.__dataset[range[0]: range[1]]", "def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")", "def fetch_pages(query_val, page_num):\n \n for page_id in range(1 + page_num + 1):\n try:\n output = fetch_data(query_val, page_id)\n for j in output:\n print(str(j))\n \n except Exception as e:\n print(e)", "def batch_get(func: object, filt: str, catg: str):\n offset = 0\n running = True\n returned = []\n notified = False\n while running:\n lookup = func(filter=filt, offset=offset, limit=5000, fields=\"__full__\")\n total = lookup[\"body\"][\"meta\"].get(\"pagination\", {}).get(\"total\", 0)\n if not notified:\n notify = f\"Retrieving {total:,} {catg} results.\"\n if 
total > 50000:\n notify = f\"Retrieving first 50,000 of {total:,} {catg} results.\"\n print(notify)\n notified = True\n else:\n progress.next()\n if lookup[\"body\"][\"resources\"]:\n offset += len(lookup[\"body\"][\"resources\"])\n returned.extend(lookup[\"body\"][\"resources\"])\n if offset >= total:\n running = False\n\n return returned", "def _extract_batch(self, data, batch_size):\n\n batch_size = batch_size or BATCH_SIZE\n\n batch = []\n try:\n for i in range(batch_size):\n batch.append(data.next())\n except StopIteration:\n pass\n\n return batch", "def test_iter_paging(self):\n ref = mock.Mock()\n ref.side_effect = [\n {'rows': [x for x in range(100)]},\n {'rows': []}\n ]\n rslt = Result(ref, page_size=10)\n collection = [x for x in rslt]\n self.assertEqual(len(collection), 100)", "def fetchmany(self, size=None):\n size = size or self.arraysize\n return list(itertools.islice(self._results, size))", "def get_batch(X, Y, iteration):\n offset = 100\n start = iteration * offset % len(Y)\n \n # YOUR CODE HERE\n # This will return the entire data set each iteration. This is costly, so\n # you should experiment with different way of changing this:\n return X[start: start + offset], Y[start: start + offset]", "def get_slice(self, limit, offset):\r\n if limit == 0:\r\n return self.objects[offset:]\r\n\r\n return self.objects[offset:offset + limit]", "def _all_offset_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_offset = 0\n is_truncated = True\n while is_truncated:\n page = page_function(offset=next_offset, **kwargs)\n next_offset = page.offset + page.limit\n is_truncated = page.total > next_offset\n for data in page.page_data:\n yield data", "def Get(self, limit, offset=0):\n count = 1\n result = []\n\n iterator = self.Run()\n\n try:\n for i in xrange(offset):\n val = iterator.next()\n except StopIteration:\n pass\n\n try:\n while count <= limit:\n val = iterator.next()\n result.append(val)\n count += 1\n except StopIteration:\n pass\n return result", "def test_get_multiple_pages_lro(client):\n from azure.mgmt.core.polling.arm_polling import ARMPolling\n poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0, request_id=\"test\"))\n pager = poller.result()\n\n items = list(pager)\n\n assert len(items) == 10\n assert items[0][\"properties\"][\"id\"] == 1\n assert items[1][\"properties\"][\"id\"] == 2", "def paginate(client_fun, *args, **kwargs):\n resp = client_fun(*args, **kwargs)\n yield from resp['content']\n total_elements = resp['totalElements']\n page_size = resp['pageSize']\n page_number = resp['pageNumber'] + 1\n if 'page_number' in kwargs:\n kwargs.pop('page_number')\n while page_number * page_size < total_elements:\n resp = client_fun(*args, page_number=page_number, **kwargs)\n yield from resp['content']\n page_number = resp['pageNumber'] + 1", "def paginator_slice(self, pageno):\n return self._data[pageno * self.height : pageno * self.height + self.height]", "def paging_sws(data, key, pagestart, local_name, pagesize, extra_pages, offset):\n # Create the picklist\n last_requested = pagestart + (extra_pages * pagesize)\n picklist = data[\"picklist\"] = []\n for i in range(0, len(data[key]), pagesize):\n pick = _item(start=None, end=None, count=None, more=ezt.boolean(0))\n pick.start = getattr(data[key][i], local_name)\n pick.count = offset + i\n pick.page = (pick.count // pagesize) + 1\n try:\n pick.end = getattr(data[key][i + pagesize - 1], local_name)\n except IndexError:\n pick.end = getattr(data[key][-1], local_name)\n 
picklist.append(pick)\n if pick.count >= last_requested:\n pick.more = ezt.boolean(1)\n break\n data[\"picklist_len\"] = len(picklist)\n first = pagestart - offset\n # FIXME: first can be greater than the length of data[key] if\n # you select a tag or search while on a page other than the first.\n # Should reset to the first page, but this test won't do that every\n # time that it is needed. Problem might go away if we don't hide\n # non-matching files when selecting for tags or searching.\n if first > len(data[key]):\n pagestart = 0\n pageend = first + pagesize\n # Slice\n return data[key][first:pageend]", "def _fetch_in_bulk(self, func_name, page_range, **func_args):\n all_results = []\n prog_bar = None\n\n if 'page_num' in func_args:\n func_args = func_args.pop('page_num')\n\n if self.profile.use_prog_bar:\n try:\n max_val = (max(page_range) + 1)\n except ValueError:\n max_val = 1\n\n prog_bar = progressbar.ProgressBar(max_value=max_val)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.profile.num_thread_workers) as executor:\n counter = 1\n future_to_page = {executor.submit(func_name, page_num=page, **func_args): page for page in page_range}\n\n for future in concurrent.futures.as_completed(future_to_page):\n try:\n data = future.result()\n except PageSizeError:\n raise\n except RequestFailed:\n continue\n\n if 'content' in data:\n items = data['content']\n for item in items:\n all_results.append(item)\n\n if self.profile.use_prog_bar:\n prog_bar.update(counter)\n time.sleep(0.1)\n counter += 1\n\n if self.profile.use_prog_bar:\n prog_bar.finish()\n\n return all_results", "def page_query(q):\n\toffset = 0\n\twhile True:\n\t\tr = False\n\t\tfor elem in q.limit(1000).offset(offset):\n\t\t r = True\n\t\t yield elem\n\t\toffset += 1000\n\t\tif not r:\n\t\t\tbreak", "def test_get_pagination(mockclient_cl1):\n # There should be 600 statements in testset.\n r = mockclient_cl1.get(TEST_URL + \"?size=700\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 600\n\n # Get the first 500\n r = mockclient_cl1.get(TEST_URL + \"?size=500\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 500\n\n # Get the remaining 100\n r = mockclient_cl1.get(TEST_URL + \"?size=500&page=2\")\n assert r.status_code == 200\n assert len(r.json[\"statements\"]) == 100", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "def _get_objects_for_page(self, page_number):\n start, end = self._calculate_index(page_number, self.per_page, self.objects_count)\n return self._slice_objects(start, end)", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def _make_paged_query(\n conn, search_base, search_scope, ad_query, attr_list, page_size\n):\n result = []\n page_result_control = SimplePagedResultsControl(\n size=page_size,\n cookie=''\n )\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n\n while True:\n r_type, r_data, r_msgid, serverctrls = conn.result3(msgid)\n result.extend(r_data)\n\n if serverctrls:\n if serverctrls[0].cookie:\n page_result_control.size = page_size\n page_result_control.cookie = 
serverctrls[0].cookie\n\n msgid = conn.search_ext(\n search_base,\n search_scope,\n ad_query,\n attr_list,\n serverctrls=[page_result_control],\n )\n else:\n break\n\n return result", "def get_paginate_data(self, *args, **kwargs):\n pass", "def results(self):\n page = []\n\n for i, item in enumerate(super(VideoCarouselTile, self).results()):\n page.append(item)\n if (i + 1) % 3 == 0:\n yield page\n page = []\n if page:\n yield page", "def do_get_multi_page(self, additional_url, additional_headers=None, limit=None, filter_system_defined=True):\n offset = 0\n item_count = 0\n result_list = []\n while True:\n result = self.do_get_single_page(additional_url,\n additional_headers=additional_headers, \n limit=limit, \n offset=offset)\n paging = result['paging']\n items = result['items']\n item_count += len(items)\n offset += len(items)\n result_list.extend(items)\n if item_count == paging['count'] or len(items) == 0:\n break\n if filter_system_defined:\n result_list = [x for x in result_list if 'isSystemDefined' not in x or x['isSystemDefined'] == False]\n return result_list", "def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data", "def _query_it(records_query: List[str],\n batch_size: int,\n metadata: Training\n ) -> Iterator[Dict[str, np.ndarray]]:\n dataset = predict_data(records_query, metadata, batch_size)()\n X_tensor = dataset.make_one_shot_iterator().get_next()\n with tf.Session() as sess:\n while True:\n try:\n X = sess.run(X_tensor)\n if \"con\" in X:\n X[\"con\"] = _make_mask(X[\"con\"], X[\"con_mask\"])\n if \"cat\" in X:\n X[\"cat\"] = _make_mask(X[\"cat\"], X[\"cat_mask\"])\n yield X\n except tf.errors.OutOfRangeError:\n break\n return", "def retrieve_all (self, user, pwd, vector_file, tiles, product, startdate, enddate, cloud_max) :\n q_param = (SciHubMetadataExtractor.\n __compose_q_param(vector_file,tiles, product,startdate,enddate,cloud_max))\n if (q_param=='') :\n print (\"ERROR: can't compose query string\")\n return list()\n\n start = 0\n list_result = list()\n while True :\n query_base = SciHubMetadataExtractor.base_url\n query_base+='&start='+str(start) + '&rows='+str(SciHubMetadataExtractor.page_num)\n r = requests.post(query_base,{\"q\":q_param},auth=(user,pwd))\n if (r.status_code!=200) :\n print ('ERROR: ' + str(r.status_code))\n return ''\n json_response = json.loads(r.text)\n total = int(json_response[\"feed\"][\"opensearch:totalResults\"])\n if (total == 0) :\n return list_result\n \n raw_entities = json_response[\"feed\"][\"entry\"]\n if total == 1:\n t = list()\n t.append(raw_entities)\n raw_entities = t.copy()\n\n for re in raw_entities :\n list_result.append(SciHubMetadataExtractor.__convert_raw_entity(re)) \n \n if (start + SciHubMetadataExtractor.page_num >= total) :\n break\n else :\n start+=SciHubMetadataExtractor.page_num\n \n return list_result", "def paged(self, pagesize=128):\n # FAST algorithm ! 
It is about 10x faster than the naive algorithm\n # thanks to the use of GetRows, which dramatically decreases the number\n # of COM calls.\n recordset = win32com.client.Dispatch('ADODB.Recordset')\n if self.order_by:\n recordset.Open(\n unicode('SELECT * FROM [%s] ORDER BY %s' % (\n self.name, self.order_by)), self.document.connection, 0, 1)\n else:\n recordset.Open(\n unicode('SELECT * FROM [%s]' % self.name),\n self.document.connection, 0, 1)\n try:\n fields = [self.encoding(field.Name) for field in recordset.Fields]\n ok = True\n while ok:\n # Thanks to Rogier Steehouder for the transposing tip\n rows = zip(*recordset.GetRows(pagesize))\n\n if recordset.EOF:\n # close the recordset as soon as possible\n recordset.Close()\n recordset = None\n ok = False\n\n for row in rows:\n yield dict(zip(fields, map(self.encoding, row)))\n except:\n if recordset is not None:\n recordset.Close()\n del recordset\n raise", "def _chunker(self, seq, size):\n return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))", "def _get_pages(page_size, total_records):\r\n pages = total_records/page_size+bool(total_records%page_size)\r\n return range(1, pages+1)", "def _batchify(self, data_containers: Dict, batch_size):\n\n X = Variable(torch.LongTensor(data_containers['X'])).to(self.device)\n Y = Variable(torch.FloatTensor(data_containers['Y'])).to(self.device)\n\n data_size = X.size()[0]\n num_batches = data_size // batch_size\n\n return [\n (X[bi * batch_size: (bi + 1) * batch_size],\n Y[bi * batch_size: (bi + 1) * batch_size].unsqueeze(1))\n for bi in range(num_batches + 1)\n ]", "def pagination(self, lastValue=None, sortKey=\"_id\", limit=10, asc=\"ASC\"):\n comparison = \"\"\n if lastValue is not None:\n comparison = sortKey + \" > \" + sanitize_value(lastValue)\n limit = int(limit)\n if asc != \"ASC\" and asc != \"DESC\":\n asc = \"ASC\"\n results = self.__run(\n pagination_template.substitute(\n tablename=self.tablename,\n comparison=comparison,\n sortKey=sortKey,\n asc=asc,\n limit=limit\n ),\n )\n return results", "def _collection_samples(collection_query, limit, config):\n just_cid = lambda obj : obj.get('meta', {}).get('concept-id')\n found_collections = scom.search_by_page(\"collections\",\n query=collection_query,\n filters=just_cid,\n page_state=scom.create_page_state(limit=limit),\n config=config)\n return found_collections[:limit]", "def get_all_pages(session, url, size, params=None):\n # Get first page to get results and detect number fo pages\n response = get_single_page(session, url, size, params)\n parameters = {}\n # Get number of indexes for this request\n entries = int(response.headers['X-Total'])\n # Calculate amount of pages that need to be requested\n pages = int(entries / size) + (entries % size > 1)\n # Data retrived by the request\n data = response.json()\n\n # Add params if custom parameters\n if params is not None:\n parameters.update(params)\n # Detect if more than 1 page\n if pages > 1:\n # Range between 2 and pages + 1 to get the last one as well\n for page in range(2, pages + 1):\n # Update parameters with page[number] parameter\n parameters.update({'page[number]': page})\n # Make the request\n r = get_single_page(session, url, size, params=parameters)\n try:\n # Merge data from request with already received data\n new_data = r.json()\n if new_data == '[]':\n continue\n data += new_data\n except json.JSONDecodeError:\n print('Error when decoding json, please try again...')\n exit(1)\n\n return data", "def fetchsome(cursor, arraySize=5000):\n while True:\n results = 
cursor.fetchmany(arraySize)\n if not results:\n break\n for result in results:\n yield result", "def paginated_call(self) -> global___Snippet.ClientCall:", "def chunk(self, count):\n page = 1\n results = self.for_page(page, count).get()\n\n while len(results) > 0:\n yield results\n\n page += 1\n\n results = self.for_page(page, count).get()", "def slice_and_run(single_iterator: permutations):\n step = 10000000\n start = 0\n stop = start + step\n # I use next_it bool to make sure to create one more slice with no end limit when slices are finished\n next_it = False\n while True:\n if next_it is False:\n cake_slice = islice(single_iterator, start, stop)\n else:\n cake_slice = islice(single_iterator, start, None)\n if args.cores is None:\n with Pool() as pool:\n data = pool.map(printer, cake_slice)\n else:\n with Pool(int(args.cores)) as pool:\n data = pool.map(printer, cake_slice)\n start += step\n stop += step\n if next_it is True:\n break\n if not data:\n next_it = True", "def batch_slices(batch, sizes=False, include_ends=True):\n size = scatter_add(torch.ones_like(batch), batch)\n cumsum = torch.cumsum(size, dim=0)\n starts = cumsum - size\n ends = cumsum - 1\n\n slices = starts\n if include_ends:\n slices = torch.stack([starts, ends], dim=1).view(-1)\n\n if sizes:\n return slices, size\n return slices", "def _slice(self, dict_data, partition):\n dict_data_iter = iter(dict_data)\n for _ in range(0, len(dict_data), partition):\n yield {k: dict_data[k] for k in islice(dict_data_iter, partition)}", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def queryset_in_batches(queryset):\n start_pk = 0\n\n while True:\n qs = queryset.filter(pk__gt=start_pk)[:BATCH_SIZE]\n pks = list(qs.values_list(\"pk\", flat=True))\n\n if not pks:\n break\n\n yield pks\n\n start_pk = pks[-1]", "def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)", "def fetchmany(self, size=None):\n\n self._check_executed()\n\n if self.rownumber >= self.rowcount:\n return []\n\n end = min(self.rownumber + (size or self.arraysize), self.rowcount)\n result = self._rows[self.rownumber - self._offset : end - self._offset]\n self.rownumber = min(end, len(self._rows) + self._offset)\n\n while (end > self.rownumber) and self.nextset():\n result += self._rows[self.rownumber - self._offset : end - self._offset]\n self.rownumber = min(end, len(self._rows) + self._offset)\n return result", "def get_slice(self, limit, offset):\n # Always get the first page\n return super(NoLimitPaginator, self).get_slice(0, 0)", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def all(cls, client, **kwargs):\n max_date = kwargs['max_date'] if 'max_date' in kwargs else None\n max_fetches = \\\n kwargs['max_fetches'] if 'max_fetches' in kwargs else None\n nonzero = kwargs.get('nonzero', False)\n\n url = 'https://api.robinhood.com/options/positions/'\n params = {'nonzero': nonzero}\n data = client.get(url, params=params)\n results = data[\"results\"]\n\n if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]):\n return results\n if max_fetches == 1:\n return results\n\n fetches = 1\n while 
data[\"next\"]:\n fetches = fetches + 1\n data = client.get(data[\"next\"])\n results.extend(data[\"results\"])\n if is_max_date_gt(max_date, results[-1]['updated_at'][0:10]):\n return results\n if max_fetches and (fetches >= max_fetches):\n return results\n return results", "def paginate(cls, papers, page=0, limit=30):\n offset = page * limit\n end = offset + limit\n if offset > len(papers):\n return []\n if end > len(papers):\n return papers[offeset:]\n return papers[offset:end]", "def get_entities_independent_of_pages(self, first_page: dict[str, Any], limit: int, offset: int,\n resource: Optional[str] = None) -> list[dict]:\n resource = resource or self.get_management_resource()\n entities = first_page.get('value', [])\n next_page_url = first_page.get('nextLink')\n # more entities to get\n while next_page_url and len(entities) < offset + limit:\n response = self.http_request(\n 'GET', full_url=next_page_url, resource=resource)\n\n entities = entities + response.get('value', [])\n next_page_url = response.get('nextLink')\n if offset > len(entities):\n return []\n return entities[offset:limit + offset]", "def _GetAttributeContainers(\n self, container_type, callback=None, cursor=0, maximum_number_of_items=0):\n if not cursor:\n cursor = 0\n\n cursor, items = self._store.GetSerializedAttributeContainers(\n container_type, cursor, maximum_number_of_items)\n\n containers = []\n identifiers_to_delete = []\n for identifier_bytes, serialized_container in items.items():\n identifier_string = codecs.decode(identifier_bytes, 'utf-8')\n identifier = identifiers.RedisKeyIdentifier(identifier_string)\n identifiers_to_delete.append(identifier)\n\n container = self._DeserializeAttributeContainer(\n self._active_container_type, serialized_container)\n container.SetIdentifier(identifier)\n\n if callback:\n callback(self._storage_writer, container)\n\n containers.append(container)\n\n self._store.RemoveAttributeContainers(container_type, identifiers_to_delete)\n\n self._active_cursor = cursor\n containers = self._active_extra_containers + containers\n\n if maximum_number_of_items:\n self._active_extra_containers = containers[maximum_number_of_items:]\n\n return containers[:maximum_number_of_items]", "def __iter__(self):\n return self.paged()", "def _collect_results(self, request_method, request_args, request_kwargs={}, request_params={}):\n results = []\n cursor = None\n page_params = copy.copy(request_params)\n\n while True:\n if cursor:\n page_params['cursor'] = cursor\n response = request_method(\n *request_args,\n **request_kwargs,\n params=page_params\n )\n _raise_on_error(response)\n response_json = response.json()\n results.extend(response_json['results'])\n if response_json['next']:\n cursor = get_cursor_from_url(response_json['next'])\n else:\n return results", "def _run_query(self):\n self._search_query()\n logger.debug(\"Payload\")\n logger.debug(self._payload)\n _resp = query_public_ip_pools(self._payload)\n logger.debug(_resp)\n _resp = self.load_json(_resp)\n _ret_list = []\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list", "def paginate_queryset(self, queryset, request, view=None):\n self.count = self.get_count(queryset)\n self.start_index = 0\n self.end_index = self.start_index + self.page_size - 1\n\n # TODO: this logic is repeated below...\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count 
else 0\n\n range_string = request.GET.get(self.range_query_param)\n\n if range_string:\n try:\n page_range = json.loads(range_string)\n except json.JSONDecodeError:\n return None\n\n if len(page_range) != 2:\n return None\n\n self.start_index, self.end_index = [pagination._positive_int(x) for x in page_range]\n\n if self.end_index > self.count - 1:\n self.end_index = self.count - 1 if self.count else 0\n\n if self.start_index > self.end_index:\n self.start_index = self.end_index\n\n return list(queryset[self.start_index:self.end_index + 1])", "def paginated_retrieval(methodname, itemtype):\n return compose(\n reusable,\n basic_interaction,\n map_yield(partial(_params_as_get, methodname)),\n )", "def _create_slices(chunk_size, id, reference_name, start, end):\n urls = []\n chunks = int( (end - start) / chunk_size )\n slice_start = start\n slice_end = 0\n if chunks >= 1 and start != None and end != None:\n for i in range(chunks):\n slice_end = slice_start + chunk_size\n _create_slice(urls, id, reference_name, slice_start, slice_end)\n slice_start = slice_end\n _create_slice(urls, id, reference_name, slice_start, end)\n else: # One slice only\n url = f\"http://{request.host}/data?id={id}\"\n if( reference_name is not None ):\n url += f\"&reference_name={reference_name}\"\n urls.append({ \"url\": url })\n\n return urls", "def _scrolling_request(self, path, method='GET', body=None, headers=None):\n assert 'pagination' in body\n paginated_view = body\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n\n scrolling = True\n while scrolling:\n response, content = super(DSBaseService, self)._request(url,\n method,\n body=str(paginated_view).replace(\"'\", '\"'),\n headers=headers)\n\n if int(response['status']) == 200:\n data = json.loads(content)\n offset = data['currentPage']['offset']\n size = data['currentPage']['size']\n total = data['total']\n if offset + size < total:\n paginated_view['pagination']['offset'] = offset + size\n else:\n scrolling = False\n yield data\n elif int(response['status']) == 429:\n # rate limited, wait before resuming scroll requests\n time.sleep(1)\n else:\n scrolling = False", "def get_items(id_name, request, client):\n result = client.quick_search(request)\n \n items_pages = []\n limit_to_x_pages = None\n for page in result.iter(limit_to_x_pages):\n items_pages.append(page.get())\n\n items = [item for page in items_pages for item in page['features']]\n \n \n return (id_name, items)", "def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]", "def slice(self, size=None, offset=None):\n offset = offset or 0\n size = size or len(self)\n\n data = []\n data_size = 0\n\n # dequeue chunks\n size += self.offset + offset\n while self.chunks:\n if data_size >= size:\n break\n chunk = self.chunks.popleft()\n data.append(chunk)\n data_size += len(chunk)\n\n # re-queue merged chunk\n data = b''.join(data)\n self.chunks.appendleft(data)\n\n return data[self.offset + offset:size]", "def paginated(self) -> global___Snippet.Paginated:", "def list_subsets(self, workspace_unique_id=None, user_id=None, request=None):\n# print('list_subsets_request', request)\n subset_list = []\n# subset_uuid_list = [] \n# sub_request_list = []\n request_for_subset_uuid = self._get_mapping_for_name_in_dict('uuid', request)\n# subset_uuid_list.append(sub['uuid'])\n# sub_request_list.append(sub)\n# else: \n# subset_uuid_list = self.get_subset_list(workspace_unique_id=workspace_unique_id, user_id=user_id)\n# 
sub_request_list = [None]*len(subset_uuid_list)\n \n# for subset_uuid, sub_request in zip(subset_uuid_list, sub_request_list): \n# print('=====SUBSET_UUID=====')\n# print(workspace_unique_id)\n# print(user_id)\n# print(self.workspaces)\n# print('=====================')\n for subset_uuid in self.get_subset_list(workspace_unique_id=workspace_unique_id, user_id=user_id):\n print('=====SUBSET_UUID', '\"{}\"'.format(subset_uuid))\n sub_request = request_for_subset_uuid.get(subset_uuid, {})\n \n # Check uuid for subset in request (if given) \n# if request:\n# for sub in request:\n# # print(sub)\n# if sub['uuid'] == subset_uuid:\n# break\n \n # Get subset dict\n subset_dict = self.dict_subset(workspace_unique_id=workspace_unique_id, \n subset_unique_id=subset_uuid, \n request=sub_request)\n \n \n \n # Add subset dict to subset list\n subset_list.append(subset_dict)\n \n return subset_list", "def __getitem__(self, index: int) -> Optional[dict]:\n # if isinstance(index, slice) is True:\n\n # data_slice = index\n # start = data_slice.start\n # stop = data_slice.stop\n # step = data_slice.step\n\n # first_item_page = (start // 100) + 1\n # end_item_page = (stop // 100) + 1\n\n # all_data: List[dict] = []\n\n # for page_number in range(first_item_page, end_item_page+1):\n\n # # create url to query\n # params = {\"page\": items_page}\n # url = add_query_params(self.url, params)\n\n # data, _ = self.retrieve_data(url)\n\n # all_data += data\n\n # first_page_index = start % 100\n\n # needed_data = []\n # for index in range(start, stop, step):\n # needed_data.append(all_data[index])\n\n # return needed_data\n\n \n # get the page the item is on\n items_page = (index // 100) + 1\n\n # create url to query\n params = {\"page\": items_page}\n url = add_query_params(self.url, params)\n\n data, _, result = self.retrieve_data(url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Unable to get item from the api\")\n return None\n\n # get the position of data on the page\n page_index = index % 100\n\n try:\n return data[page_index]\n except KeyError as e:\n raise KeyError(\"Data does not exists for that index\") from e", "def get_container(url, token, container, marker=None, limit=None,\n prefix=None, delimiter=None, end_marker=None,\n version_marker=None, path=None, http_conn=None,\n full_listing=False, service_token=None, headers=None,\n query_string=None):\n close_conn = False\n if not http_conn:\n http_conn = http_connection(url)\n close_conn = True\n if full_listing:\n rv = get_container(url, token, container, marker, limit, prefix,\n delimiter, end_marker, version_marker, path=path,\n http_conn=http_conn, service_token=service_token,\n headers=headers)\n listing = rv[1]\n while listing:\n if not delimiter:\n marker = listing[-1]['name']\n else:\n marker = listing[-1].get('name', listing[-1].get('subdir'))\n version_marker = listing[-1].get('version_id')\n listing = get_container(url, token, container, marker, limit,\n prefix, delimiter, end_marker,\n version_marker, path, http_conn,\n service_token=service_token,\n headers=headers)[1]\n if listing:\n rv[1].extend(listing)\n return rv\n parsed, conn = http_conn\n cont_path = '%s/%s' % (parsed.path, quote(container))\n qs = 'format=json'\n if marker:\n qs += '&marker=%s' % quote(marker)\n if limit:\n qs += '&limit=%d' % limit\n if prefix:\n qs += '&prefix=%s' % quote(prefix)\n if delimiter:\n qs += '&delimiter=%s' % quote(delimiter)\n if end_marker:\n qs += '&end_marker=%s' % quote(end_marker)\n if version_marker:\n qs += '&version_marker=%s' % 
quote(version_marker)\n if path:\n qs += '&path=%s' % quote(path)\n if query_string:\n qs += '&%s' % query_string.lstrip('?')\n req_headers = {'X-Auth-Token': token, 'Accept-Encoding': 'gzip'}\n if service_token:\n req_headers['X-Service-Token'] = service_token\n if headers:\n req_headers.update(headers)\n method = 'GET'\n conn.request(method, '%s?%s' % (cont_path, qs), '', req_headers)\n resp = conn.getresponse()\n body = resp.read()\n if close_conn:\n conn.close()\n http_log(('%(url)s%(cont_path)s?%(qs)s' %\n {'url': url.replace(parsed.path, ''),\n 'cont_path': cont_path,\n 'qs': qs}, method,),\n {'headers': req_headers}, resp, body)\n\n if resp.status < 200 or resp.status >= 300:\n raise ClientException.from_response(resp, 'Container GET failed', body)\n resp_headers = resp_header_dict(resp)\n if resp.status == 204:\n return resp_headers, []\n return resp_headers, parse_api_response(resp_headers, body)", "def datasubset(loader, start, count, batch_size):\n # Note: start is the start index of batch, not image\n smaller_dataset = []\n end_idx = count / batch_size\n for batch_idx, (orig_images, labels) in enumerate(loader):\n if start <= batch_idx < end_idx:\n smaller_dataset.append((orig_images, labels))\n if batch_idx > end_idx:\n break\n return smaller_dataset", "def get_batch(self, idxs):\r\n return self.data[(self.start + idxs) % self.maxlen]", "def _get_item(self, index):\n data, label = self.data[index], self.label[index]\n coordmax = np.max(data, axis=0)\n coordmin = np.min(data, axis=0)\n nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/1.5).astype(np.int32)\n nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/1.5).astype(np.int32)\n batch_data, batch_label = [], []\n for i in range(nsubvolume_x):\n for j in range(nsubvolume_y):\n curmin = coordmin + [i*1.5, j*1.5, 0]\n curmax = coordmin+ [(i+1)*1.5, (j+1)*1.5, coordmax[2]-coordmin[2]]\n crop_ids = np.sum((data>=(curmin-0.2)) * (data<=(curmax+0.2)), axis=1) == 3\n if sum(crop_ids) == 0: continue\n crop_data = data[crop_ids]\n crop_label = label[crop_ids]\n mask = np.sum((crop_data>=(curmin-0.001)) * (crop_data<=(curmax+0.001)), axis=1) == 3\n ids = np.random.choice(crop_label.size, self.npoints, replace=True)\n this_data = crop_data[ids]\n this_label = crop_label[ids]\n this_mask = mask[ids]\n if sum(this_mask) * 1. 
/ this_mask.size < 0.01: continue\n this_label *= this_mask\n if self.normalize:\n this_data = utils.normalize_point_cloud(this_data)\n batch_data.append(this_data[None,:,:])\n batch_label.append(this_label[None,:])\n batch_data = np.concatenate(tuple(batch_data), axis=0)\n batch_label = np.concatenate(tuple(batch_label), axis=0)\n return batch_data, batch_label", "def slice_value(*, value : Any, stop : Optional[int] = None, start : Optional[int] = None, step : Optional[int] = None) -> Any:\n slice_object = slice(start, stop, step)\n return value[slice_object]", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def GetNextBatchOfResults(self) -> typing.List[Repository.Repository]:\n while True:\n try:\n logging.debug('Requesting page %d', self.next_page_num)\n page = list(self.query.get_page(self.next_page_num))\n logging.debug('Page %d contains %d results', self.next_page_num,\n len(page))\n self.next_page_num += 1\n return page\n except github.RateLimitExceededException:\n logging.debug('Pausing on GitHub rate limit')\n time.sleep(3)\n except github.GithubException:\n # One possible cause for this exception is when trying to request\n # a page beyond 1000 results, since GitHub only returns the first\n # 1000 results for a query.\n return []", "def get_series(self, page=0, filters=''):", "def _paginate(cls, context, query):\n marker = int(context.marker or 0)\n limit = int(context.limit or CONF.metadatas_page_size)\n # order by 'updated DESC' to show the most recent metadatas first\n query = query.order_by(desc(DBMetadata.updated))\n # Apply limit/offset\n query = query.limit(limit)\n query = query.offset(marker)\n # check if we need to send a marker for the next page\n if query.count() < limit:\n marker = None\n else:\n marker += limit\n return query.all(), marker", "def get_microbatch(batch: Dict[str, jnp.ndarray],\n idx: int) -> Dict[str, jnp.ndarray]:\n offset = idx * microbatch_size\n length = microbatch_size\n starts = {k: [offset] + [0] * (b.ndim - 1) for k, b in batch.items()}\n limits = {k: [length] + list(b.shape[1:]) for k, b in batch.items()}\n return {\n k: jax.lax.dynamic_slice(b, starts[k], limits[k])\n for k, b in batch.items()\n }", "def extract_mongodb(client, dbs, coll, initial_id=None, extract_by_batch=None): \n with client:\n db=client[dbs]\n fetch_before=db[coll].find()\n fetch=db[coll].find()\n list_of_docs=[]\n count=0\n if initial_id is not None: # determine which row to start \n for doc in fetch_before:\n count+=1\n if initial_id == None:\n count=0\n break\n if initial_id == doc['_id']:\n break\n\n if extract_by_batch is None and initial_id is None:\n for docs in fetch:\n docs['_id']=str(docs['_id'])\n list_of_docs.append(docs) \n print('extract all')\n elif extract_by_batch is None and initial_id is not None:\n for docs in islice(fetch, count):\n docs['_id']=str(docs['_id'])\n list_of_docs.append(docs) \n print('extract all start at {}'.format(count))\n elif extract_by_batch is not None and initial_id is None:\n for docs in islice(fetch, 0, count+extract_by_batch):\n docs['_id']=str(docs['_id'])\n list_of_docs.append(docs) \n print('extract_by_batch {} at {}'.format(extract_by_batch, count))\n elif extract_by_batch is not None and initial_id is not None:\n for docs in islice(fetch, count, 
count+extract_by_batch):\n docs['_id']=str(docs['_id'])\n list_of_docs.append(docs) \n print('extract_by_batch {} at {}'.format(extract_by_batch, count))\n print(len(list_of_docs),\"'s rows from {} is being extract'\".format(coll))\n del fetch_before, fetch\n return list_of_docs", "def _get_allpages(self, url:str, paramsdict:Dict[str,str]):\n r1 = self._get_dict_from_url(url, paramsdict)\n r = [r1]\n #display(r)\n if 'total_pages' in r1:\n # print('more than one page')\n for next_page in range(2, r1['total_pages']+1):\n # print(f\"load page {next_page} \")\n r.append(self._get_dict_from_url(url, {**paramsdict, 'page':next_page}))\n # print(len(r))\n # print([len(rx['results']) for rx in r])\n results = [entry for rx in r for entry in rx['results'] ]\n\n return results", "def _run_query(self):\n self._search_query()\n logger.debug(\"Payload\")\n logger.debug(self._payload)\n _resp = query_public_ip_pool_detail(self._payload)\n logger.debug(_resp)\n _resp = self.load_json(_resp)\n _ret_list = []\n if _resp is None:\n self._record_total = self._record_filtered = 0\n return []\n _ret_list = _resp[\"ret_set\"]\n self._record_filtered = self._record_total = _resp.get(\"total_count\") or 100\n return _ret_list", "def _retrieve_data(keyw, limit, page=1):\n # Max results per page is 100\n per_page = limit if limit < 100 else 100\n url = BASE_URL + QUALIFIERS % (keyw, per_page, page)\n\n req = requests.get(url)\n r_json = req.json()\n\n if limit > 100:\n r_json['items'].extend(_retrieve_data(keyw, limit - 100, page + 1).\n get('items', []))\n\n return r_json", "def _granule_samples(found_collections, filters, limit, config):\n found_granules = []\n for concept in found_collections:\n query = {\"concept_id\": concept}\n granules = search(query, filters=filters, limit=limit, config=config)\n found_granules.extend(granules)\n return found_granules[:len(found_collections)*limit]", "def GetContainerObjects(self, uri, container, limit=-1, marker=''):\n self.apihost = uri\n urioptions = '/' + container + '?format=json'\n if not limit is -1:\n urioptions += '&limit=%d' % limit\n if len(marker):\n urioptions += '&marker=%s' % marker\n self.ReInit(self.sslenabled, urioptions)\n self.headers['X-Auth-Token'] = self.authenticator.AuthToken\n self.headers['Content-Type'] = 'text/plain; charset=UTF-8'\n self.log.debug('uri: %s', self.Uri)\n self.log.debug('headers: %s', self.Headers)\n res = requests.get(self.Uri, headers=self.Headers)\n if res.status_code == 200:\n # We have a list in JSON format\n return res.json()\n elif res.status_code == 204:\n # Nothing left to retrieve\n return {}\n else:\n # Error\n self.log.error('Error retrieving list of containers: (code=' + str(res.status_code) + ', text=\\\"' + res.text + '\\\")')\n return {}", "def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;", "def get_all(self, start_at, limit, order=None):\n result = []\n objects = []\n if limit == 0:\n objects = self.items[start_at:]\n else:\n objects = self.items[start_at:(start_at + limit)]\n for item in objects:\n result.append(FileDict(item))\n return result", "def fetch_paginated_data(url):\n data = []\n while url:\n response = requests.get(url)\n response_json = response.json()\n data.extend(response_json[\"results\"])\n url = response_json[\"next\"]\n return data", 
"def get_next_batch(self):\n url_list = self.create_url_batch(self.start_url, self.batch_size,\n self.params)\n pages = self.get_pages(url_list)\n adjustment_factor = self.get_adjustment_factor(pages, self.page_size,\n self.item_key)\n self.adjust_batch_size(adjustment_factor)\n items = self.sorted_items_from_pages(pages, self.item_key,\n self.sort_key)\n items = self.remove_duplicate_items(items, self.prior_batch_ids)\n try:\n last_item_timestamp = items[-1]['created_at']\n except IndexError:\n time.sleep(3)\n return []\n self.params[\"since\"] = last_item_timestamp\n self.prior_batch_ids = set([x[\"id\"] for x in items])\n return items", "def page(self):\r\n limit = self.get_limit()\r\n offset = self.get_offset()\r\n count = self.get_count()\r\n objects = self.get_slice(limit, offset)\r\n meta = {\r\n 'offset': offset,\r\n 'limit': limit,\r\n 'total_count': count}\r\n\r\n if limit:\r\n meta['previous'] = self.get_previous(limit, offset)\r\n meta['next'] = self.get_next(limit, offset, count)\r\n\r\n return {\r\n self.collection_name: objects, 'meta': meta}", "def nest_queryset(nest_size, queryset):\n nested_items = []\n for index, item in enumerate(queryset, 1):\n if (index + nest_size) % nest_size == 1:\n nested_items.append([])\n nested_items[index // nest_size].append(item)\n else:\n nested_index = index // nest_size - 1 \\\n if index // nest_size == 1 else index // nest_size\n nested_items[nested_index].append(item)\n return nested_items", "def _chunked_query(self, model_class, chunk_field, items, chunk_size=500, **kwargs):\r\n res = chain.from_iterable(\r\n self._query(model_class, **dict([(chunk_field, chunk)] + kwargs.items()))\r\n for chunk in chunks(items, chunk_size)\r\n )\r\n return res", "def paging_command(limit: int | None, page_size: Union[str, None, int], page_number: str | None, func_command,\n page_size_def='50', **kwargs) -> tuple[Any, Union[list, Any]]:\n response = {}\n items = []\n if not limit:\n page_size = page_size or page_size_def\n response = func_command(page_size, page_number, **kwargs)\n items = response.get('items', [])\n else:\n if page_number or page_size:\n raise DemistoException(\"Can't supply limit and page number/page size\")\n page_size = page_size if limit > 100 else limit\n total = 0\n while total < limit:\n response = func_command(page_size, page_number, **kwargs)\n items += response.get('items', [])\n if not response.get('hasNext'):\n break\n total += len(response.get('items', []))\n page_number = response.get('pageNumber', 0) + 1\n items = items[:limit]\n\n return response, items", "def fetch(self, platform=None):\n params = {\n \"platforms\": platform,\n \"offset\": 0\n }\n while(True):\n response = super(GiantBombFeed, self).fetch(params)\n for result in response[\"results\"]:\n yield self.parse(result)\n if response[\"number_of_page_results\"] < self.results_per_page:\n break\n params[\"offset\"] += self.results_per_page", "def getAllListPage():\n firstPage = city + '/line1'\n data = urlopen(firstPage).read().decode('gbk')\n urlList = getLineTypeList(data)\n urlList.append(firstPage)\n num = len(urlList)\n i = 0\n p = Pool(processes=4)\n pageData = p.map(readData, urlList)\n# manager = Manager()\n# pageData = manager.list()\n# while i < num:\n# procline = Process(target=readData, args=(urlList[i], pageData,))\n# procline.start()\n# procline.join()\n# i += 1\n return pageData", "def get_resources(resource_client) -> list:\n resource_list = []\n paginator = resource_client.get_paginator(BOTO3_LIST_FUNCTION)\n pages = paginator.paginate()\n for 
page in pages:\n # Your going to have to look through the response and append the correct value to the list\n resource = page[\"something\"]\n resource_list = resource_list + resource\n return resource_list", "def iter_call(self, service, method,\r\n chunk=100, limit=None, offset=0, *args, **kwargs):\r\n if chunk <= 0:\r\n raise AttributeError(\"Chunk size should be greater than zero.\")\r\n\r\n if limit:\r\n chunk = min(chunk, limit)\r\n\r\n result_count = 0\r\n kwargs['iter'] = False\r\n while True:\r\n if limit:\r\n # We've reached the end of the results\r\n if result_count >= limit:\r\n break\r\n\r\n # Don't over-fetch past the given limit\r\n if chunk + result_count > limit:\r\n chunk = limit - result_count\r\n results = self.call(service, method,\r\n offset=offset, limit=chunk, *args, **kwargs)\r\n\r\n # It looks like we ran out results\r\n if not results:\r\n break\r\n\r\n # Apparently this method doesn't return a list.\r\n # Why are you even iterating over this?\r\n if not isinstance(results, list):\r\n yield results\r\n break\r\n\r\n for item in results:\r\n yield item\r\n result_count += 1\r\n\r\n offset += chunk\r\n\r\n if len(results) < chunk:\r\n break" ]
[ "0.6219603", "0.5777015", "0.56956834", "0.5685915", "0.5678526", "0.56506056", "0.5633514", "0.5525495", "0.54748476", "0.5472662", "0.5422143", "0.54113364", "0.539315", "0.5385653", "0.53844", "0.5360519", "0.5335842", "0.5334678", "0.53321403", "0.5319961", "0.5312008", "0.5305421", "0.5286904", "0.527561", "0.52733314", "0.52642775", "0.52572876", "0.52335685", "0.52296615", "0.52273077", "0.5224108", "0.5217166", "0.518623", "0.5182475", "0.51822346", "0.51796895", "0.51739365", "0.5168888", "0.51666915", "0.5160576", "0.51383764", "0.5131621", "0.51249224", "0.51213235", "0.51185864", "0.5117121", "0.5110393", "0.5101814", "0.50944453", "0.50944453", "0.50752074", "0.50689656", "0.50671667", "0.50553066", "0.5048779", "0.50389415", "0.502946", "0.50270885", "0.5024029", "0.50161684", "0.50008136", "0.49985012", "0.49976528", "0.49947977", "0.49854153", "0.49850026", "0.4977404", "0.4970656", "0.49705347", "0.496869", "0.49620005", "0.4952537", "0.49519727", "0.49358672", "0.4930869", "0.49306667", "0.49272636", "0.49196908", "0.4916772", "0.49136996", "0.4896107", "0.48953936", "0.4893928", "0.4892533", "0.4888837", "0.48883256", "0.48814395", "0.4878283", "0.48781663", "0.48750123", "0.4865699", "0.48551896", "0.48549986", "0.4853544", "0.48490638", "0.4847355", "0.48426163", "0.48418492", "0.48390788", "0.4838589" ]
0.56218
7
Parse arguments from commandline RETURNS
def parseArgs():
    parser = ap.ArgumentParser()
    parser.add_argument("-cf", "--controlfile", metavar="FILE", type=str, help="path to control file")
    args = parser.parse_args()
    controlfile = args.controlfile
    return controlfile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def parse(self, command_line=sys.argv[1:]):\n return self._parser.parse_args(command_line)", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. 
default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def parse_arguments(self,parser):\r\n return parser.parse_args()", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_cmdline_args():\n parser = argparse.ArgumentParser(description=\"Guesses the functional element for host.\")\n ##\n ## Internal options\n ##\n parser.add_argument(\"--json\", dest=\"json\", action='store_true', help=\"output in JSON\")\n\n ##\n ## PuppetDB options\n ##\n pdbconf = PdbConfig()\n pdbconf.add_standard_args(parser)\n\n parser.add_argument(\"host\", metavar=\"HOST\",\n help=\"hostnames to query for FE\")\n\n return parser.parse_args()", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments", "def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()", "def parse_args(args=None):\n\t\treturn _get_args_parser().parse_args(args)", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = 
source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def parse():\n\n args = sys.argv\n if os.name == 'nt' and args and 'python' in os.path.basename(args[0]).lower():\n args = args[2:]\n else:\n args = args[1:]\n args = vars(parser.parse_args(args))\n \n # set the global verbosity level of the script\n script.set_verbosity(args['verbosity']) \n \n return args", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Changes a lammps data file by implementing options such as: '\n 'reorder atom ids in a lammps data file, given a dictionary to '\n 'reorder the atoms (a csv of old_index,new_index), and/or '\n 'change the atom, bond, angle, dihedral, and/or improper types,'\n 'given a dictionary to do so. Can also '\n 'print info for selected atom ids. 
')\n parser.add_argument(\"-c\", \"--config\", help=\"The location of the configuration file in ini format.\"\n \"The default file name is {}, located in the \"\n \"base directory where the program as run.\".format(DEF_CFG_FILE),\n default=DEF_CFG_FILE, type=read_cfg)\n args = None\n try:\n args = parser.parse_args(argv)\n except IOError as e:\n warning(\"Problems reading file:\", e)\n parser.print_help()\n return args, IO_ERROR\n except (InvalidDataError, KeyError, MissingSectionHeaderError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "def parse_arguments(p_args):\n\tresult = 0\n\tcommand = None\n\targument = None\n\n\tif len(p_args) < 2:\n\t\tprint(\"Incorrect arguments.\")\n\t\tprint_help()\n\n\telif p_args[1] not in ['interrogate']:\n\t\tprint(\"Incorrect arguments.\")\n\t\tprint_help()\n\n\telse:\n\t\tresult = 1\n\t\tcommand = p_args[1]\n\n\treturn (result, command, argument)", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='For each timestep, gather the energy information output by LAMMPS '\n 'in the log file.')\n parser.add_argument(\"-f\", \"--file\", help=\"The log file to be processed.\",\n default=None)\n parser.add_argument(\"-l\", \"--list_file\", help=\"The a file with a list of log files to be processes.\",\n default=None)\n args = None\n try:\n args = parser.parse_args(argv)\n if args.file is None:\n args.file_list = []\n else:\n if os.path.isfile(args.file):\n args.file_list = [args.file]\n args.source_name = args.file\n else:\n raise IOError(\"Could not find specified log file: {}\".format(args.file))\n if args.list_file is not None:\n args.file_list += file_rows_to_list(args.list_file)\n args.source_name = args.list_file\n if len(args.file_list) < 1:\n raise InvalidDataError(\"Found no log file names to process. 
Specify one or more files as specified in \"\n \"the help documentation ('-h').\")\n except IOError as e:\n warning(\"Problems reading file:\", e)\n parser.print_help()\n return args, IO_ERROR\n except (KeyError, InvalidDataError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def _parse_args():\n args = sys.argv[1:]\n cmd_parser = argparse.ArgumentParser()\n cmd_parser.add_argument(\n '--produce-sub',\n dest='produce_sub',\n help='Produce submision file',\n default=False,\n action='store_true',\n )\n cmd_parser.add_argument(\n '--search-cv',\n dest='search_cv',\n help='Perform Search of parameters',\n default=False,\n action='store_true',\n )\n cmd_opts = cmd_parser.parse_args(args=args)\n return cmd_opts", "def _parse_args(argv):\n parser = make_parser()\n args = parser.parse_args(argv)\n LOGGER.setLevel(to_log_level(args.loglevel))\n\n if not args.inputs:\n if args.list:\n tlist = \", \".join(API.list_types())\n _exit_with_output(\"Supported config types: \" + tlist)\n elif args.env:\n cnf = os.environ.copy()\n _output_result(cnf, args.output, args.otype or \"json\", None, None)\n sys.exit(0)\n else:\n parser.print_usage()\n sys.exit(1)\n\n if args.validate and args.schema is None:\n _exit_with_output(\"--validate option requires --scheme option\", 1)\n\n return args", "def parse_args(self):\n return self.__process_args__(self.parser.parse_args())", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def parse_args():\n p = argparse.ArgumentParser(\n description='Parse system logs, for fun or something')\n p.add_argument('-l', '--log', dest='log_file', help='The log file')\n p.add_argument('-f', '--filter', dest='filter', help='filter by daemon')\n return p.parse_args()", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def _parse_arguments(text):\n parser = argparse.ArgumentParser(\n description=\"Build Python-based Rez packages in just a single command.\",\n )\n\n 
parser.add_argument(\n \"--hdas\",\n nargs=\"+\",\n help=\"The relative paths to each folder containing VCS-style Houdini HDAs.\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--items\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to copy / install.\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--eggs\",\n nargs=\"+\",\n help=\"The relative paths to each file/folder to make into a .egg file.\",\n )\n\n parser.add_argument(\n \"--symlink\",\n action=\"store_true\",\n default=linker.must_symlink(),\n help=\"If True, symlink everything back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-files\",\n action=\"store_true\",\n default=linker.must_symlink_files(),\n help=\"If True, symlink files back to the source Rez package.\",\n )\n\n parser.add_argument(\n \"--symlink-folders\",\n action=\"store_true\",\n default=linker.must_symlink_folders(),\n help=\"If True, symlink folders back to the source Rez package.\",\n )\n\n known, _ = parser.parse_known_args(text)\n\n return known", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"script for downloading and merging log files from S3 for particular time period\")\n parser.add_argument(\"-s\", \n \"--startdate\", \n help=\"start date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-e\", \"--enddate\", \n help=\"end date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-f\", \n \"--file\", \n help=\"destination file\", \n required=True)\n parser.add_argument( \"-c\", \"--config\",\n default=\"/Users/samarius/.get_analytics_log.config.json\",\n help=\"configuration file path\")\n\n\n try:\n args = parser.parse_args()\n return args\n except Exception as e:\n print \"can't parse command line args: {}\".format(repr(e))\n raise", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\",\n help=\"Print lots of debugging statements\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.DEBUG,\n default=logging.ERROR,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.INFO,\n )\n parser.add_argument(\"runscript\", default=None)\n return parser.parse_args()", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n\n # add these command line arg options\n parser.add_argument(\"departure_date\", help=\"Provide departure date in MM/DD/YYYY\")\n parser.add_argument(\"return_date\", help=\"Provide return date in MM/DD/YYYY\")\n parser.add_argument(\"departure_airport\", help=\"Provide airport code, e.g. 
BWI\")\n parser.add_argument(\"return_airport\", help=\"Provide airport code, e.g. ICN\")\n\n # parse these command line options\n arg = parser.parse_args()\n\n departure_date = arg.departure_date\n return_date = arg.return_date\n departure_airport = arg.departure_airport\n return_airport = arg.return_airport\n\n return departure_date, return_date, departure_airport, return_airport", "def parse_command_line():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter,\n epilog=\"For a list of table formats check this page: \"\n \"https://github.com/astanin/python-tabulate#table-format\"\n )\n requir = parser.add_argument_group(\"required arguments\")\n requir.add_argument(\"-f\", \"--find\",\n required=True,\n help=\"Search string to identify\"\n )\n requir.add_argument(\"-k\", \"--client_id\",\n required=True,\n help=\"CrowdStrike API client ID\"\n )\n requir.add_argument(\"-s\", \"--client_secret\",\n required=True,\n help=\"CrowdStrike API client secret\"\n )\n parser.add_argument(\"-r\", \"--reverse\",\n help=\"Reverse the sort.\",\n default=False,\n action=\"store_true\"\n )\n parser.add_argument(\"-t\", \"--types\",\n help=\"Types to search (indicator, report or actor). Comma delimited.\"\n )\n parser.add_argument(\"-tf\", \"--table_format\",\n help=\"Set the table format.\",\n default=\"fancy_grid\"\n )\n parser.add_argument(\"-o\", \"--output_prefix\",\n help=\"Output filename prefix for storing results (CSV format).\",\n default=None\n )\n\n parsed = parser.parse_args()\n allow = [\"indicator\", \"report\", \"actor\"]\n parsed.types = [t for t in parsed.types.split(\",\") if t in allow] if parsed.types else allow\n\n return parsed", "def command_line_parse(iargs=None):\n\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n\n return inps", "def _parse_args():\n usage = \"usage: %prog [options] arg1 arg2\"\n parser = optparse.OptionParser()\n parser.add_option(\n '--platform', dest='platform', default=\"\", type = \"string\",\n help='platform name: UC 360 baidu etc.')\n parser.add_option(\n '--workspace', dest='workspace', default=\"./\", type = \"string\",\n help='project directory.')\n parser.add_option(\n '--project', dest='projectDir', default=\"./destProject\", type = \"string\",\n help='project directory.')\n # parser.add_option(\n # \"-t\", dest=\"test\", action=\"store_const\", const=lambda:_test, default=_test2, help=\"////////////\"\n # )\n options, args = parser.parse_args()\n # positional arguments are ignored\n return options, args", "def parse_args():\n parser = argparse.ArgumentParser(description='Extract left-turn speed data CSV files from Excel')\n parser.add_argument('veh_conflict_data', type=str, help='Excel file with all veh conflicts data')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"plumes\", help=\"path to input plume file\")\n parser.add_argument(\"output\", help=\"path to output plume file\")\n parser.add_argument(\"-r\", \"--radius\", required=True,\n help=\"radius (meters) for nearest neighbor clustering\")\n parser.add_argument(\"-v\", \"--visualize\", action='store_true',\n help=\"Show plot of points/clusters (default=no plot)\")\n args = parser.parse_args()\n return args.plumes, args.output, float(args.radius), args.visualize", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into 
a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def parse_arguments(raw=None):\n args = argument_parser().parse_args(raw)\n return args", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 4:\n print(\"[ERR] Invalid number of command line arguments!\")\n print(len(sys.argv))\n print(sys.argv[:])\n _usage()\n sys.exit(1)\n\n # Check if lis.config template exists.\n lis_config_template = sys.argv[1]\n if not os.path.exists(lis_config_template):\n print(f\"[ERR] {lis_config_template} does not exist!\")\n sys.exit(1)\n\n # Check if directory for restart files exists. Actual restart file\n # shall be checked later.\n restart_dir = sys.argv[2]\n if not os.path.exists(restart_dir):\n print(f\"[ERR] Directory {restart_dir} does not exist!\")\n sys.exit(1)\n\n # Get start date of new LIS run.\n yyyymmdd = sys.argv[3]\n if len(yyyymmdd) != 8:\n print(\"[ERR] Invalid length for YYYYMMDD, must be 8 characters!\")\n sys.exit(1)\n year = int(yyyymmdd[0:4])\n month = int(yyyymmdd[4:6])\n day = int(yyyymmdd[6:8])\n try:\n startdate = datetime.date(year, month, day)\n except ValueError:\n print(\"[ERR] Invalid YYYYMMDD passed to script!\")\n sys.exit(1)\n\n return lis_config_template, restart_dir, startdate", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--thoughtspot_host\", required=True,\n help=\"domain or ip. E.g. http://1.1.1.1\")\n parser.add_argument(\"-u\", \"--username\", required=True,\n help=\"username - must have administrative privileges\")\n parser.add_argument(\"-p\", \"--password\", required=True,\n help=\"password - must have administrative privileges\")\n parser.add_argument(\"-d\", \"--delimiter\", default=',',\n help=\"character to seperate values by. Default to comma\")\n parser.add_argument(\"-c\", \"--csv\", action=\"store_true\",\n help=\"create csv file called permissions.csv\")\n parser.add_argument(\"-s\", \"--share\", action=\"store_true\",\n help=\"output usable format for share api\")\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"CUDAPOA Python API sample program.\")\n parser.add_argument('-m',\n help=\"Run MSA generation. 
By default consensusis generated.\",\n action='store_true')\n parser.add_argument('-p',\n help=\"Print output MSA or consensus for each POA group.\",\n action='store_true')\n parser.add_argument('-l',\n help=\"Use long or short read sample data.\",\n action='store_true')\n return parser.parse_args()", "def _parse_arguments():\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n args, unknown = parser.parse_known_args()\n return args, unknown", "def parseArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_folder',\n help='Path of the folder where output files should be written.')\n parser.add_argument('--partition_id',\n help='ID of the computer partition to collect data from.')\n parser.add_argument('--collector_db',\n help='The path of slapos collect database.')\n\n return parser.parse_args()", "def parseArgs():\n # Configure the option parser for CLI options to the script\n usage = \"usage: %prog [options] userName password configlet xlfile\"\n parser = argparse.ArgumentParser(description=\"Excel File to JSON Configlet Builder\")\n parser.add_argument(\"--userName\", help='Username to log into CVP')\n parser.add_argument(\"--password\", help='Password for CVP user to login')\n parser.add_argument(\"--target\", nargs=\"*\", metavar='TARGET', default=[],\n help='List of CVP appliances to get snapshot from URL,URL')\n parser.add_argument(\"--snapshot\", help='CVP Snapshot containing Show Inventory and Show LLDP neighbor data')\n parser.add_argument(\"--opticType\", default='PSM4', help=\"Optic Type to look for\")\n parser.add_argument(\"--verbose\", default=False, help='Return more information to the command line')\n args = parser.parse_args()\n return checkArgs( args )", "def Args(parser):", "def parse_args():\n\n parser = argparse.ArgumentParser(description='Disk metric sender')\n parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')\n parser.add_argument('--debug', action='store_true', default=None, help='Debug?')\n\n return parser.parse_args()", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)", "def parse_cmdline(argv):\n\n parser = argparse.ArgumentParser(description='Print information about probable hung attempts')\n parser.add_argument('--des_services', action='store', help='')\n parser.add_argument('--section', action='store',\n help='Must be specified if not set in environment')\n parser.add_argument('--days', action='store', default=7, help='finished in last X days')\n parser.add_argument('--file', action='store')\n\n args = vars(parser.parse_args(argv)) # convert to dict\n\n return args", "def parse_arguments():\n parser = ArgumentParser()\n\n # For development/testing\n parser.add_argument(\"--dev\", help=\"run the code of the developers tag\")\n\n return parser.parse_args()", "def parse_cli():\n parser = OptionParser()\n return parser.parse_args()", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional 
arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def parse_command_line():\n parser = argparse.ArgumentParser(prog='scoring')\n parser.add_argument(\"pdb_list\", help=\"list of PDB structures\")\n script_args = parser.parse_args()\n return script_args", "def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev", "def parse_args():\n\n parser = argparse.ArgumentParser(description=\"Benchmark Thing WoT server\")\n parser = utils.extend_server_arg_parser(parser)\n\n return parser.parse_args()", "def parse_args():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-i', '--infile', type=is_valid_file, action=FullPaths,\n metavar='FILE', required=True, help='''Settings file'''\n )\n parser.add_argument(\n '-d', '--ddc_file', type=is_valid_file, action=FullPaths,\n metavar='FILE', default='ddc2_nios2_sw.elf',\n help='''DDC2 download file'''\n )\n parser.add_argument(\n '-t', '--time', type=int, metavar='INT', default=5,\n help='''Number of seconds to run DDC2'''\n )\n parser.add_argument(\n '-o', '--outfile', type=str, default='./data/test/test',\n metavar='FILE', required=False,\n help='''Output location of data (no need to include file extension)'''\n )\n parser.add_argument(\n '--live', action='store_true', default=False,\n help='''Live visualisation'''\n )\n parser.add_argument(\n '-v', '--verbose', action='store_true', default=False,\n help='''Verbose'''\n )\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, 
args) = parser.parse_args(argv)", "def _parse_args(self, prepared_args):\n pass", "def parsare_argumente():\n for arg in sys.argv:\n if arg == \"-h\":\n display_usage()\n\n in_dir=\"input\"\n out_dir=\"output\"\n n=3\n timeout=10\n for arg in sys.argv[1:]:\n check = arg.split(\"=\")\n if len(check) < 2:\n print(\"invalid\")\n exit()\n if check[0] == \"if\":\n in_dir = ''.join(check[1:])\n elif check[0] == \"of\":\n out_dir = ''.join(check[1:])\n elif check[0] == 'n':\n try:\n n = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n elif check[0] == 't':\n try:\n timeout = int(''.join(check[1:]))\n except ValueError:\n print(\"nr invalid\")\n display_usage()\n\n return [in_dir, out_dir, n, timeout]", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def __parse_cmd_args():\n parser = argparse.ArgumentParser(description='Python Image Downloader.')\n parser.add_argument(\"-f\", \"--file\",\n help=\"Where the URL file is located.\")\n parser.add_argument(\"-d\", \"--dir\",\n help=\"Where the downloaded files are to be stored.\")\n args = parser.parse_args()\n return args", "def get_args(args):\n parser = parse_arguments()\n return parser.parse_args(args)", "def parse_args():\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Pilot kodi\")\n parser.add_argument(\"--server\",\"-s\",metavar=\"SERVER\",required=True,help=\"Server to connect to\")\n parser.add_argument(\"--port\",\"-p\",metavar=\"PORT\",default=8080,type=int,help=\"Port of the server\")\n parser.add_argument(\"--get-audio\",action=\"store_true\",help=\"Get audio\")\n parser.add_argument(\"--switch-audio\",action=\"store_true\",help=\"Switch Audio\")\n parser.add_argument(\"--valid-audios\",metavar=\"SUBTITLE\",default=[\"fre\",\"eng\"],type=lambda p:p.split(\",\"),help=\"Valid audios\")\n parser.add_argument(\"--get-subtitle\",action=\"store_true\",help=\"Get subtitle\")\n parser.add_argument(\"--toggle-subtitle\",action=\"store_true\",help=\"Toggle subtitles\")\n parser.add_argument(\"--switch-subtitles\",action=\"store_true\",help=\"Switch subtitles\")\n parser.add_argument(\"--valid-subtitles\",metavar=\"SUBTITLE\",default=[\"fre\"],type=lambda p:p.split(\",\"),help=\"Valid subtitles\")\n\n return parser.parse_args()", "def parse_and_validate_cmd_line():\n if len(sys.argv) != 4:\n print USAGE_STR.format(sys.argv[0])\n sys.exit()\n # attempt to parse the parameters tell the user and exit if we can't\n num_segments = parse_and_validate_num_segs(sys.argv[1])\n # try to parse numThreads\n num_threads = parse_and_validate_num_threads(sys.argv[2])\n # try to parse and test the data directory\n data_dir = parse_and_validate_data_dir(sys.argv[3])\n return num_segments, num_threads, data_dir", "def parse_args():\n\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def parse_arguments():\n\n text_folder = \"the unzipped Slack export directory\"\n text_remote_name = \"keep Slack file IDs instead of using the file names\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"folder\", help=text_folder)\n parser.add_argument(\"--remote-name\", help=text_remote_name,\n action=\"store_true\")\n\n arguments = parser.parse_args()\n return arguments", "def parse_arguments():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"postcode\", type=str, help=\"Postcode of current location\")\n parser.add_argument(\"-d\", \"--debug\", \n help=\"Turns on debug mode\", \n action='store_true')\n parser.add_argument(\"-fp\",\"--postcode-file\", \n default=\"postcodes_swift_sample.csv\",\n help=\"Location of Postcode CSV file (default postcodes_swift_sample.csv)\",\n type=str)\n parser.add_argument(\"-fb\",\"--pub-file\", \n default=\"pubnames_swift_sample.csv\",\n help=\"Location of Pub Postcode CSV file (default pubnames_swift_sample.csv)\",\n type=str)\n parser.add_argument(\"-l\",\"--limit\", \n default=10, \n help=\"Limit Number of Results (default 10)\",\n type=int)\n parser.add_argument(\"-m\",\"--max-distance\", \n default=50, \n help=\"Only return results less than this distance (default 50)\",\n type=int)\n return parser", "def _ParseCommandArguments():\n arg_parser = argparse.ArgumentParser()\n arg_parser.usage = __doc__\n\n arg_parser.add_argument('--download-dir',\n type=str,\n required=True,\n help='Directory into which corpora are downloaded.')\n arg_parser.add_argument('--build-dir',\n required=True,\n type=str,\n help='Directory where fuzzers were built.')\n args = arg_parser.parse_args()\n return args", "def argParse():\n p = ap.ArgumentParser()\n p.add_argument('field',\n help='Name of field')\n p.add_argument('telescope',\n help='Name of telescope',\n choices=['io', 'callisto', 'europa',\n 'ganymede', 'artemis', 'saintex',\n 'nites', 'rcos20'])\n p.add_argument('filt',\n help='Name of filter')\n return p.parse_args()", "def parse_args() -> tuple:\n operation = 0\n args = list()\n filter = tuple()\n suppress_errors = False\n\n for arg in argv[1:]:\n if arg.startswith('--mode='):\n operation = int(arg[7:])\n elif arg == '--suppress_errors':\n suppress_errors = True\n elif arg.startswith('--filter='):\n filter = tuple(remove_quotes(arg[9:]).split())\n else:\n args.append(remove_quotes(arg))\n\n if filter == tuple():\n filter = None\n\n return operation, suppress_errors, filter, args", "def parse_args():\n parser = argparse.ArgumentParser(description=\"evaluate the recovered derg by comparing with ground truth mapping file\")\n parser.add_argument(\"-mapping\", action=\"store\", dest=\"mapping_file\",\n required=True, help=\"path to proguard-generated mapping.txt\")\n parser.add_argument(\"-recovered_derg\", action=\"store\", dest=\"recovered_derg\",\n required=True, help=\"path to recovered derg\")\n parser.add_argument(\"-nice2predict_mapping\", action=\"store\", dest=\"nice2predict_mapping_file\",\n help=\"path to nice2predict-generated mapping.txt\")\n 
parser.add_argument(\"-o\", action=\"store\", dest=\"report_dir\",\n default=\".\", help=\"directory of report files\")\n parser.add_argument(\"-report_name\", action=\"store\", dest=\"report_name\",\n default=DEFAULT_REPORT_NAME, help=\"name of report file\")\n parser.add_argument(\"-match_mode\", action=\"store\", dest=\"match_mode\",\n default=MATCH_MODE_EXACT, help=\"match mode\")\n\n options = parser.parse_args()\n print options\n return options", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"auth\",\n help=\"authentication string for Infermedica API: \"\n \"APP_ID:APP_KEY or path to file containing it.\")\n parser.add_argument(\"--model\",\n help=\"use non-standard Infermedica model/language, \"\n \"e.g. infermedica-es\")\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", dest=\"input_file\", help=\"input file or pattern\", default=\"\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output_file\", help=\"output file or pattern\", default=\"\")\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action='store_true')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action='store_true')\n parser.set_defaults(verbose=False)\n parser.set_defaults(debug=False)\n return parser.parse_args()", "def parse_cmdline(get_parser=get_parser_files):\n return get_parser().parse_args()", "def parse_args():\n if len(sys.argv) < REQUIRED_NUM_ARGS or len(sys.argv) > MAXIMUM_NUM_ARGS:\n error_quit(\"Incorrect number of arguments!\", 400)\n # Set port to DEFAULT if not specified as an arg. 
Otherwise, port = portarg.\n port = sys.argv[PORT_ARG_NUM] if len(sys.argv) == MAXIMUM_NUM_ARGS else DEFAULT_FTP_PORT\n port = validate_port(port)\n # Get host address and logfile name from args.\n host, log_file = sys.argv[HOST_ARG_NUM], sys.argv[LOG_ARG_NUM]\n return host, log_file, port", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"pop-nedry Win64 shellcode build script\"\n )\n\n parser.add_argument(\n '--url', type=str, required=True,\n help='URL for web page hosting the Nedry GIF'\n )\n\n return parser.parse_args()", "def ParseCommandLineArgs():\n\n theParameterManager = ParameterManager() \n \n # set input options\n parser = argparse.ArgumentParser(description='Command line input.')\n parser.add_argument(\"-i\",\"--input\",action=\"store\", help = \"Load XML input file\")\n parser.add_argument(\"-p\",\"--param\",action=\"append\", help = \"Add user-defined parameter\")\n \n rv = parser.parse_args()\n \n # strip out params\n if (rv.param):\n for param in rv.param:\n splt = string.split(param,\"=\",1)\n if (len(splt) < 2 ): \n raise BluecapError(\"Error: Input parameters should be in the form -p key=value \")\n name = splt[0].strip()\n paramString = splt[1]\n theParameterManager.SetParameter(name,paramString)\n \n \n # sanity checks\n if (not rv.input):\n print(\"Warning - failed to provide input file\")\n #raise IFDError(\"Input error\",\"Failed to provided input file\")\n \n \n return rv", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()", "def parse_args():\n\tparser = argparse.ArgumentParser(description='Show video statistics.')\n\tparser.add_argument('--sort', metavar='FIELD', choices=['views', 'likes', 'dislikes'],\n\t default='views',\n\t help='sort by the specified field. 
Options are views, likes and dislikes.')\n\tparser.add_argument('--max', metavar='MAX', type=int, help='show the top MAX entries only.')\n\tparser.add_argument('--csv', action='store_true', default=False,\n\t help='output the data in CSV format.')\n\tparser.add_argument('--table', action='store_true', default=False,\n\t help='output the data in an ascii table.')\n\tparser.add_argument('--workers', type=int, default=8,\n\t help='number of workers to use, 8 by default.')\n\treturn parser.parse_args()", "def parse_arguments(raw_args=sys.argv[1:]):\n parser = optparse.OptionParser(\n usage=\"usage: %prog [OPTIONS] DOMAIN_NAME DOMAIN_CONFIG_FILE\",\n description=\"A tool for provisioning a Khan Academy CloudSearch \"\n \"domain.\")\n\n parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"If specified, DEBUG messages will be printed and more \"\n \"information will be printed with each log message.\")\n\n parser.add_option(\"--leave-temp-dir\", action=\"store_true\", default=False,\n help=\"If specified, the created temporary directory will not be \"\n \"deleted when the script exits.\")\n\n parser.add_option(\"-n\", \"--dry-run\", action=\"store_true\", default=False,\n help=\"If specified, no commands will actually be executed.\")\n\n parser.add_option(\"--no-reindex\", action=\"store_true\", default=False,\n help=\"If specified, will only update the config, without reindexing.\")\n\n options, args = parser.parse_args(raw_args)\n\n if len(args) != 2:\n parser.error(\"You must specify the name of the domain and a file \"\n \"containing the domain configuration.\")\n\n return (options, args[0], args[1])", "def parse_cli():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"ENV\", help=\"Enviorment SCANNER, PC, REMOMTE\")\n args = parser.parse_args()\n\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Scraper')\n parser.add_argument('--prefix', help='Prefix for saving files', default=\"\")\n parser.add_argument('--path', help='Dir path', default=\"\")\n parser.add_argument('--urls_path', help='Url path', default=False)\n parser.add_argument('--url', help='Url', default=False)\n parser.add_argument('--disney', dest='disney', action='store_true', help=\"Choose all disney movies\")\n parser.add_argument('--ngram', help='Max ngram', default=2)\n\n args = parser.parse_args()\n return args", "def parse_command_line():\n\n parser=OptionParser(usage=\"%prog [options] \",\n description=\" updates tracker\" )\n parser.add_option(\"-c\", \"--candidate\", action=\"store\", type=\"string\",\n dest=\"candidate\", default=\"\", help=\"candidate name\")\n parser.add_option(\"-u\", \"--username\", action=\"store\", type=\"string\",\n dest=\"username\", default=\"gzhou\",\n help=\"username\")\n parser.add_option(\"-p\",\"--password\", action=\"store\",\n dest=\"password\", default=\"egghead\", help=\"password\")\n parser.add_option(\"-i\",\"--input\", action=\"store\",\n dest=\"input\", default=\"tracker_temp.txt\", help=\"Input file\") \n parser.add_option(\"-l\",\"--upload\", action=\"store\",\n dest=\"upload\", default=None, help=\"upload file\") \n parser.add_option(\"-d\",\"--description\", action=\"store\",\n dest=\"description\", default=None, help=\"descirption\") \n (options, args)=parser.parse_args()\n\n return options, args", "def parse_args():\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--p', dest='path_in',\n action='store', type=str, required=True, default='',\n help=\"Path relative to the data/ 
directory, to the input ATL01, ANC13, and ANC27 files.\")\n parser.add_argument('--atl01', dest='atl01_file',\n action='store', type=str, required=False, default=None,\n help=\"Path + filename to directory of the ATL01.\")\n parser.add_argument('--anc13', dest='anc13_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to outputs directory of the ANC13.\") \n parser.add_argument('--anc27', dest='anc27_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to directory of the ANC27.\")\n\n args = parser.parse_args()\n \n return args" ]
[ "0.85394055", "0.76848423", "0.76680905", "0.7588562", "0.75848377", "0.7569822", "0.75066495", "0.7421597", "0.7406338", "0.7390177", "0.7364632", "0.7349577", "0.7346166", "0.73298585", "0.7313611", "0.7307958", "0.7288206", "0.7281204", "0.72765344", "0.72620463", "0.72527874", "0.725069", "0.7243399", "0.7229168", "0.7213391", "0.72101057", "0.7207643", "0.7188848", "0.7187473", "0.71652865", "0.716509", "0.7163907", "0.71491253", "0.71399903", "0.71389276", "0.713777", "0.7121061", "0.7105738", "0.71029913", "0.71000004", "0.7095679", "0.70928603", "0.70873165", "0.70847195", "0.7084095", "0.70821524", "0.7080187", "0.70713294", "0.7057681", "0.7055876", "0.7052503", "0.70354027", "0.7032371", "0.7030975", "0.70224416", "0.7022409", "0.70220035", "0.7014969", "0.70139885", "0.701373", "0.70070803", "0.7002277", "0.70000714", "0.69916195", "0.69907546", "0.69899255", "0.6989713", "0.6981709", "0.6977807", "0.69716567", "0.69708604", "0.69694084", "0.6965893", "0.6958373", "0.6950843", "0.69501376", "0.6948842", "0.6946415", "0.69457626", "0.69433254", "0.694155", "0.69402736", "0.69382834", "0.69356394", "0.6935618", "0.6926642", "0.691796", "0.6913544", "0.69124824", "0.6912472", "0.69080514", "0.6905628", "0.6905216", "0.6902398", "0.6901325", "0.69003284", "0.6897022", "0.6896015", "0.68959415", "0.68927765", "0.68912864" ]
0.0
-1
Tests that multiple serial `get`s perform only a single actual call to batch_get
def test_batch_get_lazy_load(): t = VersionedTransaction(dict()) table_a = ItemTable("a") table_b = ItemTable("b") a1_k = dict(id="a1") a2_k = dict(id="a2") b1_k = dict(id="b1") a3_k = dict(id="a3") def triple_get(t: VersionedTransaction) -> VersionedTransaction: a1 = table_a.get(a1_k)(t) b1 = table_b.get(b1_k)(t) a2 = table_a.get(a2_k)(t) # all three of the above gets will be performed # together as a single call to batch_get. return table_a.put(dict(a3_k, items=[a1, b1, a2]))(t) calls = 0 a1 = dict(a1_k, i=6) a2 = dict(a2_k, i=8) b1 = dict(b1_k, j=4) def batch_get(item_keys_by_table_name): if not item_keys_by_table_name: return dict() nonlocal calls calls += 1 return dict(a=[a1, a2], b=[b1]) t = versioned_transact_write_items( triple_get, batch_get_item=batch_get, # type: ignore transact_write_items=lambda **_kw: None, ) assert calls == 1 assert table_a.require(a3_k)(t)["items"] == [a1, b1, a2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_batch(self):\n pass", "async def test_batch_get_data(container_requester):\n async with container_requester as requester:\n await requester(\n 'POST', '/db/guillotina', data=json.dumps({\n '@type': 'Item',\n 'id': 'foobar1'\n }))\n await requester(\n 'POST', '/db/guillotina', data=json.dumps({\n '@type': 'Item',\n 'id': 'foobar2'\n }))\n response, _ = await requester(\n 'POST',\n '/db/guillotina/@batch',\n data=json.dumps([{\n 'method': 'GET',\n 'endpoint': 'foobar1'\n }, {\n 'method': 'GET',\n 'endpoint': 'foobar2'\n }])\n )\n assert len(response) == 2\n assert response[1]['body']['@name'] == 'foobar2'", "def batch(self):\n return self._client.batch()", "def test_identity_multiple_batched(self, dev):\n qml.enable_tape()\n dev = qml.device(dev, wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n res = dev.batch_execute([tape1])\n assert len(res) == 1\n assert np.allclose(res[0], np.array([1, 1]))\n qml.disable_tape()", "def test_get_multi(self):\n\t\tyield self.conn.set(\"an_integer\", 42)\n\t\tyield self.conn.set(\"a_string\", \"hello\")\n\n\t\tres = yield self.conn.get_multi([ \"a_string\", \"an_integer\" ])\n\n\t\tself.assertEquals(res, { \"a_string\": \"hello\", \"an_integer\": 42 })", "def test_batch_execute_parallel(mock_run_batch):\n mock_run_batch.return_value = TASK_BATCH\n dev = _aws_device(wires=4, foo=\"bar\", parallel=True)\n assert dev.parallel is True\n\n with QuantumTape() as circuit:\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.probs(wires=[0])\n qml.expval(qml.PauliX(1))\n qml.var(qml.PauliY(2))\n qml.sample(qml.PauliZ(3))\n\n circuits = [circuit, circuit]\n batch_results = dev.batch_execute(circuits)\n for results in batch_results:\n assert np.allclose(\n results[0], RESULT.get_value_by_result_type(result_types.Probability(target=[0]))\n )\n assert np.allclose(\n results[1],\n RESULT.get_value_by_result_type(\n result_types.Expectation(observable=Observable.X(), target=1)\n ),\n )\n assert np.allclose(\n results[2],\n RESULT.get_value_by_result_type(\n result_types.Variance(observable=Observable.Y(), target=2)\n ),\n )\n assert np.allclose(\n results[3],\n RESULT.get_value_by_result_type(\n result_types.Sample(observable=Observable.Z(), target=3)\n ),\n )\n\n mock_run_batch.assert_called_with(\n [CIRCUIT, CIRCUIT],\n s3_destination_folder=(\"foo\", \"bar\"),\n shots=SHOTS,\n max_parallel=None,\n max_connections=AwsQuantumTaskBatch.MAX_CONNECTIONS_DEFAULT,\n poll_timeout_seconds=AwsQuantumTask.DEFAULT_RESULTS_POLL_TIMEOUT,\n poll_interval_seconds=AwsQuantumTask.DEFAULT_RESULTS_POLL_INTERVAL,\n foo=\"bar\",\n )", "def test_batch_execute_non_parallel(monkeypatch):\n dev = _aws_device(wires=2, foo=\"bar\", parallel=False)\n assert dev.parallel is False\n\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"batch_execute\", lambda self, circuits: 1967)\n res = dev.batch_execute([])\n assert res == 1967", "def test_get_multiple(multiple_bucket): # pylint: disable=redefined-outer-name\n for idx in range(2):\n element_number = idx + 1\n assert multiple_bucket.get(f\"key {element_number}\") == f\"value {element_number}\"", "def test_none_success_case(self):\r\n b = BatchQuery()\r\n\r\n q = TestMultiKeyModel.objects.batch(b)\r\n assert q._batch == b\r\n\r\n q = q.batch(None)\r\n assert q._batch is None", "def test_batches_are_accessible(\n monkeypatch,\n multibatch_generic_csv_generator,\n multibatch_generic_csv_generator_context,\n):\n\n context: DataContext = 
multibatch_generic_csv_generator_context\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n\n datasource = context.datasources[datasource_name]\n\n data_connector = datasource.data_connectors[data_connector_name]\n\n total_batches: int = 20\n file_list = multibatch_generic_csv_generator(\n data_path=data_path, num_event_batches=total_batches\n )\n\n assert (\n data_connector._get_data_reference_list_from_cache_by_data_asset_name(\n data_asset_name=asset_name\n )\n == file_list\n )\n\n batch_request_1 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -1,\n },\n )\n # Should give most recent batch\n validator_1 = context.get_validator(\n batch_request=batch_request_1,\n create_expectation_suite_with_name=\"my_expectation_suite_name_1\",\n )\n metric_max = validator_1.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches\n metric_value_set = validator_1.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n batch_request_2 = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -2,\n },\n )\n validator_2 = context.get_validator(\n batch_request=batch_request_2,\n create_expectation_suite_with_name=\"my_expectation_suite_name_2\",\n )\n metric_max = validator_2.get_metric(\n MetricConfiguration(\"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"})\n )\n assert metric_max == total_batches - 1\n metric_value_set = validator_2.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}\n\n for batch_num in range(1, total_batches + 1):\n batch_request = BatchRequest(\n datasource_name=\"generic_csv_generator\",\n data_connector_name=\"daily_data_connector\",\n data_asset_name=\"daily_data_asset\",\n data_connector_query={\n \"index\": -batch_num,\n },\n )\n validator = context.get_validator(\n batch_request=batch_request,\n create_expectation_suite_with_name=f\"my_expectation_suite_name__{batch_num}\",\n )\n metric_max = validator.get_metric(\n MetricConfiguration(\n \"column.max\", metric_domain_kwargs={\"column\": \"batch_num\"}\n )\n )\n assert metric_max == (total_batches + 1) - batch_num\n metric_value_set = validator.get_metric(\n MetricConfiguration(\n \"column.distinct_values\",\n metric_domain_kwargs={\"column\": \"string_cardinality_3\"},\n )\n )\n assert metric_value_set == {\"category0\", \"category1\", \"category2\"}", "def test_parallelRequests(self):\n for property in properties:\n th = threading.Thread(target=getattr(self, 'test_get_%s' %property))\n th.setDaemon(1)\n th.start()\n th.join()", "def test_multipleConcurrentRequests(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n # The first query should be passed to the 
underlying protocol.\n firstQuery = dns.Query('foo.example.com', dns.A)\n resolver.query(firstQuery)\n self.assertEqual(len(queries), 1)\n\n # A query for a different name is also passed to the underlying\n # protocol.\n secondQuery = dns.Query('bar.example.com', dns.A)\n resolver.query(secondQuery)\n self.assertEqual(len(queries), 2)\n\n # A query for a different type is also passed to the underlying\n # protocol.\n thirdQuery = dns.Query('foo.example.com', dns.A6)\n resolver.query(thirdQuery)\n self.assertEqual(len(queries), 3)", "def test_block_missing_batch(self):\n pass", "def testBasicSerialGet(self):\n self.client_connect()\n self.client_send(\"get keyNotThere0\\r\\n\")\n self.mock_recv('get keyNotThere0\\r\\n')\n self.mock_send('END\\r\\n')\n self.client_recv(\"END\\r\\n\")\n\n self.client_send(\"get keyNotThere1\\r\\n\")\n self.mock_recv('get keyNotThere1\\r\\n')\n self.mock_send('END\\r\\n')\n self.client_recv(\"END\\r\\n\")\n\n self.client_send(\"get keyNotThere2\\r\\n\")\n self.mock_recv('get keyNotThere2\\r\\n')\n self.mock_send('END\\r\\n')\n self.client_recv(\"END\\r\\n\")", "def test_get_settled_batch_list(self):\n self.trans_details.get_settled_batch_list(\n include_statistics = True,\n )\n\n self.trans_details.get_settled_batch_list(\n first_settlement_date=u\"2011-01-01T01:00:00\",\n )\n\n self.trans_details.get_settled_batch_list(\n last_settlement_date=u\"2011-01-01T01:00:00\",\n )\n\n # all three together\n self.trans_details.get_settled_batch_list(\n include_statistics = True,\n first_settlement_date=u\"2011-01-01T01:00:00\",\n last_settlement_date=u\"2011-01-02T01:00:00\"\n )", "def test_multiple(self):\n\n with self.assertRaises(MultipleObjectsReturned):\n RST_FBO().get()", "def test_multipleSequentialRequests(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n query = dns.Query('foo.example.com', dns.A)\n\n # The first query should be passed to the underlying protocol.\n resolver.query(query)\n self.assertEqual(len(queries), 1)\n\n # Deliver the response.\n queries.pop()[-1].callback(dns.Message())\n\n # Repeating the first query should touch the protocol again.\n resolver.query(query)\n self.assertEqual(len(queries), 1)", "def test_batch(self):\n req = '''[{\"foo\": \"boo\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},\n {\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"}\n ]'''\n\n resp = '''[{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Invalid members in request object\"}, \"id\": null},\n {\"jsonrpc\": \"2.0\", \"result\": 19, \"id\": \"2\"},\n {\"jsonrpc\": \"2.0\", \"id\": \"5\", \"error\": {\"message\": \"MethodNotFoundError: Method foo.get not found\", \"code\": -32601}}\n ]'''\n\n status = 200\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))", "def test_batch(self):\n batch = batch_test_utils.create_batch()\n self.job1.batch_id = batch.id\n self.job1.save()\n\n url = '/%s/jobs/?batch_id=%d' % (self.api, batch.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n 
self.assertEqual(result['results'][0]['id'], self.job1.id)", "def test_process_batch_and_handle_results(self):\n e1, p1 = self._edition(with_license_pool=True)\n i1 = e1.primary_identifier\n\n e2, p2 = self._edition(with_license_pool=True)\n i2 = e2.primary_identifier\n\n class MockProvider(AlwaysSuccessfulCoverageProvider):\n OPERATION = 'i succeed'\n\n def finalize_batch(self):\n self.finalized = True\n\n success_provider = MockProvider(self._db)\n\n batch = [i1, i2]\n counts, successes = success_provider.process_batch_and_handle_results(batch)\n\n # Two successes.\n assert (2, 0, 0) == counts\n\n # finalize_batch() was called.\n assert True == success_provider.finalized\n\n # Each represented with a CoverageRecord with status='success'\n assert all(isinstance(x, CoverageRecord) for x in successes)\n assert [CoverageRecord.SUCCESS] * 2 == [x.status for x in successes]\n\n # Each associated with one of the identifiers...\n assert set([i1, i2]) == set([x.identifier for x in successes])\n\n # ...and with the coverage provider's operation.\n assert ['i succeed'] * 2 == [x.operation for x in successes]\n\n # Now try a different CoverageProvider which creates transient\n # failures.\n class MockProvider(TransientFailureCoverageProvider):\n OPERATION = \"i fail transiently\"\n\n transient_failure_provider = MockProvider(self._db)\n counts, failures = transient_failure_provider.process_batch_and_handle_results(batch)\n # Two transient failures.\n assert (0, 2, 0) == counts\n\n # New coverage records were added to track the transient\n # failures.\n assert ([CoverageRecord.TRANSIENT_FAILURE] * 2 ==\n [x.status for x in failures])\n assert [\"i fail transiently\"] * 2 == [x.operation for x in failures]\n\n # Another way of getting transient failures is to just ignore every\n # item you're told to process.\n class MockProvider(TaskIgnoringCoverageProvider):\n OPERATION = \"i ignore\"\n task_ignoring_provider = MockProvider(self._db)\n counts, records = task_ignoring_provider.process_batch_and_handle_results(batch)\n\n assert (0, 2, 0) == counts\n assert ([CoverageRecord.TRANSIENT_FAILURE] * 2 ==\n [x.status for x in records])\n assert [\"i ignore\"] * 2 == [x.operation for x in records]\n\n # If a transient failure becomes a success, the it won't have\n # an exception anymore.\n assert ['Was ignored by CoverageProvider.'] * 2 == [x.exception for x in records]\n records = success_provider.process_batch_and_handle_results(batch)[1]\n assert [None, None] == [x.exception for x in records]\n\n # Or you can go really bad and have persistent failures.\n class MockProvider(NeverSuccessfulCoverageProvider):\n OPERATION = \"i will always fail\"\n persistent_failure_provider = MockProvider(self._db)\n counts, results = persistent_failure_provider.process_batch_and_handle_results(batch)\n\n # Two persistent failures.\n assert (0, 0, 2) == counts\n assert all([isinstance(x, CoverageRecord) for x in results])\n assert ([\"What did you expect?\", \"What did you expect?\"] ==\n [x.exception for x in results])\n assert ([CoverageRecord.PERSISTENT_FAILURE] * 2 ==\n [x.status for x in results])\n assert [\"i will always fail\"] * 2 == [x.operation for x in results]", "def test_batch_by_transaction_id_multiple_txn_ids():\n # Add batch that has txn 123\n testResponder = TestResponder()\n\n transaction = transaction_pb2.Transaction(header_signature=\"123\")\n batch = batch_pb2.Batch(\n header_signature=\"abc\", transactions=[transaction])\n testResponder.completer.add_batch(batch)\n # Request transactions 123 and 456\n 
message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\", \"456\"],\n time_to_live=1)\n testResponder.batch_by_txn_request_handler.handle(\n \"Connection_1\", message.SerializeToString())\n testResponder.batch_request_handler.handle(\n \"Connection_1\", message.SerializeToString())\n\n # Respond with a BatchResponse for transaction 123\n\n # Broadcast a BatchByTransactionIdRequest for just 456\n after_message = \\\n network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"456\"],\n time_to_live=0)", "def test_async_call_same_actor_multiple_times(self):\n actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n # 2 asynchronous call to actor 0.\n num_of_calls = manager.foreach_actor_async(\n lambda w: w.call(),\n remote_actor_ids=[0, 0],\n )\n self.assertEqual(num_of_calls, 2)\n\n # Now, let's actually fetch the results.\n results = manager.fetch_ready_async_reqs(timeout_seconds=None)\n # Returns 1 and 2, representing the first and second calls to actor 0.\n self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])\n\n manager.clear()", "async def test_datasets_access_call_multiple(self):\n pool = asynctest.CoroutineMock()\n pool.acquire().__aenter__.return_value = Connection(accessData=[{'accesstype': 'CONTROLLED', 'datasetid': 'mock:controlled:id'},\n {'accesstype': 'PUBLIC', 'datasetid': 'mock:public:id'}])\n result = await fetch_datasets_access(pool, None)\n # for now it can return a tuple of empty datasets\n # in order to get a response we will have to mock it\n # in Connection() class\n self.assertEqual(result, (['mock:public:id'], [], ['mock:controlled:id']))", "def test_get(self):\n log.info(\"START INTEG TEST GET\")\n\n # Start sampling.\n self.clear_sample_data()\n self.driver.start_sampling()\n self.clear_async_data()\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity record, time record.\n log.info(\"FIRST FILE A0000002 INTEG TEST GET\")\n self.create_sample_data('valid_A0000002.DEC', \"A0000002.DEC\")\n self.assert_data(None, 'valid_A0000002.yml', \n count=3, timeout=10)\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity records twice, time record.\n log.info(\"SECOND FILE A0000004 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('valid_A0000004.DEC', \"A0000004.DEC\")\n self.assert_data(None, 'valid_A0000004.yml', \n count=5, timeout=10)\n\n # Made-up data with all flags set to True.\n # Field values may not be realistic.\n log.info(\"THIRD FILE A0000003 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('all_A0000003.DEC', \"A0000003.DEC\")\n self.assert_data(None, 'all_A0000003.yml', \n count=4, timeout=10)\n log.info(\"END INTEG TEST GET\")", "def test_call_api_return_only_consolidated_calls(client, start_call_fx, stop_call_fx):\n\n post_url = reverse_lazy('calls:registry-list')\n\n start_call_fx_2 = copy(start_call_fx)\n start_call_fx_2['call_id'] = 2\n\n post_data = [start_call_fx, start_call_fx_2, stop_call_fx]\n\n for data in post_data:\n response = client.post(post_url, data, content_type='application/json')\n assert response.status_code == status.HTTP_201_CREATED\n\n get_url = reverse_lazy('calls:call-list')\n\n response = client.get(get_url)\n\n assert len(response.data) == 1", "def test_insert_batch_result_and_retrieve(self):\n batch = [self.successfulresult, self.failedresult]\n self.db.insert_result_batch(results=batch)\n successentry = 
self.db.get_result_by_primary_key(pk=self.successfulresult.get('id'))\n self.assertDictContainsSubset(self.successfulresult, successentry.__dict__)\n failureentry = self.db.get_result_by_primary_key(pk=self.failedresult.get('id'))\n self.assertDictContainsSubset(self.failedresult, failureentry.__dict__)", "def test_multi_cache(self):\n # cache params\n cache_key = 'test_multi_cache'\n cache_len = 60\n num_items = 20000\n\n # prepare cache data and save\n cache_data = self.get_cache_data(num_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "def batch_get(func: object, filt: str, catg: str):\n offset = 0\n running = True\n returned = []\n notified = False\n while running:\n lookup = func(filter=filt, offset=offset, limit=5000, fields=\"__full__\")\n total = lookup[\"body\"][\"meta\"].get(\"pagination\", {}).get(\"total\", 0)\n if not notified:\n notify = f\"Retrieving {total:,} {catg} results.\"\n if total > 50000:\n notify = f\"Retrieving first 50,000 of {total:,} {catg} results.\"\n print(notify)\n notified = True\n else:\n progress.next()\n if lookup[\"body\"][\"resources\"]:\n offset += len(lookup[\"body\"][\"resources\"])\n returned.extend(lookup[\"body\"][\"resources\"])\n if offset >= total:\n running = False\n\n return returned", "def test_bulk_download_happy_case_primary_pairs_only(flask_app, db, session):\n complete_db_insertion(session, db, 251, '923357891879', 251, 'G4', 'LG', 'shfy8JhZx', '2G,3G',\n 'Z45aWf6l', 251, '112233445566778')\n complete_db_insertion(session, db, 252, '923357891880', 252, 'G5', 'LG', 'JhOp8JhZx', '2G,3G',\n 'S5fRtf6l', 252, '998877665544332')\n first_pair_db_insertion(session, db, 251, '923007112390', 'jazz', 251)\n first_pair_db_insertion(session, db, 252, '923007112391', 'jazz', 252)\n url = \"{api}?mno=jazz\".format(api=MNO_BULK_DOWNLOAD)\n rs = flask_app.get(url)\n print(rs.data)\n assert rs.status_code == 200", "def test__run_one_single_list(self):\n\n # Set up\n class OneResource(BaseResource):\n\n def process(self, message):\n pass\n\n class OtherResource(BaseResource):\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n OneResource.init(api, 'one_route')\n OtherResource.init(api, 'other_route')\n\n session = ObjectId('57b599f8ab1785652bb879a7')\n a_request = Mock(context={'session': session})\n one_resource = OneResource(a_request)\n other_resource = OtherResource(a_request)\n\n one_resource._get_runnable = Mock(return_value=other_resource)\n other_resource.run = Mock()\n\n # Actual call\n one_resource._run_one('other_resource', [{}], 1, False)\n\n # Asserts\n one_resource._get_runnable.assert_called_once_with('other_resource')\n other_resource.run.assert_called_once_with({}, False, None)", "def test_block_missing_batch_dependency(self):\n pass", "def test_batch_delete(self):\n # Given:\n self.batch_setup()\n # When:\n self.client.delete(\"/api/account/user\", data={\"username\": self.user_0}, headers=self.headers)\n self.client.delete(\"/api/account/user\", data={\"username\": self.user_1}, headers=self.headers)\n self.client.delete(\"/api/account/user\", 
data={\"username\": self.user_3}, headers=self.headers)\n self.assertTrue(self.is_user_marked_for_deletion(self.user_0))\n self.assertTrue(self.is_user_marked_for_deletion(self.user_1))\n self.assertTrue(self.is_user_marked_for_deletion(self.user_3))\n self.assertFalse(self.is_user_marked_for_deletion(self.user_2))\n with patch(\"ras_rm_auth_service.batch_process_endpoints.requests.delete\") as mock_request:\n mock_request.return_value = mock_response()\n batch_delete_request = self.client.delete(\"/api/batch/account/users\", headers=self.headers)\n self.assertTrue(mock_request.called)\n self.assertEqual(3, mock_request.call_count)\n # Then:\n self.assertEqual(batch_delete_request.status_code, 204)\n self.assertTrue(self.does_user_exists(self.user_2))\n self.assertFalse(self.does_user_exists(self.user_0))\n self.assertFalse(self.does_user_exists(self.user_1))\n self.assertFalse(self.does_user_exists(self.user_3))", "def test_gettem_using_get(self):\n pass", "def test_get_token_supply_all_using_get(self):\n pass", "def test_call_second_time(self, query_repo_url, get_credentials, valid_revision, get):\n # Making sure the cache is filled so we don't depend on the order of the tests.\n query_jobs.JOBS_CACHE[(\"try\", \"146071751b1e\")] = json.loads(JOBS_SCHEDULE)\n self.assertEquals(\n self.query_api._get_all_jobs(\"try\", \"146071751b1e\"),\n json.loads(JOBS_SCHEDULE))\n # _get_all_jobs should return its value directly from\n # cache without calling get\n assert get.call_count == 0", "def concurrent_increments(self):\n start = threading.Barrier(2)\n end = threading.Barrier(2+1)\n\n for i in range(2):\n def func(i):\n try:\n # Read the other key, write key i.\n read_key = (\"value-%d\" % ((i+1) % 2)).encode('ascii')\n write_key = (\"value-%d\" % i).encode('ascii')\n # Wait until the other threads are running.\n start.wait()\n\n def callback(txn):\n # Retrieve the other key.\n gr = txn.call(Methods.Get, api_pb2.GetRequest(\n header=api_pb2.RequestHeader(key=read_key)))\n other_value = gr.value.integer\n\n txn.call(Methods.Increment, api_pb2.IncrementRequest(\n header=api_pb2.RequestHeader(key=write_key),\n increment=1+other_value))\n txn_opts = TransactionOptions(name='test-%d' % i)\n self.client.run_transaction(txn_opts, callback)\n finally:\n end.wait()\n self.executor.submit(func, i)\n # Wait for the threads to finish.\n end.wait()\n\n # Verify that both keys contain something and, more importantly, that one key\n # actually contains the value of the first writer and not only its own.\n total = 0\n results = []\n for i in range(2):\n read_key = ('value-%d' % i).encode('ascii')\n gr = self.client.call(\n Methods.Get, api_pb2.GetRequest(header=api_pb2.RequestHeader(key=read_key)))\n self.assertTrue(gr.HasField('value'))\n self.assertTrue(gr.value.HasField('integer'))\n total += gr.value.integer\n results.append(gr.value.integer)\n\n # First writer should have 1, second one 2.\n self.assertEqual(total, 3, \"got unserializable values %r\" % results)", "def test_multiple_gets(uris):\n\n for uri in uris:\n print('='*10 + ' Try uri : {uri} '.format(uri=uri) + '='*10)\n resp = get_api_url(uri)\n print(resp)\n try:\n pprint(resp.json())\n except Exception as e:\n print(resp.text)", "def test_simple_multi_cache(self):\n # cache params\n cache_key = 'test_simple_multi_cache'\n cache_len = 60\n\n # prepare cache data and save\n cache_data = self.get_cache_data(5000)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n 
self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def test_batch_accepting():\n client = create_client()\n message = types.PubsubMessage(data=b'foo')\n\n # At first, there are no batches, so this should return a new batch\n # which is also saved to the object.\n ante = len(client._batches)\n batch = client.batch('topic_name', message, autocommit=False)\n assert len(client._batches) == ante + 1\n assert batch is client._batches['topic_name']\n\n # A subsequent request should return the same batch.\n batch2 = client.batch('topic_name', message, autocommit=False)\n assert batch is batch2\n assert batch2 is client._batches['topic_name']", "def testMultipleRequests(self):\n response = self.translator.parse_reply(self.factory.result)\n d = self._getClientConnection()\n for _ in range(1000):\n d.addCallback(lambda _: self.client.check_rate_limit())\n d.addCallback(self.assertEqual, response)\n return d", "def _get_batch(self):\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n response = apiCall.get(self._get_token(), url,self._proxy, {}, 10)\n logging.debug(response)\n return response", "def listget(base_url, keys, throttle, generic_rate, max_lookback, tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, 
b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine if we're going to get the notification by itself (which is technically unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. 
This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def test_waiting(closed_job, bulk_request):\n assert closed_job.pending_batches == ['BATCHONE', 'BATCHTWO']\n\n bulk_request.reset_mock()\n bulk_request.side_effect = [\n '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <state>Completed</state>\n <id>BATCHONE</id>\n </batchInfo>''',\n '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <state>InProgress</state>\n <id>BATCHTWO</id>\n </batchInfo>''',\n '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <state>Completed</state>\n <id>BATCHTWO</id>\n </batchInfo>''',\n ]\n\n with mock.patch('salesforce_bulk_api.time.sleep') as sleep:\n closed_job.wait()\n sleep.assert_called_once_with(10)\n\n bulk_request.assert_has_calls([\n mock.call('get', 'https://salesforce/services/async/34.0/job/THEJOBID/batch/BATCHONE', expected_response=200),\n mock.call('get', 'https://salesforce/services/async/34.0/job/THEJOBID/batch/BATCHTWO', expected_response=200),\n mock.call('get', 'https://salesforce/services/async/34.0/job/THEJOBID/batch/BATCHTWO', expected_response=200),\n ])\n\n assert closed_job.pending_batches == []\n assert closed_job.finished_batches == ['BATCHONE', 'BATCHTWO']", "def test_get1(self):\n pass", "def test_block_batches_order(self):\n pass", "def test_get_all_jobs(\n globals, client, mock_test_responses, context_fixture):\n\n context = context_fixture('Healthy')\n mock_test_responses(task='upload', status=CoreStatus.DONE)\n data_list = []\n for i in range(2):\n data = client.upload(file=globals['test_csv_file'], name=str(i))\n data_list.append(data)\n\n context.run()\n jobs = context.get_all_jobs()\n\n assert jobs[0] is data_list[0]\n assert jobs[1] is data_list[1]", "def test_get2(self):\n pass", "def _batch(self, batch_request_entries):\n necessary_keys = [\"id\", \"version\", \"method\", \"params\"]\n\n results = []\n\n for (idx, request) in enumerate(batch_request_entries):\n error = None\n result = None\n\n # assert presence of important details\n for necessary_key in necessary_keys:\n if not necessary_key in request.keys():\n raise FakeBitcoinProxyException(\"Missing necessary key {} for _batch request number {}\".format(necessary_key, idx))\n\n if isinstance(request[\"params\"], list):\n method = getattr(self, request[\"method\"])\n result = method(*request[\"params\"])\n else:\n # matches error message received through python-bitcoinrpc\n error = {\"message\": \"Params must be an array\", \"code\": -32600}\n\n results.append({\n \"error\": error,\n \"id\": request[\"id\"],\n \"result\": result,\n })\n\n return results", "def test_fetch(self, mock_find_law, mock_process_join,\n mock_process_start, mock_process_manager):\n\n mock_find_law.return_value = [1, 2, 3]\n 
mock_process_manager.list.return_value = [3, 4, 5]\n self.law.fetch()\n self.assertEqual(mock_process_join.call_count, 3)\n self.assertEqual(mock_process_start.call_count, 3)\n self.assertEqual(mock_process_manager.call_count, 1)", "def test_grainbin_updates_latest_get_multiple(flaskclient, auth_headers, dbsession):\n\n grainbin = GrainbinFactory().save()\n\n # create two GrainbinUpdates for each iteration\n for x in range(5):\n grainbin_update = GrainbinUpdate(grainbin.id)\n grainbin_update.timestamp = dt.datetime.now()\n grainbin_update.update_index = x\n grainbin_update_2 = GrainbinUpdate(grainbin.id)\n grainbin_update_2.timestamp = dt.datetime.now()\n grainbin_update_2.update_index = x\n grainbin.total_updates = x\n dbsession.add(grainbin_update)\n dbsession.add(grainbin_update_2)\n\n dbsession.commit()\n\n url = url_for(\"grainbin.GrainbinUpdatesLatest\", grainbin_id=grainbin.id)\n rep = flaskclient.get(url, headers=auth_headers)\n fetched_update = rep.get_json()\n\n assert rep.status_code == 200\n assert len(fetched_update) == 2\n assert fetched_update[0][\"update_index\"] == 4", "def test_get_all(client: FlaskClient):\n response1 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Txt)\n )\n response2 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Jpg)\n )\n response3 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Png)\n )\n\n # Now retrieve them\n response_get = util.get_all_files(client, DEFAULT_USER)\n assert response_get.status == \"200 OK\"\n assert len(response_get.json) == 3\n assert response1.json in response_get.json\n assert response2.json in response_get.json\n assert response3.json in response_get.json", "def bulk_request():\n with mock.patch('salesforce_bulk_api.SalesforceBulkJob.request') as request:\n yield request", "def test_parallel_mutliple_rarefactions(self):\r\n r = ParallelMultipleRarefactions()\r\n params = {'min': 1,\r\n 'max': 100,\r\n 'step': 10,\r\n 'num_reps': 2,\r\n 'jobs_to_start': 2,\r\n 'suppress_lineages_included': False,\r\n 'subsample_multinomial': False}\r\n r(self.input1_fp,\r\n self.test_out,\r\n params,\r\n job_prefix='RARIFTEST',\r\n poll_directly=True,\r\n suppress_submit_jobs=False)\r\n biom_tables = glob('%s/*biom' % self.test_out)\r\n self.assertEqual(len(biom_tables), 20)\r\n biom_tables.sort()\r\n input_table = parse_biom_table(open(self.input1_fp))\r\n # sanity checks on first table (sampled at 11 seqs/sample)\r\n output_table = parse_biom_table(open(biom_tables[0]))\r\n self.assertEqual(output_table.SampleIds, input_table.SampleIds)\r\n self.assertEqual(output_table.sum(), 99)\r\n # sanity checks on first table (sampled at 91 seqs/sample)\r\n output_table = parse_biom_table(open(biom_tables[-1]))\r\n self.assertEqual(output_table.SampleIds, input_table.SampleIds)\r\n self.assertEqual(output_table.sum(), 819)", "def fetch_batch(self, phase):\n pass", "def test_get_all_failures(self):\n batch = [self.successfulresult, self.failedresult]\n self.db.insert_result_batch(results=batch)\n results = self.db.get_all_failures()\n self.assertTrue(len(results) == 1, msg=\"Retrieved more than a single failure unexpectedly.\")\n self.assertDictContainsSubset(self.failedresult, results[0].__dict__)", "def test_multiple_batch(sdc_builder, sdc_executor, cluster):\n topic = get_random_string()\n\n raw_data = {'key': 'value'}\n\n # Build pipeline.\n builder = sdc_builder.get_pipeline_builder()\n\n source = builder.add_stage('Dev Raw Data Source').set_attributes(\n 
data_format='JSON',\n raw_data=json.dumps(raw_data),\n stop_after_first_batch=False\n )\n\n destination = builder.add_stage(\n name='com_streamsets_pipeline_stage_destination_kafka_KafkaDTarget',\n library=cluster.kafka.standalone_stage_lib\n ).set_attributes(\n topic=topic,\n data_format='JSON'\n )\n\n source >> destination\n\n pipeline = builder.build(f'Kafka Destination Multiple Batches').configure_for_environment(cluster)\n\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'output_record_count', 100)\n sdc_executor.stop_pipeline(pipeline)\n\n consumer = cluster.kafka.consumer(consumer_timeout_ms=1000, auto_offset_reset='earliest')\n consumer.subscribe([topic])\n\n msgs_received = [json.loads(message.value.decode()) for message in consumer]\n\n history = sdc_executor.get_pipeline_history(pipeline)\n history_records = history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count\n\n assert len(msgs_received) == history_records\n assert all(msg == raw_data for msg in msgs_received)", "def test_populator_only_fetches_needy():\n o1, o2 = MediaBag(id=1), MediaBag(media=2)\n with build_multi_get(1) as multi_get:\n media.build_populator('id', multi_get)([o1, o2])", "def test_simple_multitask():\n bucket = []\n def _foo():\n for i in range(10):\n bucket.append(i)\n yield\n\n scheduler = Scheduler()\n scheduler.new(_foo())\n scheduler.new(_foo())\n scheduler.mainloop()\n\n expect_bucket = []\n for i in range(10):\n expect_bucket.append(i)\n expect_bucket.append(i)\n assert bucket == expect_bucket", "def test_adding_a_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHONE</id>\n <jobId>THEJOBID</jobId>\n <state>Queued</state>\n </batchInfo>\n '''\n\n fake_data = [('1', '2'), ('3', '4')]\n created_job.add_batch(['Id', 'Name'], iter(fake_data))\n\n assert created_job.pending_batches == ['BATCHONE']\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID/batch',\n content_type='text/csv; charset=UTF-8',\n data=mock.ANY\n )\n\n data = bulk_request.call_args[1]['data']\n assert b''.join(data) == b'Id,Name\\r\\n1,2\\r\\n3,4\\r\\n'", "def make_batch_request(self, batch):\n args = {}\n args['access_token'] = self.access_token\n args['batch'] = json.dumps(batch)\n args = {k.encode('utf-8'): unicode(v).encode('utf-8')\n for k, v in args.items()}\n logger.info('Making a batched request with %s' % args)\n try:\n f = urllib2.urlopen(self.api_root, urllib.urlencode(args))\n data = json.load(f)\n # For debugging\n self.data = data\n for idx, val in enumerate(data):\n data[idx] = json.loads(val['body'])\n return data\n except urllib2.HTTPError as e:\n logger.info('%s' % e)\n return json.load(e)\n except urllib2.URLError as e:\n logger.warn('URLError: %s' % e.reason)", "def GetBatchGet(self, request, global_params=None):\n config = self.GetMethodConfig('GetBatchGet')\n return self._RunMethod(\n config, request, global_params=global_params)", "def check_dispatch_one_job(backend):\r\n queue = list()\r\n\r\n def producer():\r\n for i in range(6):\r\n queue.append('Produced %i' % i)\r\n yield i\r\n\r\n Parallel(n_jobs=1, backend=backend)(\r\n delayed(consumer)(queue, x) for x in producer())\r\n nose.tools.assert_equal(queue,\r\n ['Produced 0', 'Consumed 0',\r\n 'Produced 1', 'Consumed 1',\r\n 'Produced 2', 
'Consumed 2',\r\n 'Produced 3', 'Consumed 3',\r\n 'Produced 4', 'Consumed 4',\r\n 'Produced 5', 'Consumed 5']\r\n )\r\n nose.tools.assert_equal(len(queue), 12)", "def test_listtem_using_get(self):\n pass", "def test_bulk_download_happy_case_all_pairs(flask_app, db, session):\n complete_db_insertion(session, db, 253, '923354441879', 253, 'G6', 'LG', 'shfHHJhZx', '2G,3G',\n '9i8Pbr5T', 253, '112233445566778')\n complete_db_insertion(session, db, 254, '923358881880', 254, 'G7', 'LG', 'JhOUUJhZx', '2G,3G',\n 'G3eeR7vQ', 254, '998877665544332')\n first_pair_db_insertion(session, db, 253, '923006565747', 'warid', 253)\n add_pair_db_insertion(session, db, 254, 253, '923018112222', 253)\n add_pair_confrm_db_insertion(session, db, '923018112222', 253, 'warid')\n\n first_pair_db_insertion(session, db, 255, '923007575848', 'warid', 254)\n add_pair_db_insertion(session, db, 256, 255, '923018113333', 254)\n add_pair_confrm_db_insertion(session, db, '923018113333', 255, 'warid')\n\n url = \"{api}?mno=warid\".format(api=MNO_BULK_DOWNLOAD)\n rs = flask_app.get(url)\n print(rs.data)\n assert rs.status_code == 200", "def test_sync_call_same_actor_multiple_times(self):\n actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n # 2 synchronous call to actor 0.\n results = manager.foreach_actor(\n lambda w: w.call(),\n remote_actor_ids=[0, 0],\n )\n # Returns 1 and 2, representing the first and second calls to actor 0.\n self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])\n\n manager.clear()", "def test_block_extra_batch(self):\n pass", "def test_products_get_pattern_multiple_match(data, mocker):\n mocker.patch(\"sps.request.fetch\", autospec=True)\n request.fetch.return_value = data\n assert products.get(\"1\", \"fake-file-name\", False, False) == data[\"data\"]\n assert products.get(\"SUSE\", \"fake-file-name\", False, False) == data[\"data\"]\n assert products.get(\"x86\", \"fake-file-name\", False, False) == data[\"data\"]", "def test_batch_generate_and_combine_pdfs(self):\n with mock.patch.object(time, \"sleep\") as wait_patched:\n template_id = 'tpl_000000000000000001' # str |\n response = self.client.batch_generate_and_combine_pdfs({\n 'template_id': template_id,\n 'metadata': {'user_id': 123},\n 'test': True,\n 'submissions': [\n {\n 'data': {\n 'title': 'Test PDF',\n 'description': 'This PDF is great!',\n }\n },\n {\n 'data': {\n 'title': 'Test PDF 2',\n 'description': 'This PDF is also great!',\n }\n }\n ]}, wait=True)\n\n wait_patched.assert_called()\n combined_submission = response.combined_submission\n self.assertRegexpMatches(combined_submission.id, '^com_')\n self.assertEquals(combined_submission.expired, False)\n self.assertEquals(len(combined_submission.submission_ids), 2)\n self.assertEquals(combined_submission.state, 'processed')", "def test_results_workers(self, affiliate_items):\n success_count = 0\n updater = mock.Mock()\n\n few_workers = BatchJob(affiliate_items, updater, workers=1)\n for result in few_workers.run():\n success_count += int(not result.is_error)\n\n many_workers = BatchJob(affiliate_items, updater, workers=4)\n for result in many_workers.run():\n success_count += int(not result.is_error)\n\n assert success_count == 8\n assert updater.call_count == 8", "def test_get_request_body(self):\n\n batch = ObjectsBatchRequest()\n expected_return = []\n self.assertEqual(batch.get_request_body(), {\"fields\": [\"ALL\"], \"objects\": expected_return})\n\n # add an object without 'uuid' and 'vector'\n obj = 
{\n 'class': \"Philosopher\",\n 'properties': {\"name\": \"Socrates\"}\n }\n expected_return.append({\n 'class': \"Philosopher\",\n 'properties': {\"name\": \"Socrates\"}\n })\n batch.add(obj['properties'], obj['class'])\n self.assertEqual(batch.get_request_body(), {\"fields\": [\"ALL\"], \"objects\": expected_return})\n\n # add an object without 'vector'\n obj = {\n 'class': \"Chemist\",\n 'properties': {\"name\": \"Marie Curie\"},\n 'id': \"d087b7c6-a115-5c89-8cb2-f25bdeb9bf92\"\n }\n expected_return.append({\n 'class': \"Chemist\",\n 'properties': {\"name\": \"Marie Curie\"},\n 'id': \"d087b7c6-a115-5c89-8cb2-f25bdeb9bf92\"\n })\n batch.add(obj['properties'], obj['class'], obj['id'])\n self.assertEqual(batch.get_request_body(), {\"fields\": [\"ALL\"], \"objects\": expected_return})\n\n # add an object without 'uuid'\n obj = {\n 'class': \"Writer\",\n 'properties': {\"name\": \"Stephen King\"},\n 'vector': [1, 2, 3]\n }\n expected_return.append({\n 'class': \"Writer\",\n 'properties': {\"name\": \"Stephen King\"},\n 'vector': [1, 2, 3]\n })\n batch.add(obj['properties'], obj['class'], vector=obj['vector'])\n self.assertEqual(batch.get_request_body(), {\"fields\": [\"ALL\"], \"objects\": expected_return})\n\n # add an object with all arguments\n obj = {\n 'class': \"Inventor\",\n 'properties': {\"name\": \"Nikola Tesla\"},\n 'id': \"d087b7c6-a115-5c89-8cb2-f25bdeb9bf93\",\n 'vector': [1, 2, 3]\n }\n expected_return.append({\n 'class': \"Inventor\",\n 'properties': {\"name\": \"Nikola Tesla\"},\n 'id': \"d087b7c6-a115-5c89-8cb2-f25bdeb9bf93\",\n 'vector': [1, 2, 3]\n })\n batch.add(obj['properties'], obj['class'], obj['id'], obj['vector'])\n self.assertEqual(batch.get_request_body(), {\"fields\": [\"ALL\"], \"objects\": expected_return})", "def test_async_call(self):\n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n manager.foreach_actor_async(lambda w: w.call())\n results.extend(manager.fetch_ready_async_reqs(timeout_seconds=None))\n # Wait for actors to recover.\n wait_for_restore()\n\n # Note that we can hardcode the numbers here because of the deterministic\n # lists of random numbers we use.\n # 7 calls succeeded, 4 failed.\n # The number of results back is much lower than 40, because we do not probe\n # the actors with this test. 
As soon as an actor errors out, it will get\n # taken out of the lineup forever.\n self.assertEqual(len([r for r in results if r.ok]), 7)\n self.assertEqual(len([r for r in results if not r.ok]), 4)\n\n manager.clear()", "def test_multi_partition_consistent_reads_after_write(self):\n self._consistent_reads_after_write_test(5)", "def test_very_many_partitions_and_instances_in_fsic(self):\n fsics = {\"super\": {}, \"sub\": {\"\": {self.data[\"group1_id\"].id: 1, self.data[\"group2_id\"].id: 1}}}\n for i in range(99):\n fsics[\"sub\"][uuid.uuid4().hex] = {uuid.uuid4().hex: i for i in range(999)}\n self.transfer_session.client_fsic = json.dumps(fsics)\n self.transfer_session.server_fsic = json.dumps({\"super\": {}, \"sub\": {}})\n _queue_into_buffer_v2(self.transfer_session)\n # ensure all store and buffer records are buffered\n assertRecordsBuffered(self.data[\"group1_c1\"])\n assertRecordsBuffered(self.data[\"group1_c2\"])\n assertRecordsBuffered(self.data[\"group2_c1\"])", "async def _multi_get(self, keys):\n with await self._connect() as redis:\n return await redis.mget(*keys)", "def test_get_all(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n task_list = self.task_storage.get_all()\n\n self.assertEqual(task_list[0], self.my_task)", "def test_multiple_inference_runs_yield_same_result(count, mock_model_runtime):\n runtime = mock_model_runtime[0]\n net_id = mock_model_runtime[1]\n input_tensors = mock_model_runtime[2]\n output_tensors = mock_model_runtime[3]\n\n expected_results = np.array([[4, 85, 108, 29, 8, 16, 0, 2, 5, 0]])\n\n for _ in range(count):\n runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)\n\n output_vectors = ann.workload_tensors_to_ndarray(output_tensors)\n\n for i in range(len(expected_results)):\n assert output_vectors[i].all() == expected_results[i].all()", "def test_get_query_with_api_key(self):\r\n users = UserFactory.create_batch(3)\r\n app = AppFactory.create(owner=users[0], info={'total': 150})\r\n task = TaskFactory.create(app=app, info={'url': 'my url'})\r\n taskrun = TaskRunFactory.create(task=task, user=users[0],\r\n info={'answer': 'annakarenina'})\r\n for endpoint in self.endpoints:\r\n url = '/api/' + endpoint + '?api_key=' + users[1].api_key\r\n res = self.app.get(url)\r\n data = json.loads(res.data)\r\n\r\n if endpoint == 'app':\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'task':\r\n assert len(data) == 1, data\r\n task = data[0]\r\n assert task['info']['url'] == 'my url', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'taskrun':\r\n assert len(data) == 1, data\r\n taskrun = data[0]\r\n assert taskrun['info']['answer'] == 'annakarenina', data\r\n assert res.mimetype == 'application/json', res\r\n\r\n if endpoint == 'user':\r\n assert len(data) == 3, data\r\n user = data[0]\r\n assert user['name'] == 'user1', data\r\n assert res.mimetype == 'application/json', res", "def test_multi(self):\n self.assertEqual(6, multi(2, 3))", "def test_get_devices1(self):\n pass", "def test_successful_resources(self):\n\n url = '/%s/jobs/%i/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['resources']['resources']['cpus'], 1.0)\n self.assertEqual(result['resources']['resources']['mem'], 128.0)\n 
self.assertEqual(result['resources']['resources']['disk'], 10.0)\n\n url = '/%s/jobs/%i/' % (self.api, self.job2.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['resources']['resources']['cpus'], 1.0)\n self.assertEqual(result['resources']['resources']['mem'], 1024.0)\n self.assertEqual(result['resources']['resources']['disk'], 1040.0)", "def test_getting_state_parallel(self):\n no_replicates = 25\n replicate(experiment2, no_replicates, parallel=True, no_processes=2)\n for i in range(no_replicates):\n self.assertNotIn(SUBSTATE_KEY_PATTERN % i + '.result', state)", "def test_before_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('set', 'abcd', 0)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 10)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 20)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are before each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'abcd')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'inject')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'abcd')", "def test_singleConcurrentRequest(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n query = dns.Query('foo.example.com', dns.A, dns.IN)\n # The first query should be passed to the underlying protocol.\n firstResult = resolver.query(query)\n self.assertEqual(len(queries), 1)\n\n # The same query again should not be passed to the underlying protocol.\n secondResult = resolver.query(query)\n self.assertEqual(len(queries), 1)\n\n # The response to the first query should be sent in response to both\n # queries.\n answer = object()\n response = dns.Message()\n response.answers.append(answer)\n queries.pop()[-1].callback(response)\n\n d = defer.gatherResults([firstResult, secondResult])\n def cbFinished((firstResponse, secondResponse)):\n self.assertEqual(firstResponse, ([answer], [], []))\n self.assertEqual(secondResponse, ([answer], [], []))\n d.addCallback(cbFinished)\n return d", "def test_identity_single_batched(self, dev):\n 
qml.enable_tape()\n dev = qml.device(dev, wires=1)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.Identity(wires=[0]))\n\n res = dev.batch_execute([tape1])\n assert len(res) == 1\n assert np.allclose(res[0], np.array([1]))\n qml.disable_tape()", "async def test_get_multiple_pages():\n writer = SimpleWriter()\n work_queue = asyncio.Queue()\n await work_queue.put(TestData.JOB_TEMPLATE_PAYLOAD_ALL_PAGES)\n worker = tower_api_worker.TowerApiWorker(TestData.config, writer, work_queue)\n headers = {\"Content-Type\": \"application/json\"}\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE1_RESPONSE),\n headers=headers,\n )\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL_PAGE_2,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE2_RESPONSE),\n headers=headers,\n )\n res = await worker.start()\n assert writer.data[\"count\"] == 3\n assert writer.called == 2", "def test_execute_get_success_with_multiple_pages():\n response_queue = queue.Queue()\n message = FakeMessage()\n message.raw_payload = json.dumps(TestData.JOB_TEMPLATE_PAYLOAD_ALL_PAGES)\n headers = {\"Content-Type\": \"application/json\"}\n\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE1_RESPONSE),\n headers=headers,\n )\n mocked.get(\n TestData.JOB_TEMPLATES_LIST_URL_PAGE_2,\n status=200,\n body=json.dumps(TestData.JOB_TEMPLATES_PAGE2_RESPONSE),\n headers=headers,\n )\n worker.execute(message, TestData.RECEPTOR_CONFIG, response_queue)\n\n validate_get_response(\n response_queue.get(),\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_1, TestData.JOB_TEMPLATE_2],\n )\n validate_get_response(\n response_queue.get(),\n 200,\n TestData.JOB_TEMPLATE_COUNT,\n [TestData.JOB_TEMPLATE_3],\n )", "def test_populator_aborts_early():\n o1, o2 = MediaBag(media=1), MediaBag(media=2)\n\n def multi_get(*keys):\n raise AssertionError('tried calling multi_get')\n\n results = media.build_populator('id', multi_get)([o1, o2])\n assert results == [o1, o2]", "def test_batch_exec_multiple_workflow(\n self, keep, dev_name, tmpdir, monkeypatch, test_batch_result\n ):\n\n qml.enable_tape()\n\n with qml.tape.QuantumTape() as tape1:\n qml.RX(0.133, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.RX(0.432, wires=0)\n qml.RY(0.543, wires=0)\n qml.expval(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape3:\n qml.RX(0.432, wires=0)\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2, tape3]\n\n # Setting batch size: allow only a single circuit for each workflow\n dev = qml.device(dev_name, wires=3, batch_size=1, keep_files=keep)\n\n # Check that no workflow files were created before\n test_uuid = \"1234\"\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}-0.yaml\"))\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}-1.yaml\"))\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}-2.yaml\"))\n\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: test_batch_result,\n )\n\n # Disable random uuid generation\n m.setattr(uuid, 
\"uuid4\", lambda *args: test_uuid)\n\n res = dev.batch_execute(circuits)\n\n # Correct order of results is expected\n assert np.allclose(res[0], test_batch_res0)\n assert np.allclose(res[1], test_batch_res1)\n assert np.allclose(res[2], test_batch_res2)\n file0_kept = os.path.exists(tmpdir.join(f\"expval-{test_uuid}-0.yaml\"))\n file1_kept = os.path.exists(tmpdir.join(f\"expval-{test_uuid}-1.yaml\"))\n file2_kept = os.path.exists(tmpdir.join(f\"expval-{test_uuid}-2.yaml\"))\n\n # Check that workflow files were either all kept or all deleted\n files_kept = file0_kept and file1_kept and file2_kept\n assert files_kept and file0_kept if keep else not files_kept\n\n qml.disable_tape()", "def test_task_query_with_params(self):\r\n app = AppFactory.create()\r\n TaskFactory.create_batch(10, app=app)\r\n # Test for real field\r\n res = self.app.get(\"/api/task?app_id=1\")\r\n data = json.loads(res.data)\r\n # Should return one result\r\n assert len(data) == 10, data\r\n # Correct result\r\n assert data[0]['app_id'] == 1, data\r\n\r\n # Valid field but wrong value\r\n res = self.app.get(\"/api/task?app_id=99999999\")\r\n data = json.loads(res.data)\r\n assert len(data) == 0, data\r\n\r\n # Multiple fields\r\n res = self.app.get('/api/task?app_id=1&state=ongoing')\r\n data = json.loads(res.data)\r\n # One result\r\n assert len(data) == 10, data\r\n # Correct result\r\n assert data[0]['app_id'] == 1, data\r\n assert data[0]['state'] == u'ongoing', data\r\n\r\n # Limits\r\n res = self.app.get(\"/api/task?app_id=1&limit=5\")\r\n data = json.loads(res.data)\r\n for item in data:\r\n assert item['app_id'] == 1, item\r\n assert len(data) == 5, data", "def test_multiple_batch(sdc_builder, sdc_executor, azure, number_batches):\n container_name = get_random_string(string.ascii_lowercase, 10)\n event_hub_name = get_random_string(string.ascii_lowercase, 10)\n\n number_records = 1000\n\n builder = sdc_builder.get_pipeline_builder()\n\n azure_iot_event_hub_consumer = builder.add_stage(name=AZURE_IOT_EVENT_HUB_STAGE_NAME).set_attributes(\n container_name=container_name,\n data_format='JSON',\n event_hub_name=event_hub_name)\n\n wiretap = builder.add_wiretap()\n\n azure_iot_event_hub_consumer >> wiretap.destination\n\n consumer_origin_pipeline = builder.build().configure_for_environment(azure)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n create_blob_container(azure, container_name)\n\n try:\n eh_service_bus = azure.event_hubs.service_bus\n\n logger.info('Creating event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n assert eh_service_bus.create_event_hub(event_hub_name)\n\n total_send_records = []\n for i in range(0, number_records):\n send_records = [{'Body': get_random_string(string.ascii_lowercase, 10)}]\n total_send_records.append(send_records)\n eh_service_bus.send_event(event_hub_name, json.dumps(send_records))\n\n sdc_executor.start_pipeline(consumer_origin_pipeline)\n sdc_executor.wait_for_pipeline_metric(consumer_origin_pipeline, 'data_batch_count', number_batches,\n timeout_sec=120)\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n records = [record.field for record in wiretap.output_records]\n items = [r[0].items() for r in records]\n assert len(items) >= number_batches\n\n batches = wiretap.batches\n len_records_in_batches = 0\n for bat in batches:\n len_records_in_batches = len_records_in_batches + len(bat)\n assert len(items) == len_records_in_batches\n\n finally:\n try:\n if 
sdc_executor.get_pipeline_status(consumer_origin_pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n except Exception as err:\n logger.error('Could not stop pipeline. Reason found: %s', err)\n\n try:\n logger.info('Deleting event hub %s under event hub namespace %s', event_hub_name, azure.event_hubs.namespace)\n event_hub_exists = True\n while event_hub_exists:\n eh_service_bus.delete_event_hub(event_hub_name)\n try:\n eh_service_bus.get_event_hub(event_hub_name)\n except Exception:\n event_hub_exists = False\n except Exception as err:\n logger.error('Failure deleting event hub %s. Reason found: %s', event_hub_name, err)\n\n try:\n logger.info('Deleting container %s on storage account %s', container_name, azure.storage.account_name)\n azure.storage.delete_blob_container(container_name)\n except Exception as err:\n logger.error('Failure deleting container %s. Reason found: %s', container_name, err)", "def test_data_source_soaps_find_one_get(self):\n pass", "def test_after_batch_injection(self):\n intkey_batch1 = create_batch(self.signer, [('inc', 'abcd', 30)])\n intkey_batch2 = create_batch(self.signer, [('inc', 'abcd', 40)])\n intkey_batch3 = create_batch(self.signer, [('inc', 'abcd', 50)])\n batches = create_batch_list(\n [intkey_batch1, intkey_batch2, intkey_batch3])\n\n post_batch(batches)\n\n # Assert injected batches are after each intkey transaction\n # get last committed block (first from the list)\n last_block = get_blocks()[0]\n\n family_name = get_family_from(last_block['batches'][0])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][0]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][1])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][1]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][2])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][2]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][3])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][3]))\n self.assertEqual(payload.get('Name'), 'inject')\n\n family_name = get_family_from(last_block['batches'][4])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][4]))\n self.assertEqual(payload.get('Name'), 'abcd')\n family_name = get_family_from(last_block['batches'][5])\n self.assertEqual(family_name, 'intkey')\n payload = decode(get_payload_from(last_block['batches'][5]))\n self.assertEqual(payload.get('Name'), 'inject')", "async def test_get_all(self):\n await self.collection.create({'id': 'foo', 'token': 'foo:bar'})\n await self.collection.create({'id': 'baz', 'token': 'baz:qux'})\n expected = (\n {'id': 'baz', 'username': 'baz'},\n {'id': 'foo', 'username': 'foo'})\n self.assertEqual(expected, await self.resource.get_all())", "def testLoadTestRequestsMultipleUsers(self):\n user_list = ['alice', 'bob', 'charles']\n def sendRequestExpect200():\n for user in user_list:\n response = requests.get(\"http://localhost:%d/weather/%s\" % (self.port_number, user))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.text, 'cloudy')\n # Subscribe all users to weather updates so that messages\n # are persisted when posted.\n for user in user_list:\n 
response = requests.post(\"http://localhost:%d/weather/%s\" % (self.port_number, user), data='')\n self.assertEqual(response.status_code, 200)\n # Check that server stays up when subjected to requests from multiple users.\n self.runMultipleRequests(50, sendRequestExpect200)", "async def test_17() -> None:\n LOG.debug(\"Test post query (request two controlled, having access to one)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 9,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome:controlled\", \"urn:hg:1000genome:controlled1\"],\n \"includeDatasetResponses\": \"HIT\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is True, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 2, sys.exit(\"Should be able to retrieve both requested.\")", "def test_get_next_to_arrive(self, mock_requests):\n\n r = services.get_next_to_arrive(self.a, self.b)\n params = {'req1': self.a, 'req2': self.b}\n\n self.assertTrue(\n mock.call.get(services.SEPTA_NEXTTOARRIVE_URL, params=params) in\n mock_requests.mock_calls)", "def batched(self) -> bool:\n return False", "async def test_25() -> None:\n LOG.debug(\"Test query for targeting three datasets, using ALL. (expect data shown)\")\n payload = {\n \"referenceName\": \"MT\",\n \"start\": 10,\n \"referenceBases\": \"T\",\n \"alternateBases\": \"C\",\n \"assemblyId\": \"GRCh38\",\n \"datasetIds\": [\"urn:hg:1000genome\", \"urn:hg:1000genome:controlled\", \"urn:hg:1000genome:registered\"],\n \"includeDatasetResponses\": \"ALL\",\n }\n headers = {\"Authorization\": f\"Bearer {TOKEN}\"}\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\"http://localhost:5050/query\", data=json.dumps(payload)) as resp:\n data = await resp.json()\n assert data[\"exists\"] is False, sys.exit(\"Query POST Endpoint Error!\")\n assert len(data[\"datasetAlleleResponses\"]) == 3, sys.exit(\"Should be able to retrieve data for all datasets.\")", "def test_multiple_commands_at_same_time(self):" ]
[ "0.65004903", "0.63475984", "0.61924875", "0.6180004", "0.6172568", "0.6104523", "0.60755324", "0.60726875", "0.60234624", "0.5951735", "0.5937699", "0.59353304", "0.5891406", "0.588716", "0.5864232", "0.58354574", "0.5834128", "0.5820583", "0.58080596", "0.57994694", "0.57958084", "0.5749813", "0.5746214", "0.5713961", "0.57056874", "0.5702881", "0.56788063", "0.5667716", "0.5660455", "0.5623676", "0.5620624", "0.5616352", "0.55875754", "0.5581955", "0.5566137", "0.5546914", "0.5544338", "0.5543954", "0.5538419", "0.5534179", "0.55161196", "0.55139095", "0.5506968", "0.5500643", "0.549846", "0.54946834", "0.5486677", "0.54743594", "0.5468425", "0.5468379", "0.54574084", "0.54422665", "0.54303014", "0.54266894", "0.54115254", "0.54114", "0.54043525", "0.53997624", "0.5397489", "0.5388597", "0.5387577", "0.5386434", "0.5385187", "0.5384187", "0.53803945", "0.53791136", "0.5367493", "0.5364593", "0.5364231", "0.536255", "0.53574294", "0.5339869", "0.5337517", "0.53372073", "0.53358895", "0.53330195", "0.53295076", "0.53273845", "0.53247035", "0.53217846", "0.53172195", "0.53169674", "0.53096426", "0.5308769", "0.5306515", "0.5306451", "0.5299255", "0.529817", "0.5297766", "0.52958685", "0.52928585", "0.52918106", "0.52912456", "0.5290568", "0.5288586", "0.5284802", "0.528281", "0.52788615", "0.5272442", "0.526941" ]
0.6140659
5
Write a function that adds 2 integers.
def add_integer(a, b=98):
    if not isinstance(a, (int, float)):
        raise TypeError('a must be an integer')
    if not isinstance(b, (int, float)):
        raise TypeError('b must be an integer')
    if type(a) or type(b) is float:
        a, b = int(a), int(b)
    return a + b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_numbers(a: int, b: int) -> int:\n return a + b", "def add_numbers(x,y):\n return x + y", "def add_numbers(a,b):\r\n return a+ b", "def add_numbers(x, y):\n return x + y", "def add_numbers(x, y):\r\n return x + y", "def add_ints(num1, num2):\n print(int(num1) + int(num2))", "def add(n1, n2):\n return n1 + n2", "def add(n1, n2):\n return n1 + n2", "def add_two_numbers(number1, number2):\n number3 = number1 + number2\n return number3", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def add(num1, num2):\n return num1 + num2", "def addition(value1, value2):\n if not isinstance(value1, int) or not isinstance(value2, int):\n raise TypeError(\"Arguments must be integers\")\n\n return value1 + value2", "def sum(num_1, num_2):\n return num_1 + num_2", "def add(first, second):\n return first + second", "def add(num1, num2):\n sum = num1 + num2\n return sum", "def addition(number1, number2):\n return number1 + number2", "def sum_num(a, b):\n return a + b", "def sum(num1, num2):\n return num1 + num2", "def plus_two(x):\n return x + 2", "def suma(a, b) -> int:\n return a+b", "def addition(a, b):\n pass", "def add(num1, num2):\n\n sums = num1 + num2\n return sums", "def addNum(num1, num2):\n return num1 + num2", "def addition(a, b):\n return a + b", "def add_numbers(first_number, second_number):", "def add_2(number: float):\n return number + 2", "def sum(self, a, b):\n return int(a) + int(b)", "def add( a, b ):\n return a + b", "def addition(a,b):\n return a+b", "def add(left: int, right: int) -> int:\n\n return left + right", "def add(x, y):\n\n return x + y", "def sum(a, b):\n return a + b", "def sum(a, b):\n return a + b", "def add(x, y):\n sum = 0\n sum = x + y\n return sum", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def add(a, b):\n return a+b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def addition(a, b):\r\n\r\n result = a + b\r\n return result", "def func(num1, num2) :\n return num1 + num2", "def add(a,b):\n return a + b", "def add(a,b):\r\n return a+b", "def sum_nums(n1=0, n2=0):\n return n1 + n2", "def add(*args):\n\n result = int(args[0]) + int(args[1])\n\n return str(result)", "def add(a,b):\n\treturn a+b", "def add(a, b):\n result = a+b\n return result", "def my_addition(number1, number2):\n return number1 + number2", "def sum_2_num(num1, num2):\n result = num1 + num2\n # print(\"%d + %d = %d\" % (num1, num2, result))\n return result", "def addition(x, y):\n\n if isinstance(x, int) and isinstance(y, int):\n return x + y\n else:\n return (\"Invalid type.\")", "def sum1(a,b):\n c = a + b\n return c", "def add(a: int, b: int) -> int:\n if a == 0:\n return b\n elif b == 0:\n return a\n else: return add(a-1, b+1)", "def add(a,b):\r\n result=a+b\r\n return result", "def add_integer(a, b):\n if isinstance(a, (int, 
float)) and isinstance(b, (int, float)):\n return (int(a) + int(b))\n else:\n if not isinstance(a, (int, float)):\n raise TypeError(\"a must be an integer\")\n if not isinstance(b, (int, float)):\n raise TypeError(\"b must be an integer\")", "def sum_num(n1=2, n2=4):\n return n1 + n2", "def sum(a,b):\r\n c=a+b\r\n return c", "def add_integer(a, b=98):\n z = [a, b]\n s = ['a', 'b']\n\n for i in range(2):\n if type(z[i]) is float and type(z[i]) is not int:\n z[i] = int(z[i])\n elif type(z[i]) is not int:\n raise TypeError(\"{} must be an integer\".format(s[i]))\n return(z[0] + z[1])", "def addition(self, first_value, second_value):\n return first_value + second_value", "def add_integer(a, b=98):\n if type(a) not in (int, float):\n raise TypeError(\"a must be an integer\")\n elif type(b) not in (int, float):\n raise TypeError(\"b must be an integer\")\n return int(a) + int(b)", "def suma(x, y):\n return x + y", "def add_integer(a, b=98):\n\n if not isinstance(a, (int, float)):\n raise TypeError(\"a must be an integer\")\n elif not isinstance(b, (int, float)):\n raise TypeError(\"b must be an integer\")\n else:\n return int(a) + int(b)", "def add(num1,num2):\n if(num2==0):\n return num1\n return add((num1^num2),(num1&num2)<<1)", "def add_integer(a, b=98):\n\n if not (isinstance(a, int) or isinstance(a, float)):\n raise TypeError(\"a must be an integer\")\n\n if not (isinstance(b, int) or isinstance(b, float)):\n raise TypeError(\"b must be an integer\")\n\n return (int(a) + int(b))", "def add(self, a, b):\n return a + b", "def add_integer(a, b=98):\n if type(a) is not int:\n if type(a) is float:\n a = int(a)\n else:\n raise TypeError(\"a must be an integer\")\n if type(b) is not int:\n if type(b) is float:\n b = int(b)\n else:\n raise TypeError(\"b must be an integer\")\n return (a + b)", "def add_integer(a, b=98):\n\n if not isinstance(a,(int, float)):\n raise TypeError(\"a must be an integer\")\n elif not isinstance(b,(int, float)):\n raise TypeError(\"b must be an integer\")\n\n return int(a) + int(b)", "def add_integer(a, b=98):\n if not isinstance(a, int) and not isinstance(a, float):\n raise TypeError(\"a must be an integer\")\n if not isinstance(b, int) and not isinstance(b, float):\n raise TypeError(\"b must be an integer\")\n return int(a) + int(b)", "def add(a,b):\n s = a+b\n return s", "def add_integer(a, b=98):\n if type(a) != int and type(a) != float:\n raise TypeError(\"a must be an integer\")\n if type(b) != float and type(b) != int:\n raise TypeError(\"b must be an integer\")\n else:\n return int(a) + int(b)", "def add_integer(a, b=98):\n\n if not(isinstance(a, int) or isinstance(a, float)):\n raise TypeError(\"a must be an integer\")\n if not(isinstance(b, int) or isinstance(b, float)):\n raise TypeError(\"b must be an integer\")\n return int(a) + int(b)", "def plus(self, a, b):\n return a + b", "def add_integer(a, b=98):\n if type(a) is not int and type(a) is not float:\n raise TypeError('a must be an integer')\n if type(b) is not int and type(b) is not float:\n raise TypeError('b must be an integer')\n res = a + b\n if res < 0:\n res = -res\n if res == float('inf'):\n raise ValueError('Float overflow')\n return int(a) + int(b)", "def add_integer(a, b=98):\n if type(a) != int and type(a) != float:\n raise TypeError(\"a must be an integer\")\n if type(b) != int and type(b) != float:\n raise TypeError(\"b must be an integer\")\n if type(a) == float:\n a = int(a)\n if type(b) == float:\n b = int(b)\n return a + b", "def sum(a,b):\n return a*b", "def addTwoNumbers(self, l1: 
ListNode, l2: ListNode) -> ListNode:\n result = ListNode(0)\n node = result\n carry = 0\n while True:\n if not l1 is None:\n x = l1.val\n l1 = l1.next\n l1_n = True\n else:\n x = 0\n l1_n = False\n if not l2 is None:\n y = l2.val\n l2 = l2.next\n l2_n = True\n else:\n y = 0\n l2_n = False\n res = x + y + carry\n if res == 0 and not l1_n and not l2_n:\n return result.next\n if res > 9:\n res = res % 10\n carry = 1\n else:\n carry = 0\n\n addNode(node, res)\n node = node.next", "def add_integer(a, b=98):\n handle_a = isinstance(a, (int, float))\n handle_b = isinstance(b, (int, float))\n if handle_a is False or handle_b is False:\n raise TypeError(\"{} must be an integer\"\n .format(\"a\" if handle_a is False else \"b\"))\n else:\n return int(a) + int(b)", "def ADD (self, n1, n2):", "def func(arg1, arg2):\n return arg1 + arg2", "def add(iter_1, iter_2):\n return tuple(int(a + b) for a, b in zip(iter_1, iter_2))" ]
[ "0.8246698", "0.8066365", "0.80515903", "0.8043867", "0.8028387", "0.8026784", "0.79746944", "0.79707396", "0.7965914", "0.79455554", "0.79455554", "0.79455554", "0.79455554", "0.79455554", "0.7943379", "0.79035944", "0.78871435", "0.7832339", "0.7813779", "0.7810564", "0.7802507", "0.779617", "0.7794414", "0.7769373", "0.7754216", "0.7734738", "0.7734638", "0.7734328", "0.7704479", "0.7702261", "0.7701941", "0.7697691", "0.76959467", "0.76904714", "0.7669047", "0.7668638", "0.7668638", "0.7661548", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.76565236", "0.7638256", "0.7638256", "0.7634697", "0.7612629", "0.7612629", "0.7612629", "0.7612629", "0.7612629", "0.7612629", "0.76046395", "0.759376", "0.75897646", "0.7587503", "0.7515674", "0.75083727", "0.74794096", "0.7446067", "0.7436075", "0.743241", "0.7429436", "0.74162394", "0.74125105", "0.7399798", "0.73964286", "0.7389642", "0.7325527", "0.7276703", "0.7244712", "0.7239855", "0.7214475", "0.7192257", "0.71664864", "0.71622145", "0.7138094", "0.71373636", "0.71297646", "0.7121", "0.7103436", "0.7096713", "0.70880693", "0.70736253", "0.7058647", "0.70585525", "0.7049361", "0.7044859", "0.70256305", "0.69652176", "0.6961636", "0.69598544" ]
0.72065014
81
To test the list of contributing centers
def test_ls_contributing(self):
    sv = nao(gto=mol)
    pb = prod_basis()
    pb.sv = sv
    pb.sv.ao_log.sp2rcut[0] = 10.0
    pb.prod_log = sv.ao_log
    pb.prod_log.sp2rcut[0] = 10.0
    pb.ac_rcut = max(sv.ao_log.sp2rcut)
    pb.ac_npc_max = 10
    lsc = pb.ls_contributing(0,1)
    self.assertEqual(len(lsc),10)
    lsref = [ 0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
    for i,ref in enumerate(lsref) :
        self.assertEqual(lsc[i],ref)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_distribution_centers(self):\n pass", "def test_center(self):\n\n self.assertTrue((self.cs.center == np.array([[0], [0]])).all())", "def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def find_centroid_for_each(self):", "def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")", "def c_centers(self):\n self.compute_c_centers(self)\n return self._c_centers", "def centers(self):\n return self.xc, self.yc", "def centre_of_points(list_of_points):\n\n cp = np.average(list_of_points, axis=0)\n return cp", "def exact_centroids(self, ptclCoords, phaseSpaceList):\n\n translation1 = np.array([-1,1,-1,1])\n translation2 = np.array([-1,-1,-1,-1])\n translation3 = np.array([1,-1,1,-1])\n\n ptcl1 = ptclCoords * translation1\n ptcl2 = ptclCoords * translation2\n ptcl3 = ptclCoords * translation3\n\n phaseSpaceList.append(ptcl1)\n phaseSpaceList.append(ptcl2)\n phaseSpaceList.append(ptcl3)", "def centers(self):\n def make_centers(x):\n return 0.25 * (x[:-1, :-1] + x[:-1, 1:] + x[1:, :-1] + x[1:, 1:])\n\n polar_centers = make_centers(self.polar_corners)\n azimuthal_centers = make_centers(self.azimuthal_corners)\n\n assert azimuthal_centers.shape == polar_centers.shape\n return polar_centers, azimuthal_centers", "def xcenters(self):\n return self.centers[0]", "def test_set_center_coord():\r\n da = xr.DataArray(np.arange(2), dims=\"lead\", coords={\"lead\": [\"1-3\", \"2-4\"]})\r\n actual = _set_center_coord(da).lead_center.values\r\n expected = [2.0, 3.0]\r\n assert (actual == expected).all()", "def p_centers(self):\n self.compute_p_centers(self)\n return self._p_centers", "def atlas_clusters():\n pass", "def centers(self):\n return tuple(0.5 * (e[1:] + e[:-1]) for e in self.edges)", "def random_centers(k,):\n #centr = np.random.random((k, pos.shape[1]))\n return", "def gen_centers(self):\n\n \"\"\"x_track = self.cs.discrete_rollout()\n t = np.arange(len(x_track))*self.dt\n # choose the points in time we'd like centers to be at\n c_des = np.linspace(0, self.cs.run_time, self.n_bfs)\n self.c = np.zeros(len(c_des))\n for ii, point in enumerate(c_des):\n diff = abs(t - point)\n self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]\"\"\"\n\n # desired activations throughout time\n des_c = jnp.linspace(0, self.cs.run_time, self.n_bfs)\n\n self.c = np.ones(len(des_c))\n for n in range(len(des_c)):\n # finding x for desired times t\n self.c[n] = jnp.exp(-self.cs.ax * des_c[n])\n self.c = jnp.array(self.c)", "def test_get_center():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_center = get_center(center, radius, 25)\n\n assert 
returned_center.get_x() == center.get_x()\n assert returned_center.get_y() == center.get_y()", "def test_lattice_centroid():\n\n # Setup\n lattice = tg.lattice_from_csv(os.path.join(\n sample_data_path, \"rdam_lattice.csv\"))\n\n expected_centroids = tg.cloud_from_csv(\n os.path.join(sample_data_path, \"rdam_centroids.csv\"))\n\n # Exercise\n computed_centroids = lattice.centroids\n\n # Verify\n np.testing.assert_allclose(\n expected_centroids, computed_centroids, rtol=1e-6, atol=0)\n\n # Cleanup", "def test_get_center_of_mass(self):\n symbols = ['C', 'H', 'H', 'H', 'H']\n coords = np.array([[0.0000000, 0.0000000, 0.0000000],\n [0.6269510, 0.6269510, 0.6269510],\n [-0.6269510, -0.6269510, 0.6269510],\n [-0.6269510, 0.6269510, -0.6269510],\n [0.6269510, -0.6269510, -0.6269510]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n for cm_coord in center_of_mass:\n self.assertEqual(cm_coord, 0.0)\n\n symbols = ['O', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H']\n coords = np.array([[1.28706525, 0.52121353, 0.04219198],\n [0.39745682, -0.35265044, -0.63649234],\n [0.36441173, -1.68197093, 0.08682400],\n [-0.59818222, 0.10068325, -0.65235399],\n [0.74799641, -0.48357798, -1.66461710],\n [0.03647269, -1.54932006, 1.12314420],\n [-0.31340646, -2.38081353, -0.41122551],\n [1.36475837, -2.12581592, 0.12433596],\n [2.16336803, 0.09985803, 0.03295192]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, symbols=symbols)\n self.assertAlmostEqual(center_of_mass[0], 0.7201, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.4880, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.1603, 3)\n\n numbers = [6, 6, 8, 1, 1, 1, 1, 1, 1]\n coords = np.array([[1.1714680, -0.4048940, 0.0000000],\n [0.0000000, 0.5602500, 0.0000000],\n [-1.1945070, -0.2236470, 0.0000000],\n [-1.9428910, 0.3834580, 0.0000000],\n [2.1179810, 0.1394450, 0.0000000],\n [1.1311780, -1.0413680, 0.8846660],\n [1.1311780, -1.0413680, -0.8846660],\n [0.0448990, 1.2084390, 0.8852880],\n [0.0448990, 1.2084390, -0.8852880]], np.float64)\n center_of_mass = get_center_of_mass(coords=coords, numbers=numbers)\n self.assertAlmostEqual(center_of_mass[0], -0.0540, 3)\n self.assertAlmostEqual(center_of_mass[1], -0.0184, 3)\n self.assertAlmostEqual(center_of_mass[2], -0.0000, 3)", "def get_object_centers(data, north_offset, east_offset, drone_altitude, safety_distance):\n points = []\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n points.append([north - north_offset, east - east_offset])\n return points;", "def centroids(img):\n _, _, _, centr = cv2.connectedComponentsWithStats(img)\n return centr[1:]", "def findClosetCentroids(X, centroids):\n\tm, n = X.shape\n\tK = centroids.shape[0]\n\tidx = np.zeros(m) # m\n\n\tfor i in range(m):\n\t\ttemp = np.tile(X[i, :], K).reshape(centroids.shape)\n\t\tidx[i] = np.argmin(np.sum((centroids - temp) ** 2, axis=1))\n\treturn idx", "def nt_3d_centers(cif_file, consider_all_atoms):\n result =[]\n try:\n structure = MMCIFParser().get_structure(cif_file, cif_file)\n except Exception as e:\n warn(f\"\\n{cif_file.split('/')[-1]} : {e}\", error=True)\n with open(runDir + \"/errors.txt\", \"a\") as f:\n f.write(f\"Exception in nt_3d_centers({cif_file.split('/')[-1]})\\n\")\n f.write(str(e))\n f.write(\"\\n\\n\")\n return result\n for model in structure:\n for chain in model:\n for residue in chain:\n if consider_all_atoms:\n temp_list = []\n for atom in residue:\n 
temp_list.append(atom.get_coord())\n lg = len(temp_list)\n summ = np.sum(temp_list, axis = 0)\n res_isobaricentre = [summ[0]/lg, summ[1]/lg, summ[2]/lg]\n result.append([res_isobaricentre[0], res_isobaricentre[1], res_isobaricentre[2]])\n else:\n coordinates = None\n for atom in residue:\n if atom.get_name() == \"C1'\":\n coordinates = atom.get_coord()\n if coordinates is None:\n # Residue has no C1'\n res = np.nan\n else:\n res = [coordinates[0], coordinates[1], coordinates[2]]\n result.append(res)\n return(result)", "def __get_centers(data, clusters):\n\n centers = [[] for i in range(len(clusters))]\n dimension = len(data[0])\n\n for index in range(len(clusters)):\n point_sum = [0.0] * dimension\n\n for index_point in clusters[index]:\n point_sum = list_math_addition(point_sum, data[index_point])\n\n centers[index] = list_math_division_number(point_sum, len(clusters[index]))\n\n return centers", "def centers(pos):\n number_of_nodes = len(pos)\n ε, δ = 0.03, 0.1\n r = 1 / (ε**2) * (int(np.log2(number_of_nodes - 1)) + 1 + np.log(1/δ))\n k = int(np.sqrt(r))\n k = np.min((k, number_of_nodes))\n \n return k_means_pp(k, pos)", "def _identify_centroids( locations, medians ):\n log.info(\"Identifying the centroid of each amplicon\")\n min_pos = min([s for s, e in locations.itervalues()])\n max_pos = max([e for s, e in locations.itervalues()])\n mid_pos = (min_pos + max_pos) / 2\n five_prime, three_prime = _split_medians( medians, mid_pos )\n #five_prime_center = _calculate_centroid( five_prime )\n #three_prime_center = _calculate_centroid( three_prime )\n five_prime_center = (min_pos + mid_pos) / 2\n three_prime_center = (max_pos + mid_pos) / 2\n return (five_prime_center, three_prime_center)", "def test_constructor_with_points(self):\n points = np.array([\n [0.1, 0.1, 0.1],\n [1.1, 1.1, 1.1],\n [1.3, 1.2, 1.4]])\n vg = VoxelGrid(0.5, min_corner=Vector3f(0, 0, 0), points=points)\n centers = vg.voxel_centers()\n expected_centers = np.array([\n [0.25, 0.25, 0.25],\n [1.25, 1.25, 1.25]])\n np.testing.assert_array_almost_equal(centers, expected_centers)", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def get_clusters(self,points):\n self.points = points\n self.__dabest = [self.__cmeans(points,i) for i in range(self.__start,self.__end)]\n ##self.hull = \n return self.__dabest", "def newCenter(x, y, group, iteration, lastKSet1, lastKSet2):\n\tsumOneX = 0\n\tsumOneY = 0\n\tsumTwoX = 0\n\tsumTwoY = 0\n\tnumOne = 0\n\tnumTwo = 0\n\n\tfor i in range(len(group[iteration])):\n\t\tif (group[iteration][i] == 1):\n\t\t\tsumOneX += x[i]\n\t\t\tsumOneY += y[i]\n\t\t\tnumOne += 1\n\t\telse:\n\t\t\tsumTwoX += x[i]\n\t\t\tsumTwoY += y[i]\n\t\t\tnumTwo += 1\n\n\tif(numOne == 0):\n\t\tkSet1 = lastKSet1\n\tif(numTwo == 0):\n\t\tkSet2 = lastKSet2\n\telse:\n\t\tkSet1 = [sumOneX/numOne, sumOneY/numOne]\n\t\tkSet2 = [sumTwoX/numTwo, sumTwoY/numTwo]\n\n\treturn (kSet1, kSet2)", "def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n 
self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)", "def center_of_coor(coordinates):\n return (np.sum(coordinates, axis=0) / coordinates.shape[0])", "def cluster_centers(X, k, bounds):\n\n mu = random_plane_points(k, bounds)\n kmeans = sklearn.cluster.KMeans(init=mu)\n kmeans.fit(X)\n\n # Restart if a mean has no associated points\n if len(set(kmeans.labels_)) < k:\n return cluster_centers(X, k, bounds)\n\n return kmeans.cluster_centers_", "def compute_centers(points, labels):\n df = pd.DataFrame(points)\n return df.groupby(labels).mean().values", "def _select_centers(self, X):\n random_args = np.random.choice(len(X), self.hidden_shape)\n centers = X[random_args]\n return centers", "def _expectation(self, centroids, x_assignee):\n\n\n\t\t# Assign data plot to each cluster\n\t\tr_list = np.array([], dtype= np.int)\n\n\t\tN, D = x_assignee.shape\n\n\t\tfor i in np.ndindex(x_assignee.shape[0]):\n\t\t\tr = np.zeros(self._k, dtype=np.int)\n\t\t\tdist = np.linalg.norm(x_assignee[i] -centroids, axis = 1)\n\t\t\tr[np.argmin(dist)]=1\n\t\t\tr_list = np.append(r_list, r)\n\t\tr_list = r_list.reshape((-1, self._k))\n\t\n\t\treturn r_list", "def centroid(coords,masses,divider):\n\treturn np.array([np.dot(masses[r].T,coords[r])/np.sum(masses[r]) for r in divider])", "def find_center(\n x,\n centers,\n distance,\n):\n return np.argmin(\n [distance(x, centers[i, :]) for i in range(centers.shape[0])])", "def get_cell_centroid2(cents, extents):\n cells_in_ee = np.empty(0,int)\n for i in range(len(cents)):\n c = cents[i]\n if( (c > extents[0]).all() and (c <= extents[1]).all() ):\n cells_in_ee = np.append(cells_in_ee, [i], axis=0)\n\n return cells_in_ee", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < 
min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def init_centroids(self, data_points):\n # print(\"Init centroid\")\n # return list(map(lambda x: x[1], random.sample(labelled_data, self.k)))\n\n # Project the data: this step will take several seconds\n\n centroids_scaled = self.naive_sharding(data_points, self.k)\n return list(centroids_scaled)\n\n #sample = np.random.permutation(len(labelled_data))[:self.k]\n\n #return list(map(lambda x: labelled_data[x][1], sample))", "def computeCenters3d(self, data):\n\n\n for i in range(self.nPoints):\n print(\"Label of point \", i, \" is \", self.labels[i])\n for j in range(3):\n self.centers[self.labels[i]][j] += data[i][j]\n\n for c in range(self.n):\n for j in range(3):\n self.centers[c][j] /= self.tots[c]", "def assign_centroids(data, centroids) :\r\n distances = []\r\n for centroid in centroids :\r\n distances.append(np.linalg.norm(data - centroid, axis=1))\r\n distances = np.column_stack(distances)\r\n assigned_centroids = np.argmin(distances, axis=1).flatten()\r\n return assigned_centroids", "def randCent(data,k):\n index = set()\n while len(index) != k:\n index.add(random.randint(0, data.shape[0]))\n index = list(index)\n centroids = data[index]\n return centroids", "def populateCenters(matrix, row, col, frame, midRange, roughness, perturbance):\n maxIndex = matrix.shape[0]-1\n quarterRange = midRange/2\n\n pf = perturbanceFactor(matrix.shape[0], midRange, perturbance)\n noiseLevel = roughness * pf\n\n \"\"\"\n For each subdivided cube, getIndexRef is used to get the indicies, and center is used\n to determine the points that should be averaged and the point to be set. 
\n setValue does the calculations.\n \"\"\"\n indexRef = getIndexRef(row, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n indexRef = getIndexRef(row + midRange, col + midRange, frame + midRange, quarterRange, maxIndex)\n setValue(matrix, center, indexRef, noiseLevel)\n \n\n #printAllowCancel(matrix)", "def get_geom_center(coordlist):\n return sum(coordlist) / len(coordlist)", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy", "def Test_GenerateClusters(numClusters, pts_minmax=(10, 100), x_mult=(1, 4), y_mult=(1, 3), x_off=(0, 50), y_off=(0, 50)):\n\n # Initialize some empty lists to receive cluster member positions\n testClustersx = []\n testClustersy = []\n # Genereate random values given parameter ranges\n n_points = np.random.randint(pts_minmax[0], pts_minmax[1], numClusters)\n x_multipliers = np.random.randint(x_mult[0], x_mult[1], numClusters)\n y_multipliers = np.random.randint(y_mult[0], y_mult[1], numClusters)\n x_offsets = np.random.randint(x_off[0], x_off[1], numClusters)\n y_offsets = np.random.randint(y_off[0], y_off[1], numClusters)\n\n # Generate random clusters given parameter values\n for idx, npts in enumerate(n_points):\n xpts = np.random.randn(npts) * x_multipliers[idx] + x_offsets[idx]\n ypts = np.random.randn(npts) * y_multipliers[idx] + y_offsets[idx]\n testClustersx.append(xpts)\n testClustersy.append(ypts)\n\n # Convert to a single dataset in OpenCV format\n testClusters = np.float32((np.concatenate(testClustersx), np.concatenate(testClustersy))).transpose()\n\n # Return cluster positions\n return testClusters, testClustersx, testClustersy", 
"def cluster(ops, seeds, mut):\n\n old_centers = []\n centers = copy.deepcopy(seeds)\n\n while (set(centers) != set(old_centers)):\n\n old_centers = copy.deepcopy(centers)\n centers = []\n groups = grouping(old_centers, ops, mut)\n\n for i in range(len(groups)):\n result = group_evaluation(groups[i], mut)\n centers.append(result)\n\n return centers", "def get_viable_pos_cluster_centers(self, index, radius, cutoff, num_pts=None):\n\n viable_pos = self.get_viable_positions(index, radius, cutoff, num_pts)\n ms = MeanShift(bin_seeding=True)\n ms.fit(np.array(viable_pos))\n cluster_centers = ms.cluster_centers_\n return cluster_centers", "def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center", "def compute_centers(landmarks):\n b = landmarks.shape[0]\n lms = landmarks.reshape((b, -1, 3))\n\n eye_left_centers = lms[:, EYE_LEFT_CONTOUR, :2].mean(axis=1)\n eye_right_centers = lms[:, EYE_RIGHT_CONTOUR, :2].mean(axis=1)\n mouth_centers = lms[:, MOUTH_INNER_CONTOUR, :2].mean(axis=1)\n\n a = np.concatenate((eye_left_centers, eye_right_centers, mouth_centers), axis=1)\n\n return a", "def assign_k_clusters(data, centers):\n clusters = []\n center_data = np.take(data, centers, axis=0)\n best_center = np.argmax(center_data, axis=0)\n for i in range(len(centers)):\n inds = [ind for ind in np.where(best_center == i)[0]]\n clusters.append(inds)\n return clusters", "def get_labelled_centers(image):\n\n # Find all labelled areas, disable caching so properties are only calculated if required\n rps = measure.regionprops(image, cache = False)\n\n return [(r.label, r.centroid) for r in rps]", "def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.", "def __eq__(self, clst):\n return self.centroid == clst.centroid", "def get_component_centers(atoms, unwrap=False):\n if unwrap:\n atoms = unwrap_atoms_from_cell(atoms)\n pos = atoms.get_positions()\n masses = atoms.get_masses()\n components = get_connected_components(pos, atoms.get_chemical_symbols())\n centers = []\n for c in components:\n centers.append(np.dot(masses[c], pos[c]) / masses[c].sum())\n return np.array(centers)", "def test_centeredEquation(self):\n\n A33, K = self.cs.centeredEquation\n self.assertTrue((self.A33 == A33).all())\n self.assertEqual(K, 1.)", "def center(self):\n return [self.position[i]+self.radius for i in range(2)]", "def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex 
+= 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters", "def get_patch_centers(self):\n rows, cols = np.where(self.inv_overlap_mask)\n patch_centers = tuple(zip(rows, cols))\n # diff = np.diff(patch_centers)\n # ind_stop_cont = np.where(np.abs(np.diff(np.reshape(diff, diff.shape[0]))) > 1)[0][0]\n self.patch_centers = patch_centers[:: self.sampling_int // 2]\n print(\"# of samples: {}\".format(len(self.patch_centers)))", "def findCentroid(pivlist):\n centroid = [0.0, 0.0, 0.0]\n for p in pivlist:\n centroid[0] += p[0]\n centroid[1] += p[1]\n centroid[2] += p[2]\n\n centroid[0] /= len(pivlist)\n centroid[1] /= len(pivlist)\n centroid[2] /= len(pivlist)\n return centroid", "def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num", "def centroids(self):\n return self.mdm_.covmeans_", "def calcCentroids(data_points, clusters):\n #initiate empty list for the new centroids\n newCentroids = []\n\n #For position in each cluster, calculate the average for each position\n #The lists are zipped so each position can have an average\n for c in clusters:\n newCentroids.append(map(calcAverage, zip(*c)))\n\n #This is the check that a centroid is not empty. If a centroid is empty,\n #delete it, the filled centroids are added to the new list\n correctCentroid = []\n for centroid in newCentroids:\n #If centroid is not empty\n if centroid:\n correctCentroid.append(centroid)\n\n return len(correctCentroid), correctCentroid", "def centroid(self, region_list):\n centroid_list = [] # a list of [(distance from robot, centroid)]\n robot = map_helper.map_to_world(self.start[0], self.start[1], self.resolution, self.x_offset, self.y_offset)\n\t#rospy.loginfo(region_list)\n for region in region_list:\n n = len(region)\n i = math.trunc(n/2)\n centroid = region[i]\n\n x = abs(centroid[0] - robot[0])\n y = abs(centroid[1] - robot[1])\n dist = math.hypot(x, y)\n centroid_list.append((dist, centroid))\n return self.smallest_centroid(centroid_list)", "def check_clusters (centers, threshold=30.) :\n ones = numpy.ones(nclusters)\n distances = get_distance(ones*centers.reshape((nclusters,1)), ones*centers.reshape((1,nclusters)))\n distances = distances[numpy.triu(distances)!=0]\n return (abs(distances-120)).max() <= threshold", "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1", "def test_get_distribution_center_orders(self):\n pass", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def test_pcca_1():\n n = 1000 #number of data points\n kk = 3 #number of points where data accumulates\n k = 10 #number of cluster_centers\n factor = 0.1 #how much is the data perturbed\n data = np.zeros((n,1))\n for i in range(0,n):\n data[i] = i % kk + factor * np.random.rand() * math.pow(-1,int(2*np.random.rand()))\n #plt.scatter(data[:,0],np.zeros((n,1)))\n \n clustering = cl.KMeans(data,k)\n cluster_centers = clustering.cluster_centers\n cluster_labels = clustering.cluster_labels\n \n #plt.scatter(cluster_centers[:],np.zeros((k,1)),c='r')\n \n estimator = est.Estimator(cluster_labels, 1, 1)\n matrix = estimator.reversible_transition_matrix\n msm = ana.MarkovStateModel(matrix)\n \n n_pcca_states = 4;\n #fig, ax = plt.subplots(figsize=(6.5, 5))\n pcca_labels = msm.metastable_set_assignments(n_pcca_states)\n #im = ax.scatter(cluster_centers[:, 0], np.zeros((k,1)), c=pcca_labels, s=200)\n #cbar = fig.colorbar(im, ax=ax)\n error = 0;\n for j in range(0,kk):\n for i in range(0,k):\n if (round(cluster_centers[i,0]) == j):\n test = i\n for i in range(0,k):\n if (np.abs(cluster_centers[i,0] - cluster_centers[test,0]) < 2*factor):\n if (not pcca_labels[i] == pcca_labels[test]):\n error = 1\n print(error)\n assert_true(error == 0)", "def _evaluate_centroids(self):\n\n for c in self.centroids:\n _prev_cent = self._prev_centroids[c]\n _curr_cent = self.centroids[c]\n\n if self._euclidean_distance(_prev_cent, _curr_cent) > self.tol:\n return\n self._optimized = True", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def get_centroids(self):\n if not self._learned:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def getBeliefsCentroid(self, idx):\n x = 0.0\n y = 0.0\n total = 0.0\n for p in self.beliefs[idx]:\n x += p[0]\n y += p[1]\n total += 1.0\n return (round(x / total), round(y / total))", "def get_centroids(self):\n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def get_centroids(self):\n if not self._trained:\n raise 
ValueError(\"SOM not trained yet\")\n return self._centroid_grid", "def __get_initial_centroid(self, x_train, seed=None):\n if self.init == \"random\":\n # randomly select n_cluster point from the input dataset\n if seed:\n random.seed(seed)\n return np.asarray(random.choices(x_train, k=self.n_cluster))", "def test_main_on_cluster(self):\r\n\r\n command = \" \".join([\"denoiser.py\",\r\n \"--force\", \"-o\", self.test_dir, \"-c\", \"-n\", \"2\",\r\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME,\r\n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME])\r\n\r\n result = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT).stdout.read()\r\n self.result_dir = self.test_dir\r\n\r\n observed = \"\".join(list(open(self.result_dir + \"centroids.fasta\")))\r\n self.assertEqual(observed, self.expected)", "def is_center(self):\n if self.pupils_located:\n return self.is_right() is not True and self.is_left() is not True", "def test_centroids_mask():\n data = np.ones((2, 2)).astype(np.float)\n mask = [[False, False], [True, True]]\n centroid = centroid_com(data, mask=None)\n centroid_mask = centroid_com(data, mask=mask)\n assert_allclose([0.5, 0.5], centroid, rtol=0, atol=1.e-6)\n assert_allclose([0.5, 0.0], centroid_mask, rtol=0, atol=1.e-6)", "def private_centers(\n data, norm, epsilon, delta\n):\n sigma = np.sqrt(2 * np.log(1.25 / delta)) / epsilon\n n, d = data.shape\n return np.mean(data, 0) + norm / n * np.random.normal(0, sigma, d)", "def ycenters(self):\n return self.centers[1]", "def get_center_of_mass_allies(self,obs):", "def _get_center_pos(self):\n if not hasattr(self, 'lon_center'):\n raise ValueError('ERROR: You need to specify first the center position!')\n d = np.abs((self.x.lon - self.lon_center) ** 2. + (self.x.lat - self.lat_center) ** 2.)\n dmin = d.min()\n m = d == dmin\n\n idx = np.indices(d.shape)\n i = idx[0][m][0]\n j = idx[1][m][0]\n\n if (np.abs(1. - self.x.lon[i, j] / self.lon_center) > 0.05) or (np.abs(1. 
- self.x.lat[i, j] / self.lat_center) > 0.05): # at least 5% acc.\n print 'lon: ', self.x.lon[i, j], self.lon_center\n print 'lat: ', self.x.lat[i, j], self.lat_center\n i = None\n j = None\n return i, j", "def getcenter(self):\n return self.centro.cartesianas()", "def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)", "def calculate_cluster_center(self, threshold):\n gamma = self.gamma\n self.cluster_center = np.where(gamma >= threshold)[0]", "def findcenters(image, diameter = 10, clip_limit=0.04, threshold = 0.2):\n def close_divsor(n, m):\n n = int(n)\n m = int(m)\n if m > n:\n return n\n while n % m:\n m += 1\n return m\n\n image = norm(image)\n kernel_size = (close_divsor(image.shape[0], 2*diameter), close_divsor(image.shape[1], 2*diameter))\n image = equalize_adapthist(image, kernel_size=kernel_size, clip_limit=clip_limit)\n image = norm(image)\n coords = blob_log(image, min_sigma=diameter/2.335, max_sigma=diameter/2.335*2, num_sigma=10, threshold=threshold)\n return array([x[:2].astype(int) for x in coords if x[0] > diameter/2 and x[1] > diameter/2 and image.shape[0] - x[0] > diameter/2 and image.shape[1] - x[1] > diameter/2])", "def test_centroids_from_polygon_data(self):\n\n for vectorname in ['kecamatan_jakarta_osm.shp',\n 'OSM_subset.shp']:\n\n # Read and verify test data\n filename = '%s/%s' % (TESTDATA, vectorname)\n p_layer = read_layer(filename)\n p_geometry = p_layer.get_geometry()\n p_attributes = p_layer.get_data()\n N = len(p_layer)\n assert FEATURE_COUNTS[vectorname] == N\n\n # Read reference centroids generated by Qgis\n filename = '%s/%s' % (TESTDATA, vectorname[:-4] + '_centroids.shp')\n r_layer = read_layer(filename)\n r_geometry = r_layer.get_geometry()\n r_attributes = r_layer.get_data()\n assert len(r_layer) == N\n\n # Compute centroid data\n c_layer = convert_polygons_to_centroids(p_layer)\n assert len(c_layer) == N\n c_geometry = c_layer.get_geometry()\n c_attributes = c_layer.get_data()\n\n # Check that attributes are the same\n for i in range(N):\n p_att = p_attributes[i]\n c_att = c_attributes[i]\n r_att = r_attributes[i]\n for key in p_att:\n assert key in c_att\n assert c_att[key] == p_att[key]\n\n assert key in r_att\n assert c_att[key] == r_att[key]\n\n # Check that coordinates are the same up to machine precision\n for i in range(N):\n c_geom = c_geometry[i]\n r_geom = r_geometry[i]\n\n assert numpy.allclose(c_geom, r_geom,\n rtol=1.0e-8, atol=1.0e-12)\n\n # Check that each centroid fall within its polygon\n for i in range(N):\n point = c_geometry[i]\n polygon = p_geometry[i]\n assert is_inside_polygon(point, polygon, closed=False)\n\n # Write to file (for e.g. 
visual inspection)\n out_filename = unique_filename(prefix='centroid', suffix='.shp')\n #print 'writing to', out_filename\n c_layer.write_to_file(out_filename)", "def points_on_circumference(center=(0, 0), r=50, n=100):\n\treturn [\n (\n center[0]+(cos(2 * pi / n * x) * r), \n center[1] + (sin(2 * pi / n * x) * r) \n\n ) for x in range(0, n + 1)]", "def center(self):\n cyl = (len(self.cells) - 1) / 2 # Lower and upper bound of list slices\n cxl = (len(self.cells[0]) - 1) / 2\n cyu = len(self.cells) / 2 + 1\n cxu = len(self.cells[0]) / 2 + 1\n\n # candidates are all the cells in the middle,\n # accounting for even dimensions\n candidates = []\n\n for r in self.cells[cyl:cyu]:\n candidates += r[cxl:cxu]\n\n # center is the candidate with the most carrots\n center = max(candidates, key=lambda c: c.carrots)\n\n return center", "def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))", "def test_get_lcp(self):\n\n expected = [0, 6, 5, 0, 5, 4, 0, 4, 3, 0, 3, 2, 0, 2, 1, 0, 1, 0, 0, 0, 0]\n\n self.assertEqual(self.sf.get_lcp(), expected)", "def assign_points(data, centers):\n result = {c: [] for c in centers}\n for point in data:\n min_distance = float(\"inf\")\n for c in centers:\n dist = euclidean_distance(point, c)\n if dist < min_distance:\n min_distance = dist\n min_center = c\n result[min_center].append(point)\n\n return result", "def initial_clusters(self, points):\n groups = {}\n d = int(256 / (self.initial_k))\n for i in range(self.initial_k):\n j = i * d\n groups[(j, j, j)] = []\n for i, p in enumerate(points):\n # if i%100000 == 0:\n # print('processing pixel:', i)\n go = min(groups.keys(), key=lambda c: euclidean_distance(p, c)) \n groups[go].append(p)\n return [g for g in groups.values() if len(g) > 0]", "def center_coords(self):\n coords = set()\n for x in range(self.radius, self.container.width - self.radius):\n for y in range(self.radius, self.container.height - self.radius):\n coords.add((x, y))\n\n return coords", "def _init_homolog_centers(self, method=\"kmeans\", min_spot_num=2, axis_infos=Axis3D_infos):\n if hasattr(self, 'chr_2_homolog_centers') and not self.overwrite:\n if self.verbose:\n print(f\"- directly return chr_2_homolog_centers\")\n return\n if method == 'kmeans':\n from sklearn.cluster import KMeans\n # chr_2_init_centers\n self.chr_2_homolog_centers = {}\n self.chr_2_cand_hzxys = {}\n self.chr_2_cand_ids = {}\n # loop through chrs\n for _chr_name, _exp_num in self.chr_2_copyNum.items():\n _chr_coords_df = self.merged_coords.loc[self.merged_coords['chr']==str(_chr_name)]\n # if not spots exists, skip\n if len(_chr_coords_df) < min_spot_num:\n continue\n # get coordinates\n _chr_hzxys = _chr_coords_df[['center_intensity']+[f\"center_{_x}\" for _x in axis_infos]].values\n _chr_ids = _chr_coords_df['chr_order'].values\n # append\n self.chr_2_cand_hzxys[_chr_name] = _chr_hzxys\n self.chr_2_cand_ids[_chr_name] = _chr_ids\n # calculate weights\n _uinds, _uind_counts = np.unique(_chr_ids, return_counts=True)\n _ind_2_weight = {_i:1/_c for _i,_c in zip(_uinds, _uind_counts)}\n _chr_weights = 
np.array([_ind_2_weight[_i] for _i in _chr_ids])\n # K-means\n if method =='kmeans':\n _model = KMeans(n_clusters=_exp_num, random_state=0)\n _model.fit(_chr_hzxys[:,1:], sample_weight=_chr_weights)\n #_init_labels = _model.labels_\n _init_centers = _model.cluster_centers_\n # save for now\n self.chr_2_homolog_centers[_chr_name] = _init_centers" ]
[ "0.7282083", "0.7242888", "0.6997606", "0.677467", "0.6724305", "0.6636216", "0.65077364", "0.6447535", "0.641797", "0.6415763", "0.626583", "0.6246309", "0.62288153", "0.6155707", "0.61542696", "0.6153966", "0.6150618", "0.6147836", "0.6143063", "0.6143059", "0.61429036", "0.6137567", "0.6100806", "0.60992515", "0.6073463", "0.60703754", "0.6056925", "0.60464686", "0.60198617", "0.5996103", "0.5990492", "0.59888184", "0.59886104", "0.5974709", "0.59703594", "0.59242254", "0.5922968", "0.59217894", "0.59047645", "0.59028125", "0.58859974", "0.587853", "0.58783525", "0.58706284", "0.58703625", "0.5868754", "0.5867008", "0.58637154", "0.5863532", "0.5861345", "0.5861345", "0.5859162", "0.5856759", "0.5851079", "0.5849462", "0.5846781", "0.58456546", "0.5840214", "0.5839356", "0.58381605", "0.5837653", "0.5837047", "0.58312035", "0.5827132", "0.58261603", "0.5820902", "0.58205855", "0.5813481", "0.57996696", "0.579465", "0.57906175", "0.57868147", "0.57785785", "0.5778096", "0.5777929", "0.5773146", "0.5766156", "0.5764774", "0.5760681", "0.5760681", "0.57554483", "0.57550824", "0.5752841", "0.57401997", "0.57386446", "0.5736146", "0.5734064", "0.5727524", "0.5725369", "0.57227826", "0.57201725", "0.5718333", "0.5717763", "0.5717629", "0.571494", "0.57138234", "0.5713776", "0.5711889", "0.5704093", "0.5702143", "0.5701036" ]
0.0
-1
Initialize puzzle with default height and width. Returns a Puzzle object.
def __init__(self, puzzle_height, puzzle_width, initial_grid=None):
    self._height = puzzle_height
    self._width = puzzle_width
    self._grid = [[col + puzzle_width * row
                   for col in range(self._width)]
                  for row in range(self._height)]

    if initial_grid != None:
        for row in range(puzzle_height):
            for col in range(puzzle_width):
                self._grid[row][col] = initial_grid[row][col]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid = None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, width=7, height=6):\n self.width = width\n self.height = height\n self.board = self.createBoard()", "def __init__(self, puzzle):\n self.puzzle = puzzle", "def __init__(self, size_of_puzzle, number_of_rows=2):\n # Define the puzzle properties\n self.puzzle_length = size_of_puzzle\n self.number_of_rows = number_of_rows\n self.puzzle_width = int(size_of_puzzle / number_of_rows)\n\n # Define the costs for each of the types of moves\n regular_move_cost = 1\n wrapping_move_cost = 2\n diagonal_move_cost = 3\n\n # These are all of the 'regular' moves\n self.cost_of_move_up = regular_move_cost\n self.cost_of_move_down = regular_move_cost\n self.cost_of_move_right = regular_move_cost\n self.cost_of_move_left = regular_move_cost\n\n # These are all of the 'wrapping' moves\n self.cost_of_wrap_move = wrapping_move_cost\n\n # These are all of the 'diagonal' moves\n self.cost_of_diagonal_adjacent = diagonal_move_cost\n self.cost_of_diagonal_across = diagonal_move_cost", "def __init__(self, width, height):\n\n self.WIDTH = width\n self.HEIGHT = height\n\n self.active_piece = None\n\n # 0,0 is defined as the top left\n self.board = [[0] * self.WIDTH for y in range(self.HEIGHT)]\n\n self.cleared_lines = 0\n self.score = 0", "def __init__(self, initial, size, horizontalChunks, verticalChunks, goal = \"\"):\n\t\tself.initial = initial\n\t\tself.size = size\n\t\tself.horChunks = horizontalChunks\n\t\tself.verChunks = verticalChunks\n\n\t\t# Goal holds the solution, once we find it.\n\t\tself.goal = goal\n\n\t\t# For a puzzle of size n, 
initializes blank n x n 2d array\n\t\tself.graph = [[0 for x in range(self.size)] for x in range(self.size)] \n\t\tfor i in range (0,self.size):\n\t\t\tfor j in range (0,self.size):\n\t\t\t\tself.graph[i][j] = initial[i*self.size + j] \n\t\tself.initial = \"\"", "def __init__(self, width, height):\n self.clean_tiles = []\n self.width = width\n self.height = height", "def __init__(self, width, height):\n self.w = width\n self.h = height\n self.cleanTiles = []\n self.tiles = [[False] * width for i in range(height)]\n self.cleaned = 0", "def __init__(self, size):\n self.size = size\n self.num_queens_placed = 0\n self.board = self.generate_board()", "def __init__(self,width=8,height=8):\n\t\tif height > 32 or width < 1 or height < 1:\n\t\t\traise \"Height must be between 1 and 32, width must be greater than 0\"\n\n\t\tself.Width = width\n\t\tself.Height = height\n\t\tself.Grid = [0] * width # we'll use 8 bits of the number in the array", "def __init__(self, grid_size, num_pokemon):\n self._game_board = UNEXPOSED * (grid_size ** 2)\n self._num_pokemon = num_pokemon\n self._pokemon_location = self.generate_pokemons(grid_size)", "def __init__(self, height, width):\n self.height, self.width = height, width\n self.board = self.create_board_matrix(height, width)\n self.refresh_rate = 0.3\n self.points = 0 # pieces successfully added\n self.level = 1", "def __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.numTiles = width*height\n\t\tself.tiles = []\n\t\tfor i in range(0, width):\n\t\t\tfor j in range(0, height):\n\t\t\t\tself.tiles.append(Tile(i, j))", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.numTiles = width * height\n\n for w in range(0,width):\n for h in range(0,height):\n #NOTE--float width,height as tuple keys don't work?!\n #so could not use Position(), since those x,y's can be floats\n #tuples of ints (w,h) could be used\n self.tiles[(w,h)] = 0 # value of key tuple (w,h) = 0 = dirty (or vice versa, 1 = clean)\n #self.printTiles()\n #raise NotImplementedError", "def __init__(self, size: int) -> None:\n if not MIN_BOARD_SIZE <= size or not MAX_BOARD_SIZE >= size or size % 2 != 0:\n raise BoardSizeError(f\"{size} is invalid size!\")\n\n self.size = size\n\n center = size // 2 - 1\n self._grid = [[disc.get_disc()] * size for _ in range(size)]\n self._grid[center][center] = disc.get_disc(Player.WHITE)\n self._grid[center][center + 1] = disc.get_disc(Player.BLACK)\n self._grid[center + 1][center] = disc.get_disc(Player.BLACK)\n self._grid[center + 1][center + 1] = disc.get_disc(Player.WHITE)", "def __init__(self):\n self.rows = None\n self.columns = None\n self.squares = None\n # max is useful as a way to track range for iteration, and also as a way\n # to track the maximum number in any spot.\n self.max = 0", "def __init__(self, width, height, walls = None):\r\n self.width = width\r\n self.height = height\r\n if walls:\r\n self.walls = walls\r\n else:\r\n self.walls = []\r\n self.goals = []\r\n self.tiles = []\r\n self._clear_map()", "def __init__ (self, cols = 6, rows = 7, requiredToWin = 4):\r\n\t\tself.cols = cols\r\n\t\tself.rows = rows\r\n\t\tself.win = requiredToWin\r\n\t\tself.board = [[NONE] * rows for _ in range(cols)]", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def __init__(self, origin_x=-2.5, origin_y=-2.5, resolution=.1,\n width=50, height=50):\n self.origin_x = origin_x\n self.origin_y = origin_y\n self.resolution = resolution\n self.width = width\n 
self.height = height\n self.grid = np.zeros((height, width))", "def __init__(self, squares=None, ncols=8, nrows=8):\n self.ncols = ncols\n self.nrows = nrows\n\n if not squares:\n self.squares = dict((i, None) for i in xrange(ncols * nrows))\n\n # 0 begins as the top of the board, making it black\n for i in xrange(ncols * 3):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"black\")\n # red would be the bottom 3 rows\n for i in xrange(ncols * (nrows - 3), ncols * nrows):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"red\")", "def __init__(self, points, width, height):\n \n # Length in pixels\n self.length = self.__getSideLength(width, height)\n # Screen size in pixels\n self.screenSize = self.length * (width+2), self.length * (height+2)\n # Width in tiles\n self.width = width\n # Height in tiles\n self.height = height\n \n # Creating the static and normal tiles.\n self.statics = self.__createStatics(points)\n self.rectangles, self.centrePoints = self.__createTiles(self.length, width, height)", "def __init__(self, width, height):\r\n\t\tself.grid = np.zeros(width*height, dtype=np.bool).reshape(width,height)\r\n\t\tself.width = width\r\n\t\tself.height = height", "def __init__(self, grid_height, grid_width):\n self._grid_height = grid_height\n self._grid_width = grid_width\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def __init__(self, width, height, x, y):\n self.w = width\n self.h = height\n self.x = x\n self.y = y", "def __init__(self, size=4, text=None):\n if size < 2: raise ValueError(\"Board has to be at least 2 by 2 tiles large\")\n self._size = size\n size_sq = size * size\n self.original = text\n\n if text != None:\n values = [int(n) for n in text.split(\",\")]\n\n # make sure we have valid input\n if sorted(values) != list(range(size_sq)):\n raise ValueError(\"Invalid tile values supplied\")\n else:\n # we are not given a string input, create a plain board\n values = list(range(1, size_sq)) + [0]\n \n # list comprehension voodoo to put the values into a nested list\n self._tiles = [[n if n > 0 else None for n in values[y * size:(y + 1) * size]] for y in range(size)]\n \n # store the location of the empty tile\n self._empty = values.index(0) % size, values.index(0) // size\n \n # store the goal location of each tile\n self.goals = {}\n for x in range(size_sq):\n self.goals[x + 1] = x % size, x // size\n self.goals[None] = self.goals[x + 1]", "def setPuzzle():\n matrix = tuple() # This will be a tuple of tuples to hold the original puzzle set\n\n matrix += ((0, 25, 0, 21, 0, 4, 0, 8, 0, 17, 0),)\n matrix += ((12, 22, 13, 8, 18, 8, 0, 18, 2, 13, 8),)\n matrix += ((0, 14, 0, 24, 0, 21, 0, 22, 0, 22, 0),)\n matrix += ((5, 13, 26, 20, 0, 16, 20, 9, 13, 7, 13),)\n matrix += ((0, 7, 0, 5, 0, 20, 0, 3, 0, 0, 9),)\n matrix += ((20, 16, 22, 0, 0, 0, 0, 0, 21, 17, 3),)\n matrix += ((17, 0, 0, 8, 0, 23, 0, 1, 0, 21, 0),)\n matrix += ((9, 21, 10, 11, 4, 20, 0, 10, 21, 3, 18),)\n matrix += ((0, 18, 0, 4, 0, 8, 0, 13, 0, 3, 0),)\n matrix += ((7, 22, 6, 21, 0, 18, 21, 25, 17, 20, 18),)\n matrix += ((0, 9, 0, 18, 0, 19, 0, 8, 0, 15, 0),)\n\n return matrix", "def __init__(self, width: int, height: int, food: List[List[int]]):\n self.n = height\n self.m = width\n self.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\n self.food = collections.deque(food)\n self.snake_set = {(0, 0)}\n self.snake = collections.deque([(0, 0)])", "def 
__init__(self, width, height):\n roomDict = {}\n for w in range(width):\n for h in range(height):\n roomDict[Position(w, h)] = 'dirty'\n self.tiles = roomDict\n self.width = width\n self.height = height", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def clone(self):\r\n new_puzzle = Puzzle(self._height, self._width, self._grid)\r\n return new_puzzle", "def __init__(self, origin_x=0., origin_y=0., resolution=myRes,\n width=30, height=30):\n self.origin_x = origin_x\n self.origin_y = origin_y\n self.resolution = resolution\n self.width = width / resolution\n self.height = height / resolution\n self.grid = 0.5 * np.ones((height / resolution, width / resolution))", "def __init__(self, width = 7, height = 7):\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.pos_x = START_X\n self.pos_y = START_Y\n self.col_d = False\n self.col_l = False\n self.col_r = False", "def __init__(self, width, height):\n # IMPLEMENT ME\n self.width = width\n self.height = height\n self.board = []\n for i in range(height):\n row = []\n for j in range(width):\n row.append(\".\")\n self.board.append(row)", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.tilesCleaned = []\n #self.room = [[1 for x in range(width)] for x in range(height)]\n #initialises 2 dimensional array with all values set to one.\n #value of one represents dirty, zero represents clean\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html", "def __init__( self, width, height ):\n\n self.width = width\n self.height = height\n\n W = self.width\n H = self.height\n\n self.data = [ [' ']*W for row in range(H) ]\n\n # we do not need to return inside a constructor!", "def __init__(self):\n self.board = {} # dict of (x,y) to PlacedTile\n self.board[(0,0)] = STARTING_PIECE", "def __init__(self, height, width, mines):\n self.x = int(width)\n self.y = int(height)\n self.table_state = [\n ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)]\n self.mine_locations = self.generate_mines(int(mines))\n self.final_table = self.generate_answer()", "def __init__(self, puzzle):\n # Split the given string input and find the side length and block size of the puzzle\n puz = [int(i) for i in puzzle.split(' ') if i]\n self.sl = int(math.sqrt(len(puz))) \n self.bs = int(math.sqrt(self.sl))\n\n # If side length squared not the same length as total puzzle, or if side lengths\n # not a square length, raise error\n if not (self.sl**2 == len(puz)) or not (self.bs**2 == self.sl):\n raise Sudoku_Errors.InvalidPuzzleException(puzzle, \"Puzzle side lengths not a perfect square\")\n\n # For each value in the puzzle, if not in correct range, raise error\n for ind in range(len(puz)):\n row = ind // self.sl\n col = ind % self.sl\n if not (0 <= puz[ind] <= self.sl):\n raise Sudoku_Errors.InvalidPuzzleException(puzzle,\n \"Puzzle value at ({}, {}) is out of range in puzzle \\n{}\".format(row, col, puzzle))\n\n # Split string by spaces into single list\n self.puzzle = [[j for j in puz[(i*self.sl):(i*self.sl)+self.sl]] for i in range(self.sl)]\n\n # For each value in the puzzle, check that it is a valid value for that square\n for row in range(self.sl):\n for col in range(self.sl):\n # This temporary replacing of each value with 0 is a trick so 
that\n # the valid_square method can be used on every square\n val = self.puzzle[row][col]\n self.puzzle[row][col] = 0\n\n if not self.valid_square(row, col, val):\n # If not a valid puzzle, reset self.puzzle and raise error\n self.puzzle = None\n raise Sudoku_Errors.InvalidPuzzleException(puzzle,\n \"Puzzle value at ({}, {}) is incorrect in puzzle \\n{}\".format(row, col, puzzle))\n\n # If value is valid, replace that square with prior value that was input\n self.puzzle[row][col] = val", "def __init__(self, board_dim= DEFAULT_DIM):\r\n self.width = board_dim\r\n self.height = board_dim\r\n\r\n self.grid = np.array([[' '] * self.width for i in range(self.height)])\r\n self.num_checkers = 0 # keeps track of how many checkers have been added\r\n\r\n self.available_moves = [(row, col) for row in range(self.height) for col in range(self.width)]\r\n\r\n # Specify the winning condition based on the board's dimension\r\n if (self.width < 5):\r\n self.win_condition = self.width\r\n else:\r\n self.win_condition = 5", "def __init__(self, width, height, title):\n ## INIT FUNCTION ##\n super().__init__(width, height, title)\n\n ## APPENDING THE SPRTIES ##\n self.shape_list = None\n self.num_key = 0\n\n self.win = arcade.load_texture(\"Numbers/won.png\")\n self.lost = arcade.load_texture(\"Numbers/lost.png\")\n\n # Define variables to check for completeness and accuracy\n self.done = False\n self.correct = False\n self.incorrect = False\n\n self.current_selected = None\n\n # If continuing saved game, convert strings from saved game file to lists and set equal to self.grid and self.fixed_answer\n if new == False:\n self.fixed_answer = Cameron.str_to_list(answer)\n self.grid = Cameron.str_to_list(progress)\n # If starting new game, generate unique board and save solution to text file\n elif new == True:\n self.board = SuDoku(SIZE, (DIV_ROW, DIV_COL), difficulty)\n self.answer = self.board.get_solution()\n self.grid = self.board.get_puzzle()\n self.fixed_answer = self.answer\n\n ## GENERATES BACKGROUND ##\n arcade.set_background_color(arcade.color.BLACK)\n self.recreate_grid()", "def __init__(self, width=20, height=20):\n self.width = width\n self.height = height\n self.cells = []\n for y in range(self.height):\n for x in range(self.width):\n self.cells.append(Cell(x, y, [N, S, E, W]))", "def initialize(self, height, width,):\n grid = list()\n for x in xrange(height):\n grid.append(list())\n for y in xrange(width):\n grid[x].append(Node(x, y))\n return grid", "def __init__(self, width, height):\n self.integer_validator(\"width\", width)\n self.__width = width\n self.integer_validator(\"height\", height)\n self.__height = height", "def __init__(self, width, height):\n self.integer_validator(\"width\", width)\n self.integer_validator(\"height\", height)\n self.__width = width\n self.__height = height", "def __init__(self):\n self._board = []\n for i in range(10):\n self._board.append([None for i in range(9)])\n self.place_pieces()", "def __init__(self, height, width):\n self.width = width\n self.height = height\n self.slots = []\n for rows in range(height):\n row = [' '] * width # a row containing width 0s\n self.slots += [row]", "def __init__(self,m,n):\n self.columns = m\n self.rows = n\n self.board = makeBoard(m,n)", "def clone(self):\n new_puzzle = Puzzle(self._height, self._width, self._grid)\n return new_puzzle", "def clone(self):\n new_puzzle = Puzzle(self._height, self._width, self._grid)\n return new_puzzle", "def clone(self):\n new_puzzle = Puzzle(self._height, self._width, self._grid)\n return 
new_puzzle", "def clone(self):\n new_puzzle = Puzzle(self._height, self._width, self._grid)\n return new_puzzle", "def __init__(self, rows=6, columns=7, win_length=4):\n\n self._board = [[0 for i in xrange(columns)] for i in xrange(rows)]\n self._rows = rows\n self._columns = columns\n self._win_length = win_length\n self.current_player = None\n self.winner = None\n print \"The game is afoot!\"", "def test_create_tile_puzzle(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,6],[7,8,0]])\n p = hw.create_tile_puzzle(2, 4)\n self.assertEqual(p.get_board(), [[1,2,3,4],[5,6,7,0]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertEqual(p.get_board(), [[1,2,3,0]])", "def __init__(self, n: int):\n self.size = n\n self.board = [[CellValues.EMPTY.value] * n for _ in range(n)]\n self.num_empty_cells = n * n", "def __init__(self, width, height):\n self.width = width\n self.height = height\n self.table = [False] * (width * height)", "def __init__(self, width, height, x=0, y=0, id=None):\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y", "def __init__(self, width, height, x=0, y=0, id=None):\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y", "def __init__(self, row=4, col=4, initial=2):\n self.grid = Grid(row, col, initial)", "def __init__(self, width, height):\n Game.__init__(self, width, height)", "def test_default_board_dimensions(self):\n\n self.assertEqual(self.game.width, 90)\n self.assertEqual(self.game.height, 45)", "def __init__(self):\n\n self._length = 8\n self.board = []\n self.columns = \"ABCDEFGH\"\n for colNum in range(0, self._length):\n self.board.append([])\n for rowNum in range(0, self._length):\n self.board[colNum].append(Tile(colNum, rowNum))\n\n self.board[3][3].color = \"blue\"\n self.board[3][4].color = \"red\"\n self.board[4][3].color = \"red\"\n self.board[4][4].color = \"blue\"", "def __init__(self, dim, reverse = False, board = None):\n self.empty_squares = []\n if board == None:\n self.board = [[\"\", \"\", \"\"],\n [\"\", \"\", \"\"],\n [\"\", \"\", \"\"]]\n self.dim = dim\n self.board = board\n self.reverse = reverse\n self.win = None\n self.DRAW = 4\n self.EMPTY = 1\n self.PLAYERO = 2\n self.PLAYERX = 3", "def __init__(self):\n self.grid = {}\n for i in range(21):\n self.grid[i] = [' ']*21\n self._len_x = len(self.grid[0])\n self._len_y = len(self.grid)\n self.forbidden_tiles = []\n self.allowed_tiles = []\n self.exit = None\n self.entrance = None", "def __init__(self, size):\n\t\tself.size = size\n\t\tself.board = []\n\t\tnew = []\n\t\tfor i in range(0, size, 1):\n\t\t\tfor j in range(0, size, 1):\n\t\t\t\tnew.append(0)\n\t\t\tself.board.append(new)\n\t\t\tnew = []", "def __init__(self, mine_count=BOARD_DIM[\"MINE_COUNT\"], width=BOARD_DIM[\"BOARD_WIDTH\"],\n height=BOARD_DIM[\"BOARD_HEIGHT\"]):\n if height is None:\n height = width\n if mine_count > height * width:\n raise TooManyMineException\n self.height = height\n self.width = width\n self.mine_count = mine_count\n self.chessboard = [[Point(x, y) for x in range(width)] for y in range(height)]\n self.mines = [-1 for z in range(mine_count)]\n self.initialise()", "def __init__(self, width, height):\n if type(height) != int or height <= 0 or type(width) != int or width <= 0:\n raise ValueError('room size must be > 0, and must be an integer')\n self.width = width\n self.height = height\n self.CleanBlocks = []", "def __init__(self, width, height, x=0, y=0, id=None):\n\n super().__init__(id)\n 
self.width = width\n self.height = height\n self.x = x\n self.y = y", "def __init__(self, width, height, x=0, y=0, id=None):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)", "def __init__(self, width, height, x=0, y=0, id=None):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)", "def __init__(self, width, height, x=0, y=0, id=None):\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)", "def __init__(self):\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]", "def __init__(self):\n self.t_height = 291\n self.t_left = 65\n self.t_right = 144\n self.points = Pix()", "def __init__(self, size):\n self.size = size\n self.grid = {}\n self.init_grid(size)\n self.current_direction = \"down\"\n # if not empty, should be 4 tuples representing the coordinates of the moving piece\n self.active_piece = None\n self.game_over = False", "def __init__(self, width, height, x=0, y=0, id=None):\n Base.__init__(self, id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y", "def __init__(self, rows: int = 1, columns: int = 2):\n super().__init__()\n self.__squares = [[Floor._clean for i in range(columns)] for j in range(rows)]", "def setUp(self):\n self.gameBoard = Grid((100, 100), Cell)", "def __init__(self):\n self._grid = [[None]]", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h", "def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h", "def __init__(self):\n self._width = 0\n self._height = 0\n self._invalidPositions = frozenset()", "def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)", "def __init__(self, game_size):\n\n self.game_size = game_size\n self.screen_size = 10 * self.game_size, 20 * self.game_size\n self.game_board = GameBoard()\n self.background_color = 55, 55, 40\n self.tetrino_set = dict()\n self.tetrino_id = 1\n self.score = 0", "def __init__(self, puzzle=None, children=None, parent=None):\n self.puzzle, self.parent = puzzle, parent\n if children is None:\n self.children = []\n else:\n self.children = children[:]", "def __init__(self):\n self.board = [[T.Tile().getColor() for x in range(6)] for y in range(6)]", "def __init__(self, layout, player):\n self.layout = [x[:] for x in layout] #this state's layout is a copy\n self.height = len(layout[0])\n self.width = len(layout)\n self.who_played = player\n self.score = self._scoring() #score for this board", "def __init__(self, board=None, workers=None):\n if board:\n self._board = []\n for row in range(self.BOARD_SIZE):\n self._board.append([])\n for col in 
range(self.BOARD_SIZE):\n try:\n height = board[row][col]\n except IndexError:\n height = 0\n self._board[row].append(Building(height))\n else:\n self._board = [[Building() for col in range(self.BOARD_SIZE)]\n for row in range(self.BOARD_SIZE)]\n\n if workers:\n self._workers = workers\n else:\n self._workers = {}", "def __init__(self, width, height):\n self.width =width\n self.height = height\n self.box_width = width/self._BOXES_WIDE\n print 'box width: ', self.box_width\n self.box_height = height/self._BOXES_TALL\n\n self.tiles = []\n self.changes = set()\n y = 0\n for i in range(World._BOXES_TALL):\n y += self.box_height\n x = 0\n self.tiles.append([])\n for j in range(World._BOXES_WIDE):\n x += self.box_width\n tile = Tile(self.changes, x, y, self.box_width, self.box_height)\n self.tiles[i].append(tile)", "def setup(\n self, x: Number=None, y:Number=None, width: Number=None,\n height: Number=None\n ):\n if x is not None:\n self.x = x\n if y is not None:\n self.y = y\n if width is not None:\n self.width = width\n if height is not None:\n self.height = height", "def __init__(self, gridSize):\n self.grid = []\n self.player1Pos = []\n self.player2Pos = []\n self.winner = None\n self.gridSize = gridSize\n self.generate()", "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "def __init__(self, size):\n self.array = [[Square() for x in xrange(size)] for x in xrange(size)]\n self.init_game()" ]
[ "0.7760379", "0.7760379", "0.7760379", "0.7755133", "0.77309585", "0.72809523", "0.6797846", "0.6736831", "0.6706259", "0.66702473", "0.6657214", "0.6637784", "0.6606009", "0.6548512", "0.6464826", "0.6438714", "0.6434132", "0.6341456", "0.63368994", "0.6271822", "0.6270025", "0.62655383", "0.617088", "0.61619955", "0.615826", "0.61566687", "0.6156085", "0.6141137", "0.61376214", "0.61217415", "0.6112952", "0.6099208", "0.6097241", "0.60887736", "0.60887736", "0.60887736", "0.60870665", "0.6083381", "0.60739636", "0.6073045", "0.6066684", "0.60586756", "0.6053104", "0.6048091", "0.6041583", "0.6033833", "0.6026471", "0.6012553", "0.60074675", "0.600006", "0.5997648", "0.59938127", "0.5985312", "0.59817237", "0.59814036", "0.59814036", "0.59814036", "0.59814036", "0.5963344", "0.5956065", "0.5954769", "0.5951109", "0.5941419", "0.5941419", "0.59326094", "0.59162927", "0.5916211", "0.5913188", "0.59026885", "0.5893027", "0.58815867", "0.586552", "0.586364", "0.5862946", "0.5854734", "0.5854734", "0.5854734", "0.58540213", "0.58332396", "0.5824421", "0.5822033", "0.58197445", "0.5816225", "0.5807105", "0.5806973", "0.58066654", "0.58066654", "0.5780908", "0.5780011", "0.57767487", "0.57698333", "0.5767213", "0.57636446", "0.57635194", "0.57609844", "0.57576054", "0.5753463", "0.57491535", "0.5711769" ]
0.7743432
5
Generate string representation for puzzle. Returns a string.
def __str__(self):
    ans = ""
    for row in range(self._height):
        ans += str(self._grid[row])
        ans += "\n"
    return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr", "def __str__(self):\n rep = \"\"\n for row in range(self._dim):\n for col in range(self._dim):\n rep += STRMAP[self._board[row][col]]\n if col == self._dim - 1:\n rep += \"\\n\"\n else:\n rep += \" | \"\n if row != self._dim - 1:\n rep += \"-\" * (4 * self._dim - 3)\n rep += \"\\n\"\n return rep", "def generate_strings(self, new_puzzle):\n return new_puzzle._start", "def __str__(self):\n result = \"\"\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 5:\n result += \" x\"\n elif self.board[i][j] == 7:\n result += \" о\"\n else:\n result += \" #\"\n result += \"\\n\"\n return result", "def __str__(self):\n board = \"\"\" 0 1 2 3 4 5\\n\"\"\"\n\n for y in range(Board.board_size):\n board += str(y) + \" \"\n for x in range(Board.board_size):\n piece = self.board[x][y] if self.board[x][y] is not None else \".\"\n\n piece = str(piece).lower() if piece in self.player_1_pieces else str(piece)\n\n board += piece\n board += \" \"\n board += \"\\n\"\n return board", "def __str__(self):\r\n out = \"##\"*(self.width+1)+\"\\n\"\r\n for i in range(self.height):\r\n out += \"#\"\r\n for j in range(self.width):\r\n if self.grid[i][j] == 0:\r\n out += \"##\"\r\n else:\r\n if not self.showSolution:\r\n out += \" \"\r\n elif (i,j) in self.solution:\r\n out += \"**\"\r\n else:\r\n out += \" \"\r\n out += \"#\\n\"\r\n return out + \"##\"*(self.width+1)", "def __str__(self):\r\n # replace with your code\r\n result = ''\r\n for row in range(0, self._grid_height):\r\n result += str(self._grid_tile[row]) + '\\n'\r\n return result", "def solution(self) -> str:\n\n # 
\"Starting after the cup labeled 1, collect the other cups' labels clockwise into a single string with no\n # extra characters.\"\n\n self.current = 1\n eight_cups = self.pick_up_cups(8) # 9 cups in the circle, so all cups except '1' is 8 cups.\n\n answer = ''\n for cup in eight_cups:\n answer += str(cup)\n return answer", "def print_hint_board(self):\n board = \"\"\n count = 1 \n for i in range(3):#need to change this in the future\n for j in range(3):#need to change this in the future\n board += str(count) \n count += 1\n if j != 2:#need to change this in the future\n board += \" | \"\n board += \"\\n\"\n return board", "def __str__(self):\n\t\tstring = \"\"\n\t\tfor i in self.board:\n\t\t\tfor j in i:\n\t\t\t\tstring += str(j)\n\t\t\tstring += \"\\n\"\n\t\treturn string", "def __str__(self):\n return \"{}\\n\\n{}\".format(self.puzzle,\n \"\\n\".join([str(x) for x in self.children]))", "def solve_2x2(self):\n # replace with your code\n string = ''\n num1 = self.get_number(0, 0)\n num2 = self.get_number(0, 1)\n num3 = self.get_number(1, 0)\n max_num = max([num1, num2, num3])\n min_num = min([num1, num2, num3])\n if num1 == min_num and num2 == max_num:\n string += 'ul'\n elif num1 == max_num and num3 == min_num:\n string += 'ul'\n string += 'rdlu' * 2\n elif num2 == min_num and num3 == max_num:\n string += 'ul'\n string += 'rdlu'\n print '2x2 Path', string\n self.update_puzzle(string)\n return string", "def __str__(self):\r\n # The full representative string\r\n str_matrix = \"\"\r\n\r\n if self.matrix is not None:\r\n # Save the lenght into a variable\r\n # to send this number to the tiles method\r\n # and calculate the number of spaces\r\n spaces = len(self.matrix)\r\n for i in range(0, spaces):\r\n nums = list(filter(lambda x: x != \"_\", self.matrix[i]))\r\n str_matrix += self.tiles(nums, (i+1), (spaces - i))\r\n\r\n return str_matrix", "def __str__(self):\r\n \r\n #return \"The 2048 board is \" + str(self._cells)\r\n string = \"\"\r\n for row in range(self._grid_height):\r\n for column in range(self._grid_width):\r\n if column == self._grid_width -1:\r\n string += str(self._cells[row][column]) + \"\\n\"\r\n else:\r\n string += str(self._cells[row][column]) +\", \"\r\n return \"The 2048 board is \"+ str(self._grid_height) + \"x\" + str(self._grid_width) + \" and contains: \" + \"\\n\" + string", "def gen_solve_to_text(self):\n\n count = 0\n self.url = \"scramble: \\n\"\n for move in self.scramble.split():\n self.url += \"{} \".format(move)\n self.url += \"\\n\\nsolve:\\n\"\n\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"\\n//{}\\n\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n self.url += \"{} \".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n 
move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"//{}\\n\".format(piece) + alg\n\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n self.url += \"// {} \\n\".format(move[\"comment\"])", "def __str__(self):\n\t\tboardString = \"\\n{0}|{1}|{2}\\n-----\\n{3}|{4}|{5}\\n-----\\n{6}|{7}|{8}\\n\"\n\t\treturn boardString.format(self.board[0], self.board[1], self.board[2], self.board[3],\n\t\t\tself.board[4], self.board[5], self.board[6], self.board[7], self.board[8])", "def __str__(self):\n string = \"\"\n for row in self.layout:\n for tile in row:\n string+= str(tile) + \" \"\n string+= \"\\n\"\n return string", "def __str__(self):\r\n string_rep_of_grid=\"\"\r\n row=\"\"\r\n for dummy_j in range(self._height):\r\n for dummy_i in range(self._width):\r\n row=row+str(self._grid[dummy_j][dummy_i])+\" \" \r\n string_rep_of_grid=string_rep_of_grid+\"row number \"+str(dummy_j)+\": \"+row\r\n row=\"\"\r\n return string_rep_of_grid", "def random_puzzle(N=17):\n values = dict((s, digits) for s in squares)\n for s in shuffled(squares):\n if not assign(values, s, random.choice(values[s])):\n break\n ds = [values[s] for s in squares if len(values[s]) == 1]\n if len(ds) >= N and len(set(ds)) >= 8:\n return ''.join(values[s] if len(values[s]) == 1 else '.' for s in squares)\n return random_puzzle(N) ## Give up and make a new puzzle", "def __str__(self):\n # replace with your code\n board = \"\"\n for index in range(self.grid_height):\n board += \"[\"\n for inner_index in range(self.grid_width):\n board += str(self.board[index][inner_index]) + \" \"\n else:\n board += \"]\\n\"\n return board", "def __str__(self):\n result = ''\n result += '+---+\\n'\n for i in range(3):\n result += '|' + self[i*3] + self[i*3+1] + self[i*3+2] + '|\\n'\n result += '+---+'\n return result", "def __str__(self):\n return '\\n'.join(str(self._board[j]) for j in range(self._n))", "def __str__(self):\n board = ''\n for row in range(self.height):\n if row > 0:\n board += '\\n'\n for col in range(self.width):\n if self.board[row][col] == '':\n board += '| '\n else:\n board += ('|' + self.board[row][col])\n board += '|'\n board += ('\\n' + '-' * 2 * self.width + '\\n')\n for i in range(self.width):\n board += (' ' + str(i))\n return board", "def __str__(self):\n # replace with your code\n grid = '['\n for row in range(0,self._grid_height):\n grid += '['\n for col in range(0,self._grid_width):\n if col == self._grid_width - 1:\n grid += str(self.get_tile(row, col))\n else:\n grid += str(self.get_tile(row, col)) + ', '\n if row == self._grid_height - 1:\n grid += ']'\n else:\n grid += '], '\n \n grid += ']'\n return grid", "def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))", "def __str__(self):\n \n # top row\n result = ' '\n result = '\\n ' + '-' * (self.DIM*2+5) + '\\n'\n \n # board rows\n for row in range(self.DIM):\n if row is 3 or row is 6:\n result += '|' + '-' * (self.DIM*2+5) + '|' + '\\n'\n # result += '|-------+-------+-------|\\n'\n result += '| '\n for col in range(self.DIM):\n if col is 3 or col is 6:\n result += '| '\n if self.board[row][col] == SudokuConfig.EMPTY:\n result += '.'\n else:\n result += str(str(self.board[row][col]))\n if col != self.DIM-1:\n result += ' '\n result += ' |' + '\\n'\n \n # bottom row\n 
result += ' ' + '-' * (self.DIM*2+5) + '\\n'\n result += ' '\n result += '\\n'\n \n return result", "def __str__(self):\n # string accumulator\n result = \"\\n\"\n\n for n in self.from_grid:\n for m in n:\n result += \" \" + m\n result += \"\\n\"\n\n return result", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __repr__(self):\n\t\tret = \"\"\n\t\tfor i, x in enumerate(self.squares):\n\n\t\t\tret += \"\\t\"\n\t\t\tfor j in range(32): ret += u\"\\u2015\"\n\t\t\tret += \"\\n\\t|\"\n\t\t\tfor y in x:\n\t\t\t\tret += str(y)\n\t\t\t\tret += \" | \"\n\n\t\t\tret += str(i+1) + \"\\n\"\n\n\t\tret += \"\\t\"\n\t\tfor i in range(32): ret += u\"\\u2015\"\n\t\tret += \"\\n \"\n\n\t\tfor l in self.letters:\n\t\t\tret += l+\" \"\n\t\treturn ret", "def __str__(self):\n string = ''\n for y in range(self.size_y-1, -1, -1):\n for x in range(self.size_x):\n piece = self._openings[x][y]\n string += str(piece)\n if y > 0:\n string += '\\n'\n return string", "def board_string(self):\n s = \"\"\n for i, v in enumerate(self.board):\n # if i % 81 == 0:\n # s += \"\\n\"\n if v is None:\n s += \"0\"\n else:\n if v.color == StoneColor.black:\n s += \"1\"\n else:\n s += \"2\"\n return s", "def __str__(self):\n inside_list = lambda _v, _h, a: any(x == _h and y == _v for y, x in a)\n resultant = ''\n for _v in range(1, self.size_v + 1):\n for _h in range(1, self.size_h + 1):\n if self.current_location[1] == _h and self.current_location[0] == _v:\n resultant = resultant + '@'\n elif inside_list(_v, _h, self.boxes):\n resultant = resultant + '$'\n elif inside_list(_v, _h, self.storage_locations):\n resultant = resultant + '.'\n elif inside_list(_v, _h, self.wall_squares):\n resultant = resultant + '#'\n else:\n resultant = resultant + ' '\n resultant = resultant + '\\n'\n\n return resultant", "def convertBoard(self):\n \n board = \"\"\n \n for m in self.squares:\n board += str(convertMarker(m)) + \" \"\n \n return board", "def __str__(self):\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s", "def __repr__(self):\n W = self.width\n H = self.height\n\n s = '' # the string to return\n for row in range(0, H):\n s += '|'\n for col in range(0, W):\n s += self.data[row][col] + '|'\n s += '\\n'\n\n s += (2 * W + 1) * '-' # bottom of the board\n s += '\\n'\n\n x = -1\n for i in range(W):\n if x == 9:\n x = 0\n s += \" \" + str(x)\n else:\n x += 1\n s += \" \" + str(x)\n\n return s # the board is complete, return it", "def __str__(self):\n table_data = [\n ['', 'C', 'G', 'A', 'T'],\n ['total', str(self.total['C']), str(self.total['G']), str(self.total['A']), str(self.total['T'])],\n ['reverse half strand', 
str(self.reverse['C']), str(self.reverse['G']), str(self.reverse['A']),\n str(self.reverse['T'])],\n ['forward half strand', str(self.forward['C']), str(self.forward['G']), str(self.forward['A']),\n str(self.forward['T'])]\n ]\n table = AsciiTable(table_data)\n return \"Generation #{}\\n\".format(self.epoch) + table.table", "def generate_str (obj):\n d = obj.dim(obj)\n units = [ obj( ( [zer]*i + [one] + [zer]*(d-i-1) )) for i in range(d) ]\n table = []\n raw_table = []\n for j in units:\n table.append([])\n raw_table.append([])\n for i in units:\n if DEBUG: raw_table[-1].append(str(j*i))\n table[-1].append(str(obj([c.name_in(s) for c in (j*i).state])))\n if DEBUG: print('{} × {} = {}'.format(j,i,j*i))\n return table", "def MakePuzzleTitleForDisplay(p):\n pack = models.GetPackForPuzzle(p)\n return '%s, Puzzle %s' % (pack.title, p.name)", "def __str__(self):\n #formatting board correctly\n formatted_board = \"\"\n for i in range(self.size):\n formatted_board += str(self.board[i]) + \"\\n\"\n return \"Board size: \" + str(self.size) + \"\\n\" + \"Number of Queens placed: \" + str(self.num_queens_placed) + \"\\n\" + str(formatted_board)", "def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str", "def __repr__(self):\r\n numLetters = self.numLetters\r\n S = ''\r\n S += 3*'\\n'\r\n S += ' '\r\n for i in range(numLetters):\r\n S += self.currentBoard[i] + ' '\r\n\r\n return S", "def __str__(self):\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))", "def __str__(self) -> str:\r\n output: str = \"\"\r\n\r\n for row_i in range(Board._NUM_ROWS):\r\n for col_i in range(Board._NUM_COLS):\r\n pos: Pos2D = Pos2D(col_i, row_i)\r\n output += (\"{} \".format(self.squares[pos].get_representation()))\r\n # Finished row, add new line.\r\n output += \"\\n\"\r\n\r\n return output", "def __str__(self):\n s = \"\"\n for i in range(13,25):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n s += '\\n'\n for i in range(12, 0,-1):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n return s", "def __str__(self):\n def align_column(grid):\n board = \"\"\n for i in range(self.n):\n board += str(grid[i]) + \"\\n\"\n return board.strip()\n return (\"===Current Stage===\\n\"\n \"{}\\n\"\n \"====Goal Board=====\\n\"\n \"{}\".format(align_column(self.from_grid),\n align_column(self.to_grid)))", "def _get_problem_str(self):\n return ''", "def __str__(self):\r\n result = \"\"\r\n for line in self.board:\r\n for i in line:\r\n if i is None:\r\n result += \" \"\r\n else:\r\n result += i + \" \"\r\n result += \"\\n\"\r\n\r\n return result", "def __str__(self):\n b = ''\n for i in range(7): # 7 steps in the board\n if i == 
self.chaser_i: # chaser position\n b += '|' + str(i) + '| chaser |\\n'\n elif i == self.player_i: # player position\n b += '|' + str(i) + '| player |\\n'\n else:\n b += '|' + str(i) + '| |\\n'\n b += '|7| bank |\\n' # bank position\n return b", "def StringFromBoard(board):\n\trows = []\n\tfor row in board:\n\t\trows.append('|'.join([' '+square+' ' for square in row]))\n\treturn '\\n-----------\\n'.join(rows)", "def __repr__(self):\n t = ''\n for x in range(len(self.board)):\n for y in range(len(self.board[0])):\n t += str(self.board[x][y]) + ' '\n t += '\\n'\n return t", "def __repr__(self):\n ncols, nrows = self.ncols, self.nrows # easier to read\n\n rep = \" \".join([str(n-1) if n else \" \" for n in xrange(self.ncols+1)])\n for piece in self.squares:\n if piece in [i for i in xrange(0, ncols * nrows, ncols)]:\n rep += \"\\n{0} \".format(piece // ncols)\n\n if self.squares[piece]:\n rep += \"{0} \".format(self.squares[piece])\n else:\n rep += \"- \"\n\n return rep", "def __str__(self):\n s = \"\"\n for r in range(1,self.size+1):\n for c in range(1,self.size+1):\n s += str(self.gameState[r,c])\n return s", "def print_puzzle(board):\n\n row_size = get_row_size(board)\n output = '\\n'\n\n for idx, val in enumerate(board):\n output += \" {} \".format(val)\n if idx % row_size == row_size - 1:\n output += \"\\n\"\n\n return output", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def board(self) -> str:\n divider = \"+\" + \"-\" * 23 + \"+\"\n b = [divider]\n for i in range(9):\n r = []\n for j in range(3):\n s = tool.index_counter(i, j * 3)\n r.append(' '.join(str(i) if i > 0 else ' '\n for i in self.grid[s:s+3]))\n b.append(f\"| {r[0]} | {r[1]} | {r[2]} |\")\n if (i + 1) % 3 == 0:\n b.append(divider)\n return \"\\n\".join(b)", "def __str__(self):\n s = \" 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\\n\"\n board = initial_board()\n count = 1\n for i in self.occupied:\n board[i[0]][i[1]] = self.occupied[i]\n space = ''\n for i in range(0, 16):\n space += ' '\n start = '---'.join(space)\n s += start+'\\n|'\n for row in range(1,16):\n for col in range(1,16):\n if use_color and (row, col) == self.action:\n s += '\\033[91m'\n if board[row][col] == 0:\n s += ' |'\n elif board[row][col] == 1:\n s += ' O |'\n else:\n s += ' X |'\n if use_color and (row, col) == self.action:\n s += '\\033[0m'\n s += '\\033[0m'\n s+=str(count)+'\\n'+start+'\\n|'\n count += 1\n\n s = s[:len(s)-1]\n s += \"\\n*****************************************************************************\"\n return s[:len(s)-1]", "def get_hash_string(self) -> str:\n\t\ts = ''\n\t\tfor i in range(self.size):\n\t\t\ts += ''.join(map(str,self.tiles[i]))\n\t\treturn s", "def __repr__(self):\r\n # Initialize the return string\r\n s = ''\r\n\r\n for row in 
range(self.height):\r\n # Print the index of the row\r\n s = s + str(row % 10) + ' |'\r\n\r\n for col in range(self.width):\r\n s += self.grid[row][col]\r\n s += '|'\r\n\r\n s += '\\n'\r\n s += '--' * (self.width + 1)\r\n s += '-'\r\n s += '\\n'\r\n \r\n s += ' '\r\n for i in range(self.width):\r\n s += ' ' + str(i % 10) \r\n \r\n return s", "def __str__(self):\n return str(self.__squares)", "def __str__(self):\n string = ''\n for row in self.board:\n for item in row:\n if item == None:\n string += \"_ \"\n else:\n string += f\"{item.name:<2}\"\n string += '\\n'\n \n return string", "def __str__(self):\r\n grid_text = \"\\n-------------------\\n|\"\r\n for i in range(len(self.grid)):\r\n grid_text = grid_text + ' %s '%(self.grid[i][-1])\r\n\r\n if i%3 == 2:\r\n grid_text = grid_text + '|\\n-------------------\\n|'\r\n else:\r\n grid_text = grid_text + '|'\r\n return grid_text[0:len(grid_text)-1]", "def get_board_as_string(game):\n str_board = \"\\n\" # every board starts with a blank line\n row = 0 # used to print the board\n\n # creates a board of 5 lines. 3 rows, 2 dashed.\n for line in range(1, 6):\n\n # every odd line\n if line % 2 != 0:\n\n # add a row to the string str_board\n str_board += \"{} | {} | {}\".format(game['board'][row][0], game['board'][row][1], game['board'][row][2])\n\n # increment the row\n row += 1\n\n # every even line\n else:\n str_board += \"--------------\"\n\n str_board += \"\\n\" # add line break at the end of every line\n\n return str_board", "def toString(self):\n printme = \"\"\n for i in range (0,len(self.maze)):\n for j in self.maze[i]:\n printme = printme + j\n printme = printme + \"\\n\"\n return printme", "def __str__(self):\n return f'{self.text}: {self.chs}, correct answer: {self.solution}'", "def __repr__(self):\n s = dashes = \"\".join([ ' -' for i in range(self.BoardSize) ])\n for row in range( self.BoardSize ):\n sRow = '|'\n for col in range( self.BoardSize ):\n sRow += str(self.CurrentGameboard[row][col]) + '|'\n s += '\\n' + sRow + '\\n' + dashes\n return s", "def get_puzzle(self):\n return [[str(self.puzzle[i][j]) for j in range(len(self.puzzle[0]))] for i in range(len(self.puzzle))]", "def __repr__(self) -> str:\n bits_str = \"\"\n\n raw_key = self.raw_key()\n for byte_idx, chunk in enumerate(raw_key):\n # Range from 7 to 0 inclusively:\n for bit in range(7, -1, -1):\n i = byte_idx * 8 + bit\n if i < self.start() or i >= self.end():\n bits_str += \"_\"\n else:\n bits_str += \"0\" if (1 << bit) & chunk == 0 else \"1\"\n\n bits_str += \"|\"\n\n format_str = \"ProofPath [ start: {}, end: {}, bits: {} ]\".format(self.start(), self.end(), bits_str)\n return format_str", "def __str__(self):\n ans = \"\"\n for row in range(self._grid_height):\n ans += str(self._cells[row])\n ans += \"\\n\"\n return ans", "def __str__(self) -> str:\n rowline = f\" +{'-' * (self.size * 2 - 1)}+\"\n rowlines = [rowline] * (self.size + 1)\n\n rows = [f\"{i+1}|{'|'.join(row)}|\" for i, row in enumerate(self._grid)]\n columns = string.ascii_lowercase[: self.size]\n rows.append(f\" {' '.join(columns)} \")\n\n return \"\\n\".join(itertools.chain(*zip(rowlines, rows)))", "def generate_new_puzzle():\n new_puzzle = pb() \n\n # only generate solvable puzzles\n while not new_puzzle.is_solvable():\n new_puzzle = pb()\n\n return new_puzzle", "def __str__(self):\n s=''\n for r in range(self.n):\n for c in range(self.n):\n s += str(self.state[r][c]) + ' '\n s += '\\n'\n s += str('hcost') + ' : ' + str(self.hcost)\n s += '\\n'\n return s", "def __str__(self):\n s=\"\"\n for y in 
range(0,HEIGHT):\n for x in range(0,WIDTH):\n s+=str(self.gameState[x,y])\n return s", "def toString():", "def __str__(self):\n s = \"\"\n for x in range(self.length):\n line = []\n #print (f\"x: {x}\")\n for y in range(self.length):\n #print (f\"y: {y}\")\n line.append(str(self.lst[x*self.length +y][1])) \n #print (line)\n s += \" | \".join(line) + \"\\n\"\n return (s)", "def __str__(self):\n # replace with your code\n return_string = ''\n for row in range(self._grid_height):\n return_string += str(self._grid[row]) + '\\n'\n return str(return_string)", "def _to_string(board: Tuple[Tuple[Optional[int]]], width: int) -> str:\n display = \"\\n\"\n for i in range(width):\n for j in range(width):\n line = board[j][i * width:i * width + width]\n start = j * width ** 2 + i * width\n for k, space in enumerate(line):\n if space == 0:\n space = start + k\n else:\n space = (\"X\" if space == 1\n else \"O\" if space == -1\n else \"-\")\n display += \"{0:>4}\".format(space)\n display += \" \" * width\n display += \"\\n\"\n return display", "def clean_puzzle(puzzle):\n output = ''\n for val in puzzle.values():\n if val == '':\n output += '.'\n elif int(val) in range(1, 10):\n output += val\n return output", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def __str__(self):\n [r,c],f = self.D, self.F\n lmax = len(str(max(iter(self)))) + 1\n s = '\\n'.join( (' '.join('{0:{l}G}'.format(f(i,j),l=lmax) if isinstance(f(i,j), int) or isinstance(f(i,j), float) else str(f(i,j)) for j in range(c))) for i in range(r))\n return s", "def __str__(self):\n st1 = self.ques + '\\n'\n for i in range(4):\n st1 = st1 + str(i+1) + '. ' + self.options[i] + '\\n'\n return st1", "def __str__(self) -> str:\n not_actual = self.current_board\n representation = self.current_board\n\n for index in range(len(not_actual)):\n if not_actual[index: index + 2] in ['31', '32', '33', '34', '36',\n '37', '38']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n if not_actual[index: index + 2] in ['41', '42', '43', '44', '45',\n '46', '47', '48']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n if not_actual[index: index + 2] in ['51', '52', '53', '54', '55',\n '56', '57', '58']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n return representation", "def __str__(self):\r\n # replace with your code\r\n return str(self._board[0]) + \"\\n\" + str(self._board[1]) + \"\\n\" + str(self._board[2]) + \"\\n\" + str(self._board[3]) + \"\\n\\n\"", "def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty lattice polytope'\n else:\n desc += 'A ' + repr(self.affine_dimension()) + '-dimensional lattice polytope'\n desc += ' in ZZ^' + repr(self.space_dimension())\n\n if self.n_vertices()>0:\n desc += ' with '\n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n return desc", "def __str__(self):\n outstr = \"\"\n for i in range(3):\n for j in range(3):\n outstr += str(self.pos_to_num[(i, j)]) + \" \"\n outstr = outstr[:-1]\n outstr += \"\\n\"\n outstr += \"\\n\"\n return outstr", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def get_strings(mask):\r\n def get_sudoku(p):\r\n x0 = p[0]\r\n y0 = p[1]\r\n sudoku = [(x0-1, y0-1), (x0, y0-1), (x0+1, y0-1), \r\n (x0-1, y0), (x0+1, 
y0), \r\n (x0-1, y0+1), (x0, y0+1), (x0+1, y0+1)]\r\n return sudoku\r\n \r\n def get_next_point(points_sorted, points):\r\n p0 = points_sorted[-1]\r\n for s in get_sudoku(p0):\r\n if s in points and s not in points_sorted:\r\n points_sorted.append(s)\r\n get_next_point(points_sorted, points)\r\n break\r\n return points_sorted\r\n \r\n def get_string(start, points):\r\n string1 = get_next_point([start], points)\r\n string = get_next_point([string1[-1]], points)\r\n start2 = min([string[0], string[-1]])\r\n string = get_next_point([start2], points)\r\n \r\n return string\r\n \r\n mask = copy.copy(mask)\r\n x, y = np.where(mask.T)\r\n points = zip(x, y)\r\n start = points[0]\r\n strings = []\r\n while 1:\r\n string = get_string(start, points)\r\n strings.append(string)\r\n sx, sy = zip(*string)\r\n mask[sy, sx] = 0\r\n x, y = np.where(mask.T)\r\n points = zip(x, y)\r\n if len(points):\r\n start = points[0]\r\n else:\r\n break\r\n \r\n # Order strings\r\n starts = [min([string[0], string[-1]]) for string in strings]\r\n starts, strings = zip(*sorted(zip(starts, strings))) # Sort strings based on starts\r\n strings = map(np.array, strings)\r\n \r\n return strings", "def gen_build_str_def():\n\treturn \"\"", "def triplet():\n ends = digraphs + list(consonants)\n spaces = [ends, list(vowels), ends]\n bit = \"\".join([random.sample(x, 1)[0] for x in spaces])\n if random.randint(0, 1) == 1:\n bit = bit.title()\n return bit", "def __str__(self):\n\t\t\n\t\tdef mapping(x):\n\t\t\tif x == 1:\n\t\t\t\t# WHITE\n\t\t\t\treturn 'O'\n\t\t\telif x == -1:\n\t\t\t\t# BLACK\n\t\t\t\treturn 'X'\n\t\t\telse:\n\t\t\t\t# Empty\n\t\t\t\treturn '-'\n\t\t\n\t\ts = 'BLACK - X\\n'\n\t\ts += 'WHITE - O\\n\\n'\n\t\tfor j in self.rows:\n\t\t\ts += j\n\t\t\ts += ' '\n\t\t\ts += ''.join(mapping(self[i+j]) for i in self.columns)\n\t\t\ts += '\\n'\n\t\treturn s + '\\n ' + self.columns + '\\n'", "def __str__(self):\n reprStr = 'Help Mario build Iron Man suit!'+'\\n' +'To make the ' + self._name + ',you need:'+'\\n'\n for part in self._supplies:\n reprStr = reprStr + str(part.getCount()) + ' ' + part.getData() + '\\n'\n return reprStr", "def __str__(self):\n grid_str = \"\"\n for row in range(self.grid_height):\n grid_str += str(self.grid[row])+'\\n'\n return grid_str" ]
[ "0.7646727", "0.7117016", "0.71114206", "0.7055232", "0.6831262", "0.6665103", "0.6616839", "0.6554321", "0.6554", "0.6470969", "0.646938", "0.6457912", "0.6453044", "0.64245826", "0.6423748", "0.6408869", "0.63958913", "0.6389163", "0.6384862", "0.6339044", "0.6336588", "0.6330396", "0.63253003", "0.63110006", "0.62799615", "0.6275596", "0.6274555", "0.62581843", "0.6241095", "0.6206554", "0.619979", "0.61985826", "0.61985826", "0.61985826", "0.6198567", "0.61937344", "0.61872816", "0.6148857", "0.61399657", "0.61321634", "0.6130452", "0.6123882", "0.61065775", "0.61043894", "0.6104039", "0.6100068", "0.6098907", "0.6079989", "0.6073999", "0.6072844", "0.60713047", "0.60712445", "0.60685676", "0.6062594", "0.6050239", "0.6047523", "0.6031478", "0.60229206", "0.6017962", "0.6008602", "0.60030216", "0.60018045", "0.59989154", "0.59703654", "0.5962003", "0.59551775", "0.59411716", "0.59404296", "0.59402305", "0.5933555", "0.59280974", "0.59214705", "0.5916403", "0.59106475", "0.58958614", "0.58958113", "0.5889628", "0.5884019", "0.5879983", "0.5861328", "0.5857057", "0.58524936", "0.58518046", "0.5848519", "0.5848166", "0.5842992", "0.5839091", "0.5836935", "0.5836187", "0.5832582", "0.5829479", "0.5819724", "0.5819685", "0.5814502", "0.58119106", "0.58094394", "0.58021975" ]
0.62557524
29
Getter for puzzle height Returns an integer
def get_height(self): return self._height
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:", "def tileHeight(self):\n return self._tileHeight", "def height(self):\n yy = self.yy\n return max(yy) - min(yy)", "def height(self):\n return self[\"height\"]", "def height(self):\n return self[\"height\"]", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"height\")", "def get_height(self):\r\n return self.state['h']", "def height(self):\n return self.client.call('GET', self.name + 'height')", "def expected_height(self):\n\t\treturn self.expected_tile_height * TILE_SIZE", "def height(self):\n return self.board.shape[0]", "def height(self) -> int:\n return self.__height", "def height (self):\n return self._h", "def height(self) -> int:\n\t\treturn self._raw_result['data']['height']", "def height(self) -> int:\n return self._height", "def height(self) -> int:\n return self._height", "def getHeight(self):\n return self.height", "def getHeight(self):\n return self.height", "def height(self):\n return self.maxy - self.miny", "def height(self):\n try:\n return max(elem.height for elem in self[1:])+1\n except ValueError:\n return 0", "def height(self) :\n return self.m_height", "def bottom_height_px(self):\n return self.bottom_pieces * PipePair.PIECE_HEIGHT", "def get_height(self):\n return self.__height", "def height(self):\n return self.__size[1]", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_pyramid_height(self):\n\t\treturn self.variables.get('pyramid_height')", "def height(self):\r\n return self._height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def getHeight(self):\n return self._height", "def height(self):\n return self.y.max() - self.y.min()", "def 
height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def height(self):\n return self._height", "def get_grid_height(self):\n # replace with your code\n return self._height", "def height(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"height\")", "def height(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"height\")", "def height(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"height\")", "def height(self):\n return (self.__height)", "def height(self) -> int:\n return self._obj[self.y_dim].size", "def get_height(self) -> int:\n return int(self._surface.get_height())", "def height(self):\n return self.get_delta_value(self.Y_INDEX)", "def height(self) -> int:\n if self.props.max_height:\n max_height = UIMetric.parse(self.props.max_height).to_pixels(self.parent.height)\n return min(self.isize[1].to_pixels(self.parent.height), max_height)\n else:\n return self.isize[1].to_pixels(self.parent.height)", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"height\")", "def height(self):\n # type: () -> float\n return self._height", "def height(self):\n return self.row", "def get_dimension_height(self):\n pass", "def height(self):\n\n return self.__height", "def height(self):\n\t\tpass", "def get_grid_height(self):\n # replace with your code\n return self._grid_height", "def get_grid_height(self):\n # replace with your code\n return self._grid_height", "def GetHeight(self):\r\n\r\n return self._height", "def height(self, p):\n if self._heights is None:\n return None\n return self._heights[p.index()]", "def height(self) -> int:\r\n return self.rect_uv.h", "def height(self):\n return self.i_node.distance(self.n_node)", "def get_grid_height(self):\r\n return self._height", "def get_grid_height(self):\n # replace with your code\n return self.grid_height", "def get_grid_height(self):\n # replace with your code\n return self.grid_height" ]
[ "0.77167135", "0.77167135", "0.77167135", "0.74308836", "0.735531", "0.7349286", "0.7349286", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.7335078", "0.73231614", "0.72989076", "0.72947294", "0.725975", "0.7238212", "0.7222822", "0.72143847", "0.72131264", "0.72131264", "0.7191308", "0.7191308", "0.7182854", "0.71742535", "0.71585536", "0.7154052", "0.7125444", "0.7118147", "0.7113414", "0.7113414", "0.7113414", "0.7094726", "0.70939773", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.70925826", "0.708179", "0.7066971", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70575935", "0.70498264", "0.70345783", "0.70345783", "0.70345783", "0.70335007", "0.69409406", "0.6939978", "0.6935003", "0.69284177", "0.6904025", "0.6904025", "0.6904025", "0.6904025", "0.6904025", "0.6904025", "0.69029355", "0.69001937", "0.6893518", "0.6891912", "0.68890715", "0.6871702", "0.6871702", "0.68604034", "0.6837392", "0.68367493", "0.68228716", "0.6822004", "0.6815385", "0.6815385" ]
0.70786816
60
Getter for puzzle width Returns an integer
def get_width(self): return self._width
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_grid_width(puzzle: str) -> int:\r\n return int(len(puzzle) ** (1 / 2))", "def width(self) -> int:", "def width(self) -> int:", "def width(self):\n return self.board.shape[1]", "def tileWidth(self):\n return self._tileWidth", "def expected_width(self):\n\t\treturn self.expected_tile_width * TILE_SIZE", "def getWidth(self) -> int:\n ...", "def get_width ( self ):\n return self.width", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> int:\n return self.__width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def width(self) -> int:\n return self._width", "def width(self):\n return len(self.mine_map[0])", "def get_grid_width(self):\r\n # replace with your code\r\n return self._width", "def width (self):\n return self._w", "def get_width(self):\n return self.width", "def get_width(self):\r\n return self._width", "def get_width(self):\r\n return self._width", "def get_width(self):\r\n return self._width", "def get_dimension_width(self):\n pass", "def get_width(self):\n return self.__width", "def width(self) -> int:\n\t\treturn self._raw_result['data']['width']", "def get_width(self):\n\t\treturn len(self._background) if self._background else 0", "def get_grid_width(self):\n # replace with your code\n return self._grid_width", "def get_grid_width(self):\n # replace with your code\n return self._grid_width", "def width(self):\n return self['width']", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(cls):\n return cls._width", "def get_width(self) -> int:\n return int(self._surface.get_width())", "def get_grid_width(self):\n # replace with your code\n return self.grid_width", "def get_grid_width(self):\n # replace with your code\n return self.grid_width", 
"def width(self):\n return (self.scene.shape[2] - self.size) // self.size + 1", "def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width", "def get_grid_width(self):\r\n # replace with your code\r\n return self._grid_width", "def __len__(self) -> int:\n return self.width * self.height", "def width(self):\n xx = self.xx\n return max(xx) - min(xx)", "def width(self) -> int:\n if self.props.max_width:\n max_width = UIMetric.parse(self.props.max_width).to_pixels(self.parent.width)\n return min(self.isize[0].to_pixels(self.parent.width), max_width)\n else:\n return self.isize[0].to_pixels(self.parent.width)", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self) -> int:\n\n return self._width", "def w(self):\n return self.width", "def width(self) -> int:\n return self._image_data.width", "def width(self) :\n return self.m_width", "def columns(self) -> int:\n return self.__squares[0].__len__()", "def getWidth(self):\n return self.width", "def getWidth(self):\n return self.width", "def twidth(self) -> int:\n return self.isize[0].to_pixels(self.parent.width)", "def width(self):\n return self.figure.scene.get_size()[0]", "def width(self) -> float:\n return self._width", "def width(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"width\")", "def width(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"width\")", "def width(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"width\")", "def get_grid_width(self):\r\n # replace with your code\r\n return self.grid_width", "def get_grid_width(self):\r\n return self.width", "def width(self):\n # type: () -> float\n return self._width", "def width(self) -> int:\r\n return self.rect_uv.w", "def get_size(self):\n return len(self.board)", "def width(self):\n return (self.__width)", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def getNumTiles(self):\n return (self.width) * (self.height)" ]
[ "0.8019565", "0.7344483", "0.7344483", "0.7160062", "0.7134164", "0.70831394", "0.70282716", "0.6884904", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.68540174", "0.6836778", "0.6815141", "0.6815141", "0.6804246", "0.6803066", "0.677592", "0.67748034", "0.67653507", "0.6755648", "0.6755648", "0.6755648", "0.6754968", "0.67021644", "0.6689736", "0.66793793", "0.66753185", "0.66753185", "0.6663978", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.66352946", "0.6630845", "0.662726", "0.66250145", "0.66250145", "0.66205883", "0.66169775", "0.66169775", "0.6616192", "0.6607031", "0.6587126", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6586865", "0.6584122", "0.6579246", "0.657749", "0.6568548", "0.6565546", "0.65496", "0.65496", "0.6540079", "0.65351915", "0.6533966", "0.6530981", "0.6530981", "0.6530981", "0.652754", "0.65032905", "0.6497651", "0.6496073", "0.6494149", "0.6483176", "0.6480997", "0.64790267" ]
0.66701204
43
Getter for the number at tile position pos Returns an integer
def get_number(self, row, col): return self._grid[row][col]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetTileIndex(self, pos):\r\n #pixel = rpg_image.GetPixel(self.image, pos)\r\n try:\r\n pixel = self.image_buffer[pos[0]][pos[1]]\r\n except IndexError, e:\r\n pixel = -1\r\n \r\n return pixel", "def tile_index_at(self, position: TilePosition) -> int:\r\n tile_index: int = pyxel.tilemap(self.tilemap_id).get(\r\n self.rect_uv.x + position.tile_x, self.rect_uv.y + position.tile_y)\r\n return tile_index", "def getTilePos(self, pos = None):\n\n if not pos:\n pos = self.actor.getPos()\n \n for i in range(len(pos)):\n pos[i] = int(math.floor( (pos[i] + self.dimensions[i]) / 2.0))\n #pos[i] = int(math.floor( pos[i] / 2.0))\n\n return pos", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_number(self, row, col):\r\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return 0", "def number(cls, tileName):\n return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None", "def get_tile(self, row, col):\n #print 'The value of tile at position: (',row,',',col,') is: ',self.grid[row][col]\n return self.grid[row][col]", "def __getnum__(self, i, j):\n return self.pos_to_num[(i, j)]", "def get_tile(left, up, right, down):\n tile = 0\n if left:\n tile += 1\n if up:\n tile += 2\n if right:\n tile += 4\n if down:\n tile += 8\n return tile", "def get_tileoffset(self, gid):\n return self.tileoffset_index.get(gid, (0, 0))", "def pos_number(self):\n return self._pos_number.zfill(2)", "def number_at_cell(self, pokemon_locations, grid_size, index):\n if self.get_game()[index] != UNEXPOSED:\n return int(self.get_game()[index])\n\n number = 0\n for neighbour in self.neighbour_directions(index, grid_size):\n if neighbour in pokemon_locations:\n number += 1\n\n return number", "def __getpos__(self, num):\n return self.num_to_pos[num]", "def get_tile(self, row, col):\r\n value = self.board[row][col]\r\n return value", "def top_left_tile_value(self):\n\t\treturn 1", "def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)", "def get_tile_coordinates_for_registration(self, grid_number, tile_number):\n dx, dy = self.get_tile_coordinates_d(grid_number, tile_number)\n width_d = self.get_tile_width_d(grid_number)\n height_d = self.get_tile_height_d(grid_number)\n return int((dx - width_d/2) * 1000), int((dy - height_d/2) * 1000)", "def get_grid_position(self, as_int=True):\n if as_int:\n return (\n int(self.x // Constant.TILE_SIZE),\n int(self.y // Constant.TILE_SIZE),\n )\n else:\n return (\n self.x / Constant.TILE_SIZE,\n self.y / Constant.TILE_SIZE,\n )", "def get_tile_location(self):\n if self.rect.x == 0:\n tile_x = 0\n elif self.rect.x % 32 == 0:\n tile_x = (self.rect.x / 32)\n else:\n tile_x = 0\n\n if self.rect.y == 0:\n tile_y = 0\n elif self.rect.y % 32 == 0:\n tile_y = (self.rect.y / 32)\n else:\n tile_y = 0\n\n return [tile_x, tile_y]", "def tile_id(self):\n return self._tile_id", "def get_position(self): # maybe encoded in filepath at some point\n result = (self.iter * self.row_step)% self.row_size, self.iter // (self.row_size * self.row_step)* self.col_step\n self.iter += 1\n return result", "def position_to_tile(self, position):\r\n return position[1] + self.width * position[0]", "def _get_coordinates(self, tile, position=None):\n if not position:\n position = self.position\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if position[i][j] == tile:\n return i, j\n\n return RuntimeError('Invalid 
tile value')", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, position):\n return self.tiles[position[x]][position[y]]", "def findTile(self, tileImage):\n str = tileImage.tostring()\n if str in self.TileDict:\n return self.TileDict[str] + 1\n else:\n return 0", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._grid_tile[row][col]", "def get_tile(self, row, col):\n # replace with your code\n if row < self._grid_height and col < self._grid_width:\n return self._grid_2048[row][col]", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._cells[row][col]", "def get_tile(self, row, col):\n return self.grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._cells[row][col]", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def get_tile(self, row, col):\r\n \r\n return self._cells[row][col]", "def get_tile_at_position(level, position):\n size = level_size(level)\n index = position_to_index(position, size)\n return level[index]", "def get_tile(self, row, col):\r\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self.board[row][col]", "def __get_cell_index(self, x, y) -> int:\n # \"The map data, in row-major order, starting with (0,0)\"\n return x + y * self.occupancy_map.info.width", "def get_tile(self, point):\n print \"Getting tile for %s\" % repr(point)\n return self.matrix[point.y][point.x]", "def getTile(self):\n return self.tile", "def to_index(self):\r\n return (BOARD_HEIGHT - 1 - self.y) * BOARD_HEIGHT + (BOARD_WIDTH - 1 - self.x)", "def getTile(self, position):\n columns = \"ABCDEFGH\"\n if not position[0] in columns or not position[1].isdigit():\n raise invalidPlacementError\n return self.board[columns.find(position[0])][int(position[1]) - 1]", "def tile_at(self, zoom, position):\n x, y = self.project_pixels(position, zoom)\n return (zoom, int(x/self.tileSize), int(y/self.tileSize))", "def get_tile(self, row, col):\n return self._grid[row][col]", "def get_position(self) -> Tuple[int]:\n return self.position.copy()", "def get_position(self, number):\n for rowidx, row in enumerate(self.numbers):\n for colidx, num in enumerate(row):\n if num == number:\n return rowidx, colidx", "def get_tile(self):\n return Tile.get_tile(self.get_number())", "def get(self) -> int:\n return self.nums.pop() if self.nums else -1", "def positionInPile(self):\n \n return self._pile.cardLabelIndex(self)", "def get_tile(self, row, col):\r\n\r\n return self._board[row][col]", "def _get_image_position(self):\n tile_conf = self.get_tile_configuration()\n image_conf = tile_conf.get(\"image\", None)\n if image_conf:\n return image_conf.get(\"position\", u\"left\")", "def position(self) -> int:\n return self.__pos", "def position_to_index(self, position, grid_size):\n x, y = position\n return x * grid_size + y", "def get_tile(self, row, col):\n tile_index = (row - 1) * self.num_col_tiles + (col - 1)\n tile = self.tiles[tile_index]\n 
return tile", "def index_from_position_tuple(self, position):\n x = self.base_values.index(position[0])\n y = self.base_values.index(position[1])\n return y * self.size + x", "def get_tile(self, row, col):\n return self._cells[row][col]", "def getNumTiles(self):\n return self.numTiles\n #raise NotImplementedError", "def index_to_position(self, index):\n col = index % self._grid_size\n row = index // self._grid_size\n return row, col", "def getNumTiles(self):\n return (self.width) * (self.height)", "def position(self) -> int:\n return self._position", "def position(self) -> int:\n return self._position", "def getNumTiles(self):\n\t\treturn self.numTiles", "def getNumTiles(self):\n return self.height * self.width", "def getNumTiles(self):\n return self.w * self.h", "def index(self, pos):\n for i, n in enumerate(self):\n if i == pos: return n\n raise Exception('Index out of bounds.')", "def get(self):\n return self.x-self.offset", "def get_tile(self, x, y):\n if x < 0 or x >= Settings.SIZE_X or y < 0 or y >= Settings.SIZE_Y:\n return MarkerType.NONE\n return self.__grid[y][x]", "def number_at_cell(game, pokemon_locations, grid_size, index):\n num = 0\n # number of Pokemon in neighbouring cells\n neighbours = neighbour_directions(index,grid_size)\n for neighbour in neighbours:\n if neighbour in pokemon_locations:\n num += 1\n return num", "def get_index(self, row, col):\n return (row * self.cols) + col", "def get_target_tile(self):\n if self.tank.flag is not None:\n x, y = self.tank.start_position\n else:\n self.get_flag() # Ensure that we have initialized it.\n x, y = self.flag.x, self.flag.y\n return Vec2d(int(x), int(y))", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def _get_image_index_position(self) :\n \n return self._image_index_position", "def which_cell(loc_x, loc_y):\n column = int(math.ceil((loc_x - LEFT_MARGIN) / CELL_SIZE))\n row = int(math.ceil((loc_y - TOP_MARGIN) / CELL_SIZE))\n cell_id = (row - 1) * CELL_COLUMN + column\n return cell_id", "def current_cover_position(self) -> int:\n return int(self._value)", "def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx", "def getScore(self,board):\n return board.getScore()[self.tile]", "def _get_pos(self):\n return self._pos", "def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)", "def positions(self, tileID, numSamples):", "def position_to_index(position, grid_size):\n return position[0]*grid_size+position[1]", "def _toTileNum(self, x, y, transpose=False):\n # TIFFCheckTile and TIFFComputeTile require pixel coordinates\n if not transpose:\n pixelX = int(x * self._tileWidth)\n pixelY = int(y * self._tileHeight)\n if x < 0 or y < 0 or pixelX >= self._imageWidth or pixelY >= self._imageHeight:\n raise InvalidOperationTiffError(\n 'Tile x=%d, y=%d does not exist' % (x, y))\n else:\n pixelX = int(x * self._tileHeight)\n pixelY = int(y * self._tileWidth)\n if x < 0 or y < 0 or pixelX >= self._imageHeight or pixelY >= self._imageWidth:\n raise InvalidOperationTiffError(\n 'Tile x=%d, y=%d does not exist' % (x, y))\n # We had been using TIFFCheckTile, but with z=0 and sample=0, this is\n # just a check that x, y is within the image\n # if libtiff_ctypes.libtiff.TIFFCheckTile(\n # self._tiffFile, pixelX, pixelY, 0, 0) == 0:\n # raise InvalidOperationTiffError(\n # 'Tile x=%d, y=%d does not exist' % (x, y))\n if self._tiffInfo.get('istiled'):\n tileNum = libtiff_ctypes.libtiff.TIFFComputeTile(\n self._tiffFile, pixelX, pixelY, 0, 0).value\n 
else:\n # TIFFComputeStrip with sample=0 is just the row divided by the\n # strip height\n tileNum = int(pixelY // self._stripHeight)\n return tileNum", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def ind(self, pos):\n row = int(pos[1:]) - 1\n column = self.letter_to_column(pos[0])\n return row, column", "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def get_tile_coordinates_d(self, grid_number, tile_number):\n origin_dx, origin_dy = self.cs.get_grid_origin_d(grid_number)\n return (origin_dx + self.grid_map_d[grid_number][tile_number][0],\n origin_dy + self.grid_map_d[grid_number][tile_number][1])", "def get(self, position):\n return self.numbers[position[0]][position[1]]", "def get_cell_value(self, index):\n x, y = index\n return self.grid[y][x]", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def get_entry(self, index = None):\n if self.land_tiledata and index and index < 0x4000:\n return self.land_tiledata[index]\n elif self.static_tiledata and index:\n return self.static_tiledata[index]\n else:\n return self.land_tiledata + self.static_tiledata", "def get_tile(self, row, col):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # only return if the row and column are ok\n return self._grid[row][col]" ]
[ "0.8069442", "0.76283735", "0.744377", "0.7299405", "0.7299405", "0.7299405", "0.71124285", "0.700416", "0.69980204", "0.686322", "0.6761352", "0.67372394", "0.6710306", "0.67092806", "0.67069876", "0.66530937", "0.6636823", "0.66345775", "0.6570085", "0.65366554", "0.65349215", "0.6499566", "0.6481782", "0.64753324", "0.6471532", "0.6468014", "0.64675784", "0.6446589", "0.64459515", "0.64459515", "0.64459515", "0.6434608", "0.6432376", "0.64142025", "0.641369", "0.6397792", "0.63745874", "0.6369533", "0.6360108", "0.63385695", "0.633799", "0.6331737", "0.6325139", "0.6307753", "0.6283626", "0.6282504", "0.6264083", "0.62482226", "0.62466925", "0.6224925", "0.62050015", "0.6198411", "0.6198173", "0.61868703", "0.618456", "0.6178556", "0.61711735", "0.6169904", "0.6169404", "0.6155223", "0.61293674", "0.61254525", "0.6120202", "0.611692", "0.61163235", "0.60713077", "0.60713077", "0.60697645", "0.60690594", "0.606356", "0.6063054", "0.6047831", "0.6027818", "0.60194564", "0.6012652", "0.60121036", "0.6004931", "0.5985901", "0.59836346", "0.59741896", "0.5972241", "0.59715146", "0.595949", "0.5941235", "0.5939513", "0.59385794", "0.59311146", "0.59190315", "0.59190315", "0.59190315", "0.5914522", "0.5912268", "0.5908268", "0.5890082", "0.5885636", "0.58850086", "0.5880949" ]
0.7249243
9