Columns:
  query      string (lengths 9 to 9.05k)
  document   string (lengths 10 to 222k)
  negatives  sequence (lengths 19 to 20)
  metadata   dict
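Each row pairs a natural-language query (typically a docstring) with the code snippet that implements it, plus a list of hard negatives: similar-looking but non-matching functions. The per-row metadata declares a single triplet objective over (query, document, negatives). As a minimal sketch of how such a row could be consumed, the helper below expands one row dict into (anchor, positive, negative) triplets; the field names follow the schema above, but the function itself and the toy row are illustrative assumptions, not part of the dataset.

```python
from typing import Dict, Iterator, Tuple


def row_to_triplets(row: Dict) -> Iterator[Tuple[str, str, str]]:
    """Expand one row into (anchor, positive, negative) training triplets.

    Assumes the schema above: 'query' and 'document' are strings and
    'negatives' is a list of negative code snippets.
    """
    anchor = row["query"]        # natural-language description
    positive = row["document"]   # the matching implementation
    for negative in row["negatives"]:
        yield anchor, positive, negative


# Toy row shaped like the records shown below (contents abbreviated).
example_row = {
    "query": "Unwraps the private key into an asn1crypto object",
    "document": "def unwrap(self): ...",
    "negatives": ["def parse_public_key(...): ...", "def rsa_decrypt(...): ..."],
    "metadata": {"objective": {"paired": [], "self": [],
                               "triplet": [["query", "document", "negatives"]]}},
}
triplets = list(row_to_triplets(example_row))
```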
Unwraps the private key into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey object
def unwrap(self): if self.algorithm == 'rsa': return self.asn1['private_key'].parsed if self.algorithm == 'dsa': params = self.asn1['private_key_algorithm']['parameters'] return DSAPrivateKey({ 'version': 0, 'p': params['p'], 'q': params['q'], 'g': params['g'], 'public_key': self.public_key.unwrap(), 'private_key': self.asn1['private_key'].parsed, }) if self.algorithm == 'ec': output = self.asn1['private_key'].parsed output['parameters'] = self.asn1['private_key_algorithm']['parameters'] output['public_key'] = self.public_key.unwrap() return output
[ "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)", "def unwrap(p8_private_key, passphrase=None):\n\n if passphrase:\n passphrase = tobytes(passphrase)\n\n found = False\n try:\n p8_private_key = PBES1.decrypt(p8_private_key, passphrase)\n found = True\n except PbesError as e:\n error_str = \"PBES1[%s]\" % str(e)\n except ValueError:\n error_str = \"PBES1[Invalid]\"\n\n if not found:\n try:\n p8_private_key = PBES2.decrypt(p8_private_key, passphrase)\n found = True\n except PbesError as e:\n error_str += \",PBES2[%s]\" % str(e)\n except ValueError:\n error_str += \",PBES2[Invalid]\"\n\n if not found:\n raise ValueError(\"Error decoding PKCS#8 (%s)\" % error_str)\n\n pk_info = DerSequence().decode(p8_private_key, nr_elements=(2, 3, 4))\n if len(pk_info) == 2 and not passphrase:\n raise ValueError(\"Not a valid clear PKCS#8 structure \"\n \"(maybe it is encrypted?)\")\n\n #\n # PrivateKeyInfo ::= SEQUENCE {\n # version Version,\n # privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,\n # privateKey PrivateKey,\n # attributes [0] IMPLICIT Attributes OPTIONAL\n # }\n # Version ::= INTEGER\n if pk_info[0] != 0:\n raise ValueError(\"Not a valid PrivateKeyInfo SEQUENCE\")\n\n # PrivateKeyAlgorithmIdentifier ::= AlgorithmIdentifier\n #\n # EncryptedPrivateKeyInfo ::= SEQUENCE {\n # encryptionAlgorithm EncryptionAlgorithmIdentifier,\n # encryptedData EncryptedData\n # }\n # EncryptionAlgorithmIdentifier ::= AlgorithmIdentifier\n\n # AlgorithmIdentifier ::= SEQUENCE {\n # algorithm OBJECT IDENTIFIER,\n # parameters ANY DEFINED BY algorithm OPTIONAL\n # }\n\n algo = DerSequence().decode(pk_info[1], nr_elements=(1, 2))\n algo_oid = DerObjectId().decode(algo[0]).value\n if len(algo) == 1:\n algo_params = None\n else:\n try:\n DerNull().decode(algo[1])\n algo_params = None\n except:\n algo_params = algo[1]\n\n # EncryptedData ::= OCTET STRING\n private_key = DerOctetString().decode(pk_info[2]).payload\n\n return (algo_oid, private_key, algo_params)", "def parse_or_generate_private_key( cls, private_key, lib ):\n \n extra = {}\n pubkey_pem = None \n \n if private_key is None or private_key == \"\" or private_key.upper() == \"AUTO\":\n \n # generate one\n _, privkey_pem = crypto.generate_key_pair( OBJECT_KEY_SIZE )\n\n extra['private_key'] = privkey_pem\n if lib is not None:\n lib.private_key = privkey_pem\n\n else:\n \n # is this a key literal?\n try:\n privkey = CryptoKey.importKey( private_key )\n if not privkey.has_private():\n raise Exception(\"Not a private key\")\n \n if lib is not None:\n lib.private_key = private_key\n\n return private_key, extra\n \n except:\n # not a key literal\n pass\n \n # is this a path?\n try:\n privkey = storagelib.read_private_key( private_key )\n except:\n raise Exception(\"Failed to load %s\" % private_key )\n \n privkey_pem = privkey.exportKey()\n\n extra['private_key'] = 
privkey_pem\n if lib is not None:\n lib.private_key = privkey_pem\n \n return privkey_pem, extra", "def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()", "def from_key(cls, key: PrivateKey) -> PrivateKey:", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def decrypt_key(self, key, private_key):\n # type: (bytes, RSA.RSA) -> bytes\n iv = key[0:self.ivLength]\n secret = key[self.ivLength:]\n return iv + private_key.private_decrypt(secret, RSA.pkcs1_oaep_padding)", "def import_private_key():\n file = open(\"private.pem\", \"rb\")\n private_key = RSA.import_key(file.read())\n file.close()\n return private_key", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def load(data):\n d, n = RSAUtils.parse_key(data)\n return RSAPrivateKey(d, n)", "def _serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def key_decrypted(self):\n if not self.key:\n return \"\"\n return crypto.dump_privatekey(crypto.FILETYPE_PEM, self._pkey()).decode('utf-8')", "def _parse_ssleay(data, key_type=\"rsa\"):\n private_key_parser = ASN1Parser(data)\n # \"rsa\" type as old format doesn't support rsa-pss parameters\n return Python_Key._parse_asn1_private_key(private_key_parser, key_type)", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwraps a public key into an asn1crypto.keys.RSAPublicKey, asn1crypto.core.Integer (for DSA) or asn1crypto.keys.ECPointBitString object
def unwrap(self): if self.algorithm == 'ec': return self.asn1['public_key'] return self.asn1['public_key'].parsed
[ "def parse_public_key( cls, public_key, lib):\n\n extra = {}\n try:\n # key literal?\n pubkey = CryptoKey.importKey( public_key )\n lib.public_key = pubkey.exportKey()\n return lib.public_key, extra\n except:\n # not a key literal\n # path to a key?\n try:\n pubkey = storagelib.read_public_key( public_key )\n except:\n raise Exception(\"Failed to load %s\" % public_key )\n\n lib.public_key = pubkey.exportKey()\n extra['public_key'] = lib.public_key # store this in a cert\n\n return lib.public_key, extra", "def convert_pubkey_to_rsa(key):\n public_key_encoded = \"\".join(key.split(\"\\n\")[1:-1])\n public_key_der = base64.b64decode(public_key_encoded)\n return RSA.importKey(public_key_der)", "def decode_public_key(bytes):\r\n return RSA.importKey(bytes)", "def get_pub_rsa_key(pub_key):\n return RSA.importKey(pub_key)", "def load_pubkey(pk, get_encoding=False):\n if hasattr(pk, \"public_bytes\"):\n if isinstance(\n pk,\n (\n rsa.RSAPublicKey,\n ec.EllipticCurvePublicKey,\n ed25519.Ed25519PublicKey,\n ed448.Ed448PublicKey,\n ),\n ):\n return pk\n raise SaltInvocationError(\n f\"Passed object is not a public key, but {pk.__class__.__name__}\"\n )\n pk = load_file_or_bytes(pk)\n if PEM_BEGIN in pk:\n try:\n return serialization.load_pem_public_key(pk)\n except ValueError as err:\n raise CommandExecutionError(\n \"Could not load PEM-encoded public key.\"\n ) from err\n try:\n return serialization.load_der_public_key(pk)\n except ValueError as err:\n raise CommandExecutionError(\"Could not load DER-encoded public key.\") from err", "def _wrap_publickey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PublicKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize public key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_public_key(der, backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def read_serialized_rsa_pub_key(serialized):\n n = None\n e = None\n rsa = from_hex(serialized)\n\n pos = 0\n ln = len(rsa)\n while pos < ln:\n tag = bytes_to_byte(rsa, pos)\n pos += 1\n length = bytes_to_short(rsa, pos)\n pos += 2\n\n if tag == 0x81:\n e = bytes_to_long(rsa[pos:pos+length])\n elif tag == 0x82:\n n = bytes_to_long(rsa[pos:pos+length])\n\n pos += length\n\n if e is None or n is None:\n logger.warning(\"Could not process import key\")\n raise ValueError('Public key deserialization failed')\n\n return n, e", "def DecodePublic(curve, bb):\n pk = curve.Decode(bb)\n if pk.is_neutral():\n raise Exception('Invalid public key (neutral point)')\n return pk", "def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()", "def serializePublicKey(public_key):\n\treturn public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)", "def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)", "def build_rsa_pubkey(pubkey_bytes):\n\n mod_len = e_len = 2048 // 8 # 0x100 bytes\n e = int.from_bytes(pubkey_bytes[0x40:0x40+e_len], 'little')\n modulus = int.from_bytes(pubkey_bytes[0x40+e_len:0x40+e_len+mod_len],\n 'little')\n\n pub_numbers = rsa.RSAPublicNumbers(e, modulus)\n\n return pub_numbers.public_key(openssl.backend)", "def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()", "def extractPublicKey(cert):\n pk = cert.get_pubkey()\n\n b = _util.binding\n l = b.lib\n 
ffi = b.ffi\n rsa = l.EVP_PKEY_get1_RSA(pk._pkey)\n buf = ffi.new(\"unsigned char **\")\n length = l.i2d_RSA_PUBKEY(rsa, buf)\n pk = ffi.buffer(buf[0], length)[:]\n ffi.gc(buf[0], l.OPENSSL_free)\n return pk", "def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()", "def _get_keyidv2(pubkey: SupportedKeyTypes) -> int:\n if isinstance(pubkey, RSAPublicKey):\n fmt = serialization.PublicFormat.PKCS1\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER, format=fmt)\n elif isinstance(pubkey, EllipticCurvePublicKey):\n fmt = serialization.PublicFormat.UncompressedPoint\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.X962, format=fmt)\n else:\n raise UnsupportedAlgorithm(f\"Unsupported public key type {type(pubkey)}\")\n\n default_be = backends.default_backend()\n digest = hashes.Hash(hashes.SHA1(), backend=default_be)\n digest.update(pubbytes)\n keydigest = digest.finalize()\n return int.from_bytes(keydigest[16:], \"big\")", "def load(data):\n e, n = RSAUtils.parse_key(data)\n return RSAPublicKey(e, n)", "def Read(key):\n rsa = json.loads(key)\n params = {\n 'modulus': util.Base64WSDecode(rsa['modulus']),\n 'publicExponent': util.Base64WSDecode(rsa['publicExponent'])\n }\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])", "def Read(key):\n rsa = json.loads(key)\n params = {'modulus' : util.Decode(rsa['modulus']),\n 'publicExponent' : util.Decode(rsa['publicExponent'])}\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unwraps an asn1crypto.keys.PrivateKeyInfo object into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey.
def _unwrap_private_key_info(key_info): key_alg = key_info.algorithm if key_alg == 'rsa' or key_alg == 'rsassa_pss': return key_info['private_key'].parsed if key_alg == 'dsa': params = key_info['private_key_algorithm']['parameters'] parsed = key_info['private_key'].parsed return DSAPrivateKey({ 'version': 0, 'p': params['p'], 'q': params['q'], 'g': params['g'], 'public_key': Integer(pow( params['g'].native, parsed.native, params['p'].native )), 'private_key': parsed, }) if key_alg == 'ec': parsed = key_info['private_key'].parsed parsed['parameters'] = key_info['private_key_algorithm']['parameters'] return parsed raise ValueError('Unsupported key_info.algorithm "%s"' % key_info.algorithm)
[ "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def unwrap(p8_private_key, passphrase=None):\n\n if passphrase:\n passphrase = tobytes(passphrase)\n\n found = False\n try:\n p8_private_key = PBES1.decrypt(p8_private_key, passphrase)\n found = True\n except PbesError as e:\n error_str = \"PBES1[%s]\" % str(e)\n except ValueError:\n error_str = \"PBES1[Invalid]\"\n\n if not found:\n try:\n p8_private_key = PBES2.decrypt(p8_private_key, passphrase)\n found = True\n except PbesError as e:\n error_str += \",PBES2[%s]\" % str(e)\n except ValueError:\n error_str += \",PBES2[Invalid]\"\n\n if not found:\n raise ValueError(\"Error decoding PKCS#8 (%s)\" % error_str)\n\n pk_info = DerSequence().decode(p8_private_key, nr_elements=(2, 3, 4))\n if len(pk_info) == 2 and not passphrase:\n raise ValueError(\"Not a valid clear PKCS#8 structure \"\n \"(maybe it is encrypted?)\")\n\n #\n # PrivateKeyInfo ::= SEQUENCE {\n # version Version,\n # privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,\n # privateKey PrivateKey,\n # attributes [0] IMPLICIT Attributes OPTIONAL\n # }\n # Version ::= INTEGER\n if pk_info[0] != 0:\n raise ValueError(\"Not a valid PrivateKeyInfo SEQUENCE\")\n\n # PrivateKeyAlgorithmIdentifier ::= AlgorithmIdentifier\n #\n # EncryptedPrivateKeyInfo ::= SEQUENCE {\n # encryptionAlgorithm EncryptionAlgorithmIdentifier,\n # encryptedData EncryptedData\n # }\n # EncryptionAlgorithmIdentifier ::= AlgorithmIdentifier\n\n # AlgorithmIdentifier ::= SEQUENCE {\n # algorithm OBJECT IDENTIFIER,\n # parameters ANY DEFINED BY algorithm OPTIONAL\n # }\n\n algo = DerSequence().decode(pk_info[1], nr_elements=(1, 2))\n algo_oid = DerObjectId().decode(algo[0]).value\n if len(algo) == 1:\n algo_params = None\n else:\n try:\n DerNull().decode(algo[1])\n algo_params = None\n except:\n algo_params = algo[1]\n\n # EncryptedData ::= OCTET STRING\n private_key = DerOctetString().decode(pk_info[2]).payload\n\n return (algo_oid, private_key, algo_params)", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def get_verify_key_from_cross_signing_key(\n key_info: Mapping[str, Any]\n) -> Tuple[str, VerifyKey]:\n # make sure that a `keys` field is provided\n if \"keys\" not in key_info:\n raise ValueError(\"Invalid key\")\n keys = key_info[\"keys\"]\n # and that it contains exactly one key\n if len(keys) == 1:\n key_id, key_data = next(iter(keys.items()))\n return key_id, decode_verify_key_bytes(key_id, decode_base64(key_data))\n else:\n raise ValueError(\"Invalid key\")", "def parse_or_generate_private_key( cls, private_key, lib ):\n \n extra = {}\n pubkey_pem = None \n \n if private_key is None or private_key == \"\" or private_key.upper() == \"AUTO\":\n \n # generate one\n _, privkey_pem = crypto.generate_key_pair( OBJECT_KEY_SIZE )\n\n extra['private_key'] = privkey_pem\n if lib is not None:\n lib.private_key = privkey_pem\n\n else:\n \n # is this a key literal?\n try:\n privkey = CryptoKey.importKey( private_key )\n if not privkey.has_private():\n raise 
Exception(\"Not a private key\")\n \n if lib is not None:\n lib.private_key = private_key\n\n return private_key, extra\n \n except:\n # not a key literal\n pass\n \n # is this a path?\n try:\n privkey = storagelib.read_private_key( private_key )\n except:\n raise Exception(\"Failed to load %s\" % private_key )\n \n privkey_pem = privkey.exportKey()\n\n extra['private_key'] = privkey_pem\n if lib is not None:\n lib.private_key = privkey_pem\n \n return privkey_pem, extra", "def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)", "def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def load(data):\n d, n = RSAUtils.parse_key(data)\n return RSAPrivateKey(d, n)", "def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed", "def convert_key_info_to_readable(key_info: dict[str, Any]) -> dict[str, Any]:\n key_fields = {'kid': 'key_id',\n 'kty': 'json_web_key_type',\n 'key_ops': 'key_operations',\n 'n': 'RSA_modulus',\n 'e': 'RSA_public_components',\n }\n for key, value in key_fields.items():\n if key in key_info:\n key_info[value] = key_info.pop(key)\n\n return key_info", "def decrypt_key (key, tenant_id):\n try:\n key = RSA.importKey(key,tenant_id)\n unencrypted_key = key.exportKey('PEM')\n if isinstance(unencrypted_key, ValueError):\n raise NfvoException(\"Unable to decrypt the private key: {}\".format(unencrypted_key), httperrors.Internal_Server_Error)\n if isinstance(unencrypted_key, bytes):\n unencrypted_key = unencrypted_key.decode(encoding='UTF-8')\n except ValueError as e:\n raise NfvoException(\"Unable to decrypt the private key: {}\".format(e), httperrors.Internal_Server_Error)\n return unencrypted_key", "def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def key_decrypted(self):\n if not self.key:\n return \"\"\n return crypto.dump_privatekey(crypto.FILETYPE_PEM, self._pkey()).decode('utf-8')", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes PEM encoding from a public key, private key or certificate. If the private key is encrypted, the password will be used to decrypt it.
def _unarmor_pem(data, password=None): object_type, headers, der_bytes = unarmor(data) type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)' armor_type = re.match(type_regex, object_type) if not armor_type: raise ValueError(pretty_message( ''' data does not seem to contain a PEM-encoded certificate, private key or public key ''' )) pem_header = armor_type.group(1) data = data.strip() # RSA private keys are encrypted after being DER-encoded, but before base64 # encoding, so they need to be handled specially if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']): algo = armor_type.group(2).lower() return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password)) key_type = pem_header.lower() algo = None if key_type == 'encrypted private key': key_type = 'private key' elif key_type == 'rsa public key': key_type = 'public key' algo = 'rsa' return (key_type, algo, der_bytes)
[ "def test_remove_pass(self):\n TEST_PASS = \"weakpass\"\n # Generate a test key with a password\n key = RSA.gen_key(2048, 5, callback=lambda: None)\n key_pem = key.as_pem(cipher='aes_256_cbc',\n callback=lambda x: TEST_PASS)\n # Now try to decrypt the key with the helper function\n key_out = SSHKeyUtils.remove_pass(key_pem, TEST_PASS)\n # Check returned key looks OK and is password-less\n self.assertIn('BEGIN RSA PRIVATE KEY', key_out)\n key_back_in = RSA.load_key_string(key_out,\n callback=lambda x: None)\n # Finally, test with wrong password\n self.assertRaises(RSA.RSAError, SSHKeyUtils.remove_pass,\n key_pem, \"wrong\")", "def _extract_keys_from_pem(mode, pem_contents, cert_format,\n passphrase=None):\n\n temp_pem_file = constants.SSL_PEM_FILE + '.temp'\n with os.fdopen(os.open(temp_pem_file, os.O_CREAT | os.O_WRONLY,\n constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),\n 'w') as f:\n f.write(pem_contents)\n\n if passphrase:\n passphrase = str(passphrase)\n\n private_bytes = None\n private_mode = False\n if mode in [constants.CERT_MODE_SSL,\n constants.CERT_MODE_TPM,\n constants.CERT_MODE_DOCKER_REGISTRY,\n constants.CERT_MODE_OPENSTACK,\n ]:\n private_mode = True\n\n with open(temp_pem_file, \"r\") as key_file:\n if private_mode:\n # extract private_key with passphrase\n try:\n private_key = serialization.load_pem_private_key(\n key_file.read(),\n password=passphrase,\n backend=default_backend())\n except Exception as e:\n raise exception.SysinvException(_(\"Error decrypting PEM \"\n \"file: %s\" % e))\n key_file.seek(0)\n # extract the certificate from the pem file\n cert = x509.load_pem_x509_certificate(key_file.read(),\n default_backend())\n os.remove(temp_pem_file)\n\n if private_mode:\n if not isinstance(private_key, rsa.RSAPrivateKey):\n raise exception.SysinvException(_(\"Only RSA encryption based \"\n \"Private Keys are supported.\"))\n\n private_bytes = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=cert_format,\n encryption_algorithm=serialization.NoEncryption())\n\n signature = mode + '_' + str(cert.serial_number)\n if len(signature) > 255:\n LOG.info(\"Truncating certificate serial no %s\" % signature)\n signature = signature[:255]\n LOG.info(\"config_certificate signature=%s\" % signature)\n\n # format=serialization.PrivateFormat.TraditionalOpenSSL,\n public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)\n\n return private_bytes, public_bytes, signature", "def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n .replace(\"-----BEGIN PUBLIC KEY-----\", \"\").replace(\n \"-----END PUBLIC KEY-----\", \"\")", "def ssl_decrypt_ca_cert_pem(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ssl_decrypt_ca_cert_pem\")", "def decrypt(mess,pvt_key):\n cipher = mess.decode('base64')\n mem = BIO.MemoryBuffer(pvt_key)\n key = RSA.load_key_bio(mem)\n try:\n plain = key.private_decrypt(cipher,RSA.pkcs1_oaep_padding)\n except:\n plain = \"\"\n\n if plain == \"\":\n return \"\"\n\n return plain", "def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = 
self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output", "def test_set_private_key_setter_encrypted_pem_str_password(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(\n self.encrypted_pem_private_key, password=self.private_key_password.decode()\n )\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def key_decrypted(self):\n if not self.key:\n return \"\"\n return crypto.dump_privatekey(crypto.FILETYPE_PEM, self._pkey()).decode('utf-8')", "def _parse_pem_key(raw_key_input):\n offset = raw_key_input.find(b'-----BEGIN ')\n if offset != -1:\n return raw_key_input[offset:]", "def dePem(s, name):\r\n prefix = \"-----BEGIN %s-----\" % name\r\n postfix = \"-----END %s-----\" % name \r\n start = s.find(prefix)\r\n if start == -1:\r\n raise SyntaxError(\"Missing PEM prefix\")\r\n end = s.find(postfix, start+len(prefix))\r\n if end == -1:\r\n raise SyntaxError(\"Missing PEM postfix\")\r\n s = s[start+len(\"-----BEGIN %s-----\" % name) : end]\r\n retBytes = a2b_base64(s) # May raise SyntaxError\r\n return retBytes", "def read_key_cert(filename='auth/jira_privatekey.pem'):\n with open('auth/jira_privatekey.pem', 'r') as j_pk:\n data = j_pk.read()\n private_key = data.strip()\n return private_key", "def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private", "def x509_to_pem(x509):\n pem = OpenSSL.crypto.dump_certificate(PEM_TYPE, x509)\n return pem", "def test_set_private_key_setter_pem_str(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def convert_key_to_pem ( key_filename, output_filename ) :\n cmd = 'openssl rsa -in ' + key_filename + ' -outform PEM -out ' + output_filename\n return subprocess.call( cmd, shell = True )", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def pfx2pem(input_file, output_file, passphrase=None):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx, passphrase)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n open(output_file, 'wb').write(pem)", "def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = 
key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)", "def ec_private_pem_to_private_bin(pem):\n return \"\".join(pem.split(\"\\n\")[1:-2]).decode(\"BASE64\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a PKCS12 ASN.1 DER-encoded structure and extracts certs and keys
def _parse_pkcs12(data, password, load_private_key): if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) if password is not None: if not isinstance(password, byte_cls): raise TypeError(pretty_message( ''' password must be a byte string, not %s ''', type_name(password) )) else: password = b'' certs = {} private_keys = {} pfx = Pfx.load(data) auth_safe = pfx['auth_safe'] if auth_safe['content_type'].native != 'data': raise ValueError(pretty_message( ''' Only password-protected PKCS12 files are currently supported ''' )) authenticated_safe = pfx.authenticated_safe mac_data = pfx['mac_data'] if mac_data: mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native key_length = { 'sha1': 20, 'sha224': 28, 'sha256': 32, 'sha384': 48, 'sha512': 64, 'sha512_224': 28, 'sha512_256': 32, }[mac_algo] mac_key = pkcs12_kdf( mac_algo, password, mac_data['mac_salt'].native, mac_data['iterations'].native, key_length, 3 # ID 3 is for generating an HMAC key ) hash_mod = getattr(hashlib, mac_algo) computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest() stored_hmac = mac_data['mac']['digest'].native if not constant_compare(computed_hmac, stored_hmac): raise ValueError('Password provided is invalid') for content_info in authenticated_safe: content = content_info['content'] if isinstance(content, OctetString): _parse_safe_contents(content.native, certs, private_keys, password, load_private_key) elif isinstance(content, EncryptedData): encrypted_content_info = content['encrypted_content_info'] encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm'] encrypted_content = encrypted_content_info['encrypted_content'].native decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password) _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key) else: raise ValueError(pretty_message( ''' Public-key-based PKCS12 files are not currently supported ''' )) key_fingerprints = set(private_keys.keys()) cert_fingerprints = set(certs.keys()) common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints)) key = None cert = None other_certs = [] if len(common_fingerprints) >= 1: fingerprint = common_fingerprints[0] key = private_keys[fingerprint] cert = certs[fingerprint] other_certs = [certs[f] for f in certs if f != fingerprint] return (key, cert, other_certs) if len(private_keys) > 0: first_key = sorted(list(private_keys.keys()))[0] key = private_keys[first_key] if len(certs) > 0: first_key = sorted(list(certs.keys()))[0] cert = certs[first_key] del certs[first_key] if len(certs) > 0: other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly) return (key, cert, other_certs)
[ "def asn1_to_x509(asn1):\n return der_to_x509(asn1_to_der(asn1))", "def asn1_loads(asn1_str):\n\n # ASN.1 grammar\n identifier = pp.Word(pp.alphas + \"_\")\n assign = pp.Literal(\"::=\")\n # typedef = identifier.setName(\"typeref\") + assign + identifier.setName(\"basetype\")\n comment1 = pp.Literal(\"#\") + pp.originalTextFor(pp.SkipTo(pp.LineEnd()))\n # typelist = pp.OneOrMore(typedef)\n meta1 = pp.LineStart() + identifier + pp.Literal(\":\") + pp.SkipTo(pp.LineEnd()).setDebug()\n meta2 = pp.LineStart() + pp.White() + pp.SkipTo(pp.LineEnd()).setDebug()\n metaval = meta1 + pp.ZeroOrMore(meta2)\n # metalist = pp.ZeroOrMore(comment1) + pp.Literal(\"/*\") + pp.OneOrMore(metaval) + pp.Literal(\"*/\")\n metalist = pp.SkipTo(pp.Literal(\"/*\")).setDebug() + pp.Literal(\"/*\") + pp.OneOrMore(\n metaval).setDebug() + pp.Literal(\"*/\")\n\n asn1 = metalist.parseString(asn1_str, parseAll=False)\n print(asn1)\n jaen = {\"meta\": {}, \"types\": []}\n return jaen", "def _extract_keys_from_pem(mode, pem_contents, cert_format,\n passphrase=None):\n\n temp_pem_file = constants.SSL_PEM_FILE + '.temp'\n with os.fdopen(os.open(temp_pem_file, os.O_CREAT | os.O_WRONLY,\n constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),\n 'w') as f:\n f.write(pem_contents)\n\n if passphrase:\n passphrase = str(passphrase)\n\n private_bytes = None\n private_mode = False\n if mode in [constants.CERT_MODE_SSL,\n constants.CERT_MODE_TPM,\n constants.CERT_MODE_DOCKER_REGISTRY,\n constants.CERT_MODE_OPENSTACK,\n ]:\n private_mode = True\n\n with open(temp_pem_file, \"r\") as key_file:\n if private_mode:\n # extract private_key with passphrase\n try:\n private_key = serialization.load_pem_private_key(\n key_file.read(),\n password=passphrase,\n backend=default_backend())\n except Exception as e:\n raise exception.SysinvException(_(\"Error decrypting PEM \"\n \"file: %s\" % e))\n key_file.seek(0)\n # extract the certificate from the pem file\n cert = x509.load_pem_x509_certificate(key_file.read(),\n default_backend())\n os.remove(temp_pem_file)\n\n if private_mode:\n if not isinstance(private_key, rsa.RSAPrivateKey):\n raise exception.SysinvException(_(\"Only RSA encryption based \"\n \"Private Keys are supported.\"))\n\n private_bytes = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=cert_format,\n encryption_algorithm=serialization.NoEncryption())\n\n signature = mode + '_' + str(cert.serial_number)\n if len(signature) > 255:\n LOG.info(\"Truncating certificate serial no %s\" % signature)\n signature = signature[:255]\n LOG.info(\"config_certificate signature=%s\" % signature)\n\n # format=serialization.PrivateFormat.TraditionalOpenSSL,\n public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)\n\n return private_bytes, public_bytes, signature", "def test_x509_cert_and_key(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_cert_and_key_pem, quiet=True), self.x509_cert_and_key_pem_json)", "def _parse_ssleay(data, key_type=\"rsa\"):\n private_key_parser = ASN1Parser(data)\n # \"rsa\" type as old format doesn't support rsa-pss parameters\n return Python_Key._parse_asn1_private_key(private_key_parser, key_type)", "def parse(self, raw, encoding='PEM'):\n data = dict({})\n \n try:\n if encoding == 'PEM':\n csr = x509.load_pem_x509_csr(raw, backend=self.__backend)\n elif encoding in ['DER','PFX','P12']:\n csr = x509.load_der_x509_csr(raw, backend=self.__backend)\n else:\n raise NotImplementedError('Unsupported certificate request encoding')\n except Exception as err:\n raise 
Exception(err)\n\n data['subject'] = csr.subject\n data['digest'] = csr.signature_hash_algorithm\n data['signature'] = csr.signature\n\n return data", "def test_x509_letsencrypt(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_letsencrypt, quiet=True), self.x509_letsencrypt_json)", "def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List", "def _parse_dsa_ssleay(data):\n private_key_parser = ASN1Parser(data)\n return Python_Key._parse_dsa_private_key(private_key_parser)", "def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)", "def _parse(self, chain_raw, lz4_):\n if lz4_:\n chain_raw = lz4.loads(chain_raw).decode(\"utf-8\")\n chain = json.loads(chain_raw)\n for index in range(1, len(chain) + 1):\n cert_dict = chain[str(index)]\n cert_dict['subject_sig_key'] = \\\n base64.b64decode(cert_dict['subject_sig_key'])\n cert_dict['subject_enc_key'] = \\\n base64.b64decode(cert_dict['subject_enc_key'])\n cert_dict['signature'] = \\\n base64.b64decode(cert_dict['signature'])\n cert = Certificate.from_dict(cert_dict)\n self.certs.append(cert)", "def x509_to_der(x509):\n return OpenSSL.crypto.dump_certificate(ASN1_TYPE, x509)", "def parse_der_certificates(der_bytes: bytes) -> List[Certificate]:\n\n result = []\n try:\n leaf = x509.load_der_x509_certificate(der_bytes, default_backend())\n result.append(leaf)\n _, remaining_data = decode(der_bytes)\n while len(remaining_data) > 0:\n cert = x509.load_der_x509_certificate(remaining_data, default_backend())\n result.append(cert)\n _, remaining_data = decode(remaining_data)\n except Exception:\n raise X509CertificateError('Unable to parse DER X.509 certificate')\n\n return result", "def der_to_asn1(der):\n return asn1crypto.x509.Certificate.load(der)", "def dumpasn1(self):\n\n ret = None\n fn = \"dumpasn1.%d.tmp\" % os.getpid()\n try:\n f = open(fn, \"wb\")\n f.write(self.get_DER())\n f.close()\n p = subprocess.Popen((\"dumpasn1\", \"-a\", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n ret = \"\\n\".join(x for x in p.communicate()[0].splitlines() if x.startswith(\" \"))\n except Exception, e:\n ret = \"[Could not run dumpasn1: %s]\" % e\n finally:\n os.unlink(fn)\n return ret", "def test_x509_ca_cert(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_ca_cert, quiet=True), self.x509_ca_cert_json)", "def get_der(self):\n return OpenSSL.crypto.dump_certificate(\n OpenSSL.crypto.FILETYPE_ASN1, self._cert)", "def parse(self, certificate_file):\n cert = load_json_file(certificate_file)\n self.subject = cert['subject']\n self.subject_sig_key = base64.b64decode(cert['subject_sig_key'])\n self.subject_enc_key = base64.b64decode(cert['subject_enc_key'])\n self.issuer = cert['issuer']\n self.version = cert['version']\n self.issuing_time = cert['issuing_time']\n self.expiration_time = cert['expiration_time']\n self.sign_algorithm = cert['sign_algorithm']\n self.encryption_algorithm = cert['enc_algorithm']\n self.signature = base64.b64decode(cert['signature'])", "def get_certificate_from_file(filename):\r\n return ASN1CertificateData.from_file(filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses a SafeContents PKCS12 ASN.1 structure and extracts certs and keys
def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key): if isinstance(safe_contents, byte_cls): safe_contents = SafeContents.load(safe_contents) for safe_bag in safe_contents: bag_value = safe_bag['bag_value'] if isinstance(bag_value, CertBag): if bag_value['cert_id'].native == 'x509': cert = bag_value['cert_value'].parsed public_key_info = cert['tbs_certificate']['subject_public_key_info'] certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed elif isinstance(bag_value, PrivateKeyInfo): private_keys[_fingerprint(bag_value, load_private_key)] = bag_value elif isinstance(bag_value, EncryptedPrivateKeyInfo): encryption_algorithm_info = bag_value['encryption_algorithm'] encrypted_key_bytes = bag_value['encrypted_data'].native decrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password) private_key = PrivateKeyInfo.load(decrypted_key_bytes) private_keys[_fingerprint(private_key, load_private_key)] = private_key elif isinstance(bag_value, SafeContents): _parse_safe_contents(bag_value, certs, private_keys, password, load_private_key) else: # We don't care about CRL bags or secret bags pass
[ "def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n certs = {}\n private_keys = {}\n\n pfx = Pfx.load(data)\n\n auth_safe = pfx['auth_safe']\n if auth_safe['content_type'].native != 'data':\n raise ValueError(pretty_message(\n '''\n Only password-protected PKCS12 files are currently supported\n '''\n ))\n authenticated_safe = pfx.authenticated_safe\n\n mac_data = pfx['mac_data']\n if mac_data:\n mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native\n key_length = {\n 'sha1': 20,\n 'sha224': 28,\n 'sha256': 32,\n 'sha384': 48,\n 'sha512': 64,\n 'sha512_224': 28,\n 'sha512_256': 32,\n }[mac_algo]\n mac_key = pkcs12_kdf(\n mac_algo,\n password,\n mac_data['mac_salt'].native,\n mac_data['iterations'].native,\n key_length,\n 3 # ID 3 is for generating an HMAC key\n )\n hash_mod = getattr(hashlib, mac_algo)\n computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()\n stored_hmac = mac_data['mac']['digest'].native\n if not constant_compare(computed_hmac, stored_hmac):\n raise ValueError('Password provided is invalid')\n\n for content_info in authenticated_safe:\n content = content_info['content']\n\n if isinstance(content, OctetString):\n _parse_safe_contents(content.native, certs, private_keys, password, load_private_key)\n\n elif isinstance(content, EncryptedData):\n encrypted_content_info = content['encrypted_content_info']\n\n encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']\n encrypted_content = encrypted_content_info['encrypted_content'].native\n decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)\n\n _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)\n\n else:\n raise ValueError(pretty_message(\n '''\n Public-key-based PKCS12 files are not currently supported\n '''\n ))\n\n key_fingerprints = set(private_keys.keys())\n cert_fingerprints = set(certs.keys())\n\n common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))\n\n key = None\n cert = None\n other_certs = []\n\n if len(common_fingerprints) >= 1:\n fingerprint = common_fingerprints[0]\n key = private_keys[fingerprint]\n cert = certs[fingerprint]\n other_certs = [certs[f] for f in certs if f != fingerprint]\n return (key, cert, other_certs)\n\n if len(private_keys) > 0:\n first_key = sorted(list(private_keys.keys()))[0]\n key = private_keys[first_key]\n\n if len(certs) > 0:\n first_key = sorted(list(certs.keys()))[0]\n cert = certs[first_key]\n del certs[first_key]\n\n if len(certs) > 0:\n other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)\n\n return (key, cert, other_certs)", "def extract_contents(self, contents):\n a_username = contents['a_username']\n kab = contents['kab']\n pwd = contents['pwd']\n iv = contents['iv']\n return (a_username, kab, pwd, iv)", "def _extract_keys_from_pem(mode, pem_contents, cert_format,\n passphrase=None):\n\n temp_pem_file = constants.SSL_PEM_FILE + '.temp'\n with os.fdopen(os.open(temp_pem_file, os.O_CREAT | os.O_WRONLY,\n constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),\n 'w') as f:\n f.write(pem_contents)\n\n if passphrase:\n passphrase = 
str(passphrase)\n\n private_bytes = None\n private_mode = False\n if mode in [constants.CERT_MODE_SSL,\n constants.CERT_MODE_TPM,\n constants.CERT_MODE_DOCKER_REGISTRY,\n constants.CERT_MODE_OPENSTACK,\n ]:\n private_mode = True\n\n with open(temp_pem_file, \"r\") as key_file:\n if private_mode:\n # extract private_key with passphrase\n try:\n private_key = serialization.load_pem_private_key(\n key_file.read(),\n password=passphrase,\n backend=default_backend())\n except Exception as e:\n raise exception.SysinvException(_(\"Error decrypting PEM \"\n \"file: %s\" % e))\n key_file.seek(0)\n # extract the certificate from the pem file\n cert = x509.load_pem_x509_certificate(key_file.read(),\n default_backend())\n os.remove(temp_pem_file)\n\n if private_mode:\n if not isinstance(private_key, rsa.RSAPrivateKey):\n raise exception.SysinvException(_(\"Only RSA encryption based \"\n \"Private Keys are supported.\"))\n\n private_bytes = private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=cert_format,\n encryption_algorithm=serialization.NoEncryption())\n\n signature = mode + '_' + str(cert.serial_number)\n if len(signature) > 255:\n LOG.info(\"Truncating certificate serial no %s\" % signature)\n signature = signature[:255]\n LOG.info(\"config_certificate signature=%s\" % signature)\n\n # format=serialization.PrivateFormat.TraditionalOpenSSL,\n public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)\n\n return private_bytes, public_bytes, signature", "def test_x509_cert_and_key(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_cert_and_key_pem, quiet=True), self.x509_cert_and_key_pem_json)", "def test_otoroshi_controllers_adminapi_pki_controller_import_cert_from_p12(self):\n pass", "def test_x509_letsencrypt(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_letsencrypt, quiet=True), self.x509_letsencrypt_json)", "def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List", "def is_pkcs12(data):\n try:\n header = Tlv.parse_from(Tlv.unpack(0x30, data))[0]\n return header.tag == 0x02 and header.value == b\"\\x03\"\n except ValueError:\n logger.debug(\"Unable to parse TLV\", exc_info=True)\n return False", "def ExtractApexPayloadAndSignContents(self, apk_entries, sepolicy_entries, apk_keys, payload_key, signing_args):\n if not os.path.exists(self.debugfs_path):\n raise ApexSigningError(\n \"Couldn't find location of debugfs_static: \" +\n \"Path {} does not exist. \".format(self.debugfs_path) +\n \"Make sure bin/debugfs_static can be found in -p <path>\")\n if not os.path.exists(self.fsckerofs_path):\n raise ApexSigningError(\n \"Couldn't find location of fsck.erofs: \" +\n \"Path {} does not exist. 
\".format(self.fsckerofs_path) +\n \"Make sure bin/fsck.erofs can be found in -p <path>\")\n payload_dir = common.MakeTempDir()\n extract_cmd = ['deapexer', '--debugfs_path', self.debugfs_path,\n '--fsckerofs_path', self.fsckerofs_path,\n 'extract',\n self.apex_path, payload_dir]\n common.RunAndCheckOutput(extract_cmd)\n assert os.path.exists(self.apex_path)\n\n has_signed_content = False\n for entry in apk_entries:\n apk_path = os.path.join(payload_dir, entry)\n\n key_name = apk_keys.get(os.path.basename(entry))\n if key_name in common.SPECIAL_CERT_STRINGS:\n logger.info('Not signing: %s due to special cert string', apk_path)\n continue\n\n logger.info('Signing apk file %s in apex %s', apk_path, self.apex_path)\n # Rename the unsigned apk and overwrite the original apk path with the\n # signed apk file.\n unsigned_apk = common.MakeTempFile()\n os.rename(apk_path, unsigned_apk)\n common.SignFile(\n unsigned_apk, apk_path, key_name, self.key_passwords.get(key_name),\n codename_to_api_level_map=self.codename_to_api_level_map)\n has_signed_content = True\n\n for entry in sepolicy_entries:\n sepolicy_path = os.path.join(payload_dir, entry)\n\n if not 'etc' in entry:\n logger.warning('Sepolicy path does not contain the intended directory name etc:'\n ' %s', entry)\n\n key_name = apk_keys.get(os.path.basename(entry))\n if key_name is None:\n logger.warning('Failed to find signing keys for {} in'\n ' apex {}, payload key will be used instead.'\n ' Use \"-e <name>=\" to specify a key'\n .format(entry, self.apex_path))\n key_name = payload_key\n\n if key_name in common.SPECIAL_CERT_STRINGS:\n logger.info('Not signing: %s due to special cert string', sepolicy_path)\n continue\n\n if OPTIONS.sign_sepolicy_path is not None:\n sig_path = os.path.join(payload_dir, sepolicy_path + '.sig')\n fsv_sig_path = os.path.join(payload_dir, sepolicy_path + '.fsv_sig')\n old_sig = common.MakeTempFile()\n old_fsv_sig = common.MakeTempFile()\n os.rename(sig_path, old_sig)\n os.rename(fsv_sig_path, old_fsv_sig)\n\n logger.info('Signing sepolicy file %s in apex %s', sepolicy_path, self.apex_path)\n if common.SignSePolicy(sepolicy_path, key_name, self.key_passwords.get(key_name)):\n has_signed_content = True\n\n if self.sign_tool:\n logger.info('Signing payload contents in apex %s with %s', self.apex_path, self.sign_tool)\n # Pass avbtool to the custom signing tool\n cmd = [self.sign_tool, '--avbtool', self.avbtool]\n # Pass signing_args verbatim which will be forwarded to avbtool (e.g. 
--signing_helper=...)\n if signing_args:\n cmd.extend(['--signing_args', '\"{}\"'.format(signing_args)])\n cmd.extend([payload_key, payload_dir])\n common.RunAndCheckOutput(cmd)\n has_signed_content = True\n\n return payload_dir, has_signed_content", "def test_x509_ca_cert(self):\n self.assertEqual(jc.parsers.x509_cert.parse(self.x509_ca_cert, quiet=True), self.x509_ca_cert_json)", "def extract_certs_from_pem(pem_contents):\n start = 0\n certs = []\n while True:\n index = pem_contents.find(constants.BEGIN_CERTIFICATE_MARKER, start)\n if index == -1:\n break\n try:\n cert = x509.load_pem_x509_certificate(pem_contents[index::],\n default_backend())\n except Exception:\n LOG.exception(_(\"Load pem x509 certificate failed at file \"\n \"location: %s\") % index)\n raise exception.SysinvException(_(\n \"Failed to load pem x509 certificate\"))\n\n certs.append(cert)\n start = index + len(constants.BEGIN_CERTIFICATE_MARKER)\n return certs", "def parse(self, raw, encoding='PEM'):\n data = dict({})\n \n try:\n if encoding == 'PEM':\n csr = x509.load_pem_x509_csr(raw, backend=self.__backend)\n elif encoding in ['DER','PFX','P12']:\n csr = x509.load_der_x509_csr(raw, backend=self.__backend)\n else:\n raise NotImplementedError('Unsupported certificate request encoding')\n except Exception as err:\n raise Exception(err)\n\n data['subject'] = csr.subject\n data['digest'] = csr.signature_hash_algorithm\n data['signature'] = csr.signature\n\n return data", "def parseCertificateLines(self, lines):\n result = list()\n i_cert = 1\n cur_certificate = dict()\n option_signature = ' : '\n for i_line, line in enumerate(lines):\n start_signature = '%d-------' % i_cert\n if line.startswith(start_signature):\n if cur_certificate:\n result.append(cur_certificate)\n cur_certificate = dict()\n i_cert += 1\n\n if option_signature in line:\n name = line[:line.index(option_signature)].strip()\n name = CERT_OPTION_NAME_REPLACEMENT.get(name, name)\n value = line[line.index(option_signature) + len(option_signature):].strip()\n if name == 'Issuer':\n value = self._parseCertificateOptionValue(value)\n elif name == 'Subject':\n value = self._parseCertificateOptionValue(value)\n\n if name in ('CA cert URL', 'CDP'):\n if name not in cur_certificate:\n cur_certificate[name] = list()\n cur_certificate[name].append(value)\n else:\n cur_certificate[name] = value\n\n if cur_certificate:\n result.append(cur_certificate)\n return tuple(result)", "def loads(self, txt, usage, owner):\n spec = json.loads(txt)\n for kspec in spec[\"keys\"]:\n if kspec[\"alg\"] == \"RSA\":\n e = my_b64decode(kspec[\"exp\"])\n n = my_b64decode(kspec[\"mod\"])\n\n k = M2Crypto.RSA.new_pub_key((long_to_mpi(e), long_to_mpi(n)))\n\n if \"kid\" in kspec:\n tag = \"%s:%s\" % (\"rsa\", kspec[\"kid\"])\n else:\n tag = \"rsa\"\n\n self.add_key(k, \"rsa\", usage, owner)\n elif kspec[\"alg\"] == \"HMAC\":\n self.add_key(kspec[\"modulus\"], \"hmac\", usage, owner)", "def test_pkcs12_ordering():\n\n def make_cert(name):\n key = ec.generate_private_key(ec.SECP256R1())\n subject = x509.Name(\n [\n x509.NameAttribute(x509.NameOID.COMMON_NAME, name),\n ]\n )\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(subject)\n .public_key(key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(now)\n .not_valid_after(now)\n .sign(key, hashes.SHA256())\n )\n return (key, cert)\n\n # Make some certificates with distinct names.\n a_name = \"A\" * 20\n b_name = \"B\" * 20\n c_name = \"C\" * 20\n a_key, 
a_cert = make_cert(a_name)\n _, b_cert = make_cert(b_name)\n _, c_cert = make_cert(c_name)\n\n # Bundle them in a PKCS#12 file in order A, B, C.\n p12 = serialize_key_and_certificates(\n b\"p12\", a_key, a_cert, [b_cert, c_cert], serialization.NoEncryption()\n )\n\n # Parse them out. The API should report them in the same order.\n (key, cert, certs) = load_key_and_certificates(p12, None)\n assert cert == a_cert\n assert certs == [b_cert, c_cert]\n\n # The ordering in the PKCS#12 file itself should also match.\n a_idx = p12.index(a_name.encode(\"utf-8\"))\n b_idx = p12.index(b_name.encode(\"utf-8\"))\n c_idx = p12.index(c_name.encode(\"utf-8\"))\n\n assert a_idx < b_idx < c_idx", "def unpack_keys_from_xfer(key_pack_hex: hex,\n path=paths.nacl_keys,\n *args,\n **kwargs):\n global public_box\n\n try:\n key_dict = public_box.decrypt(key_pack_hex)\n key_dict = json.loads(key_dict)\n\n aes_key = key_dict[\"aes\"]\n AES256Cipher().write_key(aes_key.encode())\n\n fernet_key = key_dict[\"fernet\"]\n FernetCipher().write_key(fernet_key.encode())\n\n chacha_key = key_dict[\"chacha\"]\n XChaCha20Poly1305.write_key(Base64Encoder.decode(chacha_key))\n\n except:\n print(sysMsgList.keysUnpackFail)", "def load_p12(self, p12_data=None):\n if p12_data is None:\n p12_data = open(self.p12_path, 'rb').read()\n\n p12 = crypto.load_pkcs12(p12_data, self.prikey_password)\n prikey_data = crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n prikey_data = prikey_data.replace('-----BEGIN PRIVATE KEY-----\\n', '').replace('\\n-----END PRIVATE KEY-----', '').decode('base64')\n pubkey_data = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pubkey_data = pubkey_data.replace('-----BEGIN CERTIFICATE-----\\n', '').replace('\\n-----END CERTIFICATE-----', '').decode('base64')\n self.load_pubkey(pubkey_data=pubkey_data)\n self._load_prikey_with_decrypted_data(decrypted_prikey_data=prikey_data)\n return", "def info_from_args(args):\n return CertInfo(\n subject=parse_dn(args.subject),\n usage=parse_list(args.usage),\n alt_names=parse_list(args.san),\n ocsp_nocheck=args.ocsp_nocheck,\n ocsp_must_staple=args.ocsp_must_staple,\n ocsp_must_staple_v2=args.ocsp_must_staple_v2,\n ocsp_urls=parse_list(args.ocsp_urls),\n crl_urls=parse_list(args.crl_urls),\n issuer_urls=parse_list(args.issuer_urls),\n permit_subtrees=parse_list(args.permit_subtrees),\n exclude_subtrees=parse_list(args.exclude_subtrees),\n ca=args.CA,\n path_length=args.path_length)", "def cert_content(self):\n return self._cert_content" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process and return selected confounds from the confounds file
def _select_confounds(confounds_file, selected_confounds): import pandas as pd import numpy as np confounds_df = pd.read_csv(confounds_file, sep='\t', na_values='n/a') # fill the first value of FramewiseDisplacement with the mean. if 'FramewiseDisplacement' in selected_confounds: confounds_df['FramewiseDisplacement'] = confounds_df['FramewiseDisplacement'].fillna( np.mean(confounds_df['FramewiseDisplacement'])) desired_confounds = confounds_df[selected_confounds] return desired_confounds
[ "def _select_confounds(confounds_file, selected_confounds):\n import pandas as pd\n import numpy as np\n import re\n\n confounds_df = pd.read_csv(confounds_file, sep='\\t', na_values='n/a')\n # regular expression to capture confounds specified at the command line\n confound_expr = re.compile(r\"|\".join(selected_confounds))\n expanded_confounds = list(filter(confound_expr.fullmatch, confounds_df.columns))\n imputables = ('framewise_displacement', 'std_dvars', 'dvars', '.*derivative1.*')\n\n # regular expression to capture all imputable confounds\n impute_expr = re.compile(r\"|\".join(imputables))\n expanded_imputables = list(filter(impute_expr.fullmatch, expanded_confounds))\n for imputable in expanded_imputables:\n vals = confounds_df[imputable].values\n if not np.isnan(vals[0]):\n continue\n # Impute the mean non-zero, non-NaN value\n confounds_df[imputable][0] = np.nanmean(vals[vals != 0])\n\n desired_confounds = confounds_df[expanded_confounds]\n # check to see if there are any remaining nans\n if desired_confounds.isna().values.any():\n msg = \"The selected confounds contain nans: {conf}\".format(conf=expanded_confounds)\n raise ValueError(msg)\n return desired_confounds", "def read_candidates(file_path):\n pass", "def read_conect(self):\n self.conect_section = []\n if not self.lines:\n self.read_lines()\n for line in self.lines:\n if \"CONECT\" in line[0:6]:\n self.conect_section.append(line)", "def _read_conll(cls, input_file):\n #def read_conll(input_file):\n sents = []\n sent, labels = [], []\n for line in open(input_file):\n if line.startswith(\"# sent_id\"):\n current_id = line.strip().split(\" = \")[1]\n elif line.strip() == \"\":\n if len(sent) > 0:\n sents.append((current_id, sent, labels))\n sent, labels = [], []\n else:\n token, label = line.strip().split(\"\\t\")\n sent.append(token)\n labels.append(label)\n return sents", "def processFile(txt,filename):\n res = cveRex.findall(txt+\"\\n\"+filename)\n if res:\n #suppression des doublons\n res=unique(res)\n for cve in res:\n # ce if permet d'éviter de mettre deux fois la meme CVE lorsqu'elle provient de deux fichiers différents\n ve=\"CVE-\"+cve[2]+'-'+cve[3]\n #ve=\"CVE-\"+cve\n if ve not in exploits[\"cve\"]:\n exploits['cve'].append(ve)\n exploits['Source File'].append(filename)\n logging.debug(\"%s,%s\"%(ve,filename))", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def semcor2conc(args):\r\n input_files = list_files(*args.input_files)\r\n types = list(args.types)\r\n output_file = args.output_file or output_default / '{}_conc.csv'.format('_'.join(types))\r\n output_file = Path(output_file)\r\n left_context = args.left\r\n right_context = args.right\r\n separator = args.separator\r\n filter_pos = args.pos\r\n kind_id = args.kind_id\r\n with output_file.open('w') as file:\r\n x = 'last\\tnext\\tlemma' if args.add_closest else 'lemma'\r\n file.write('\\t'.join(['concordance', 'file', 'token_id', 'left', 'wordform', 'right', x, 'pos', 'sense_key\\n']))\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n tokenlist = list(generate_tokenlist(corpus_file.text))\r\n chosen_words = [index for (index, token) in enumerate(tokenlist) if token.lemma in types]\r\n for word in chosen_words:\r\n node = tokenlist[word]\r\n pos = node.pos\r\n if filter_pos and not re.match(r'{}'.format([x for x in filter_pos]), pos):\r\n continue\r\n if kind_id == 'lemma_pos':\r\n 
wordtype = '/'.join([node.lemma, node.pos])\r\n elif kind_id == 'wordform':\r\n wordtype = node.wordform\r\n else:\r\n wordtype = node.lemma\r\n token_id = '/'.join([wordtype, corpus_file.shortname, str(word + 1)])\r\n left, right = generate_context(tokenlist, word, left_context, right_context, separator, len(tokenlist))\r\n if args.add_closest:\r\n last = tokenlist[word-1].wordform\r\n following = tokenlist[word+1].wordform\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, last, following, node.lemma, pos, node.sense_key or 'NA']\r\n else:\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, node.lemma, pos, node.sense_key or 'NA']\r\n file.write('\\t'.join(line) + '\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))", "def _load_confounds_main(\n confounds_raw, strategy=[\"minimal\"], n_components=0.95, motion_model=\"6params\"\n):\n\n # Convert tsv file to pandas dataframe\n if not isinstance(confounds_raw, pd.DataFrame):\n confounds_raw = pd.read_csv(confounds_raw, delimiter=\"\\t\", encoding=\"utf-8\")\n\n # Add chosen confounds based on strategy to dataframe\n confounds_of_interest = set()\n confounds_out = pd.DataFrame()\n\n for strat in strategy:\n if strat in confound_dict.keys():\n\n confounds_of_interest |= set(_confound_strat(strat, confounds_raw))\n else:\n confounds_of_interest.add(strat)\n\n # Remove motion confounds and concatenate columns to confounds_out\n non_motion_confounds = [\n conf\n for conf in confounds_of_interest\n if ((\"rot\" not in conf) and (\"trans\" not in conf))\n ]\n\n confounds_out = pd.concat(\n (confounds_out, confounds_raw[list(non_motion_confounds)]), axis=1\n )\n\n # Apply PCA on motion confounds\n motion_bool = set(motion_6params) & confounds_of_interest\n if motion_bool:\n confounds_out = _pca_motion(\n confounds_out, confounds_raw, n_components, motion_model,\n )\n\n return confounds_out", "def read_conll(input_conll_file):\n ret = []\n curSent = \"\"\n curPreds = []\n for line in open(input_conll_file):\n line = line.strip()\n if not line:\n if curSent:\n ret.append((curSent.lstrip(), curPreds)) # Remove unneeded starting space\n curSent = \"\"\n curPreds = []\n else:\n # Some spacy characters mess up the conll format -- ignore these lines and carry on\n try:\n ind, word, fact = line.split()[:3]\n curSent += \" \" + word\n if is_annot(fact):\n curPreds.append(int(ind))\n except:\n logging.warn(\"Couldn't parse line: {}\".format(line))\n logging.debug(\"adding SP\")\n # Not really sure what's better to insert here\n # SP messes up the parser's output\n curSent += \" ,\"\n# curSent += \" SP\"\n\n print curSent\n return ret", "def _parse_relevant_lines(cls, conf_file_path):\n # Make a dictionary with the keys of find_words corresponding with\n # empty array as a place holder.\n relevant_lines = dict([(word, []) for word in cls.FIND_WORDS])\n # Now locate the relevant lines in this file and keep the found\n # pattern matches.\n with open(conf_file_path, 'r') as config:\n for line in config:\n # Strip whitespaces\n line = line.strip(\" \\t\")\n # Skip comment lines..\n if line.startswith('#'):\n continue\n for word, pattern in cls.FIND_WORDS.items():\n if \"{} \".format(word) not in line:\n continue\n matches = pattern.findall(line)\n if matches:\n # We only need the first capturing group.\n matches = [match[0].strip(\" \\t\") for match in matches]\n # We will only need the matched strings later on.\n relevant_lines[word] += matches\n return 
relevant_lines", "def contextualize(self, file_name):\n data = []\n qa_context = []\n with open(file_name, \"r\") as data_set:\n for line in data_set:\n # 将行号从行中分割出来\n line_number, line_res = tuple(line.split(\" \", 1))\n\n if line_number == \"1\":\n # 行号为1表示新的问题情景\n qa_context = []\n if \"\\t\" in line_res:\n # Tab键作为问题、答案、编号的划分\n question, answer, support = tuple(line_res.split(\"\\t\"))\n data.append((tuple(zip(*qa_context)) +\n self.sentence2sequence(question) +\n self.sentence2sequence(answer) +\n ([int(s) for s in support.split()],)))\n else:\n # 句子的一部分\n qa_context.append(self.sentence2sequence(line_res[:-1]))\n return data", "def readFileToCorpus(f):\n if os.path.isfile(f):\n file = open(f, \"r\") # open the input file in read-only mode\n i = 0 # this is just a counter to keep track of the sentence numbers\n corpus = [] # this will become a list of sentences\n print(\"Reading file \", f)\n for line in file:\n i += 1\n sentence = line.split() # split the line into a list of words\n #append this lis as an element to the list of sentences\n corpus.append(sentence)\n if i % 1000 == 0:\n #print a status message: str(i) turns int i into a string\n #so we can concatenate it\n sys.stderr.write(\"Reading sentence \" + str(i) + \"\\n\")\n #endif\n #endfor\n return corpus\n else:\n #ideally we would throw an exception here, but this will suffice\n print(\"Error: corpus file \", f, \" does not exist\")\n sys.exit() # exit the script\n #endif", "def extract_programs(outputf):\t\n programs = []\n with open(outputf,'r') as f:\n\t combo_lines = f.readlines()\n for combo_line in combo_lines:\n combo = combo_line.split(' ',1)[1]\n\t programs.append(combo)\n return programs", "def parsec(formatted_file, pattern_tree):\n pattern_path = []\n result_tree = {}\n result_path = []\n for line in formatted_file:\n search(line, pattern_tree, pattern_path, result_tree, result_path)\n return result_tree", "def readFileToCorpus(f):\n if os.path.isfile(f):\n file = open(f, \"r\") # open the input file in read-only mode\n i = 0 # this is just a counter to keep track of the sentence numbers\n corpus = [] # this will become a list of sentences\n print (\"Reading file \", f)\n for line in file:\n i += 1\n sentence = line.split() # split the line into a list of words\n corpus.append(sentence) # append this list as an element to the list of sentences\n if i % 1000 == 0:\n sys.stderr.write(\"Reading sentence \" + str(i) + \"\\n\") # just a status message: str(i) turns the integer i into a string, so that we can concatenate it\n return corpus\n else:\n print (\"Error: corpus file \", f, \" does not exist\") # We should really be throwing an exception here, but for simplicity's sake, this will suffice.\n sys.exit() # exit the script", "def build_concordance(filename):\n infile = open(filename, \"r\")\n concordance = {}\n word_set =set()\n line_number = 1\n for line in infile:\n word_list = line.split()\n for every_word in word_list:\n every_word = every_word.strip(string.punctuation).lower()\n if every_word != '':\n word_set.add(every_word)\n list_of_words = list(word_set)\n for item in list_of_words:\n if item not in concordance:\n concordance[item] = [line_number]\n else:\n list_of_line = concordance.get(item)\n list_of_line.append(line_number)\n concordance[item] = list_of_line \n line_number = line_number + 1\n word_set=set()\n return concordance", "def parseComponents(self):\n for fname in self.matches:\n self.logger.info(\"Parsing \" +\n fname)\n with open(fname, \"rt\") as f:\n # parsing the file for specific 
tokens of component\n # start/stop is a simple state machine\n for line in f:\n self.current_state(line)", "def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences", "def getComicsListFromFile(filename):\n h = open(filename)\n contents = \"\\n\".join(h.readlines())\n expr = re.compile(\"([a-z0-9]+)\")\n return expr.findall(contents)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a cached copy of TestShib's metadata with a cacheDuration attribute
def cache_duration_metadata_callback(_request, _uri, headers): return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml')) # lint-amnesty, pylint: disable=no-member
[ "def tdcache():\n return cachetools.cached(cache=tdcache.tensor_description_cache)", "def get_metadata(self):\n return copy.copy(self.metadata)", "def test_cache_datastore_manifests(self, cache_audio: bool):\n # Data setup\n random_seed = 42\n sample_rate = 16000\n num_examples = 10\n num_manifests = 2\n data_duration = 1.0\n\n # Generate random signals\n _rng = np.random.default_rng(seed=random_seed)\n\n # Input and target signals have the same duration\n data_duration_samples = int(data_duration * sample_rate)\n\n with tempfile.TemporaryDirectory() as test_dir:\n test_store_dir = os.path.join(test_dir, 'store')\n os.mkdir(test_store_dir)\n\n # Prepare metadata and audio files\n manifest_filepaths = []\n audio_files = []\n for m in range(num_manifests):\n manifest_dir = os.path.join(test_store_dir, f'manifest_{m}')\n os.mkdir(manifest_dir)\n manifest_filepath = os.path.join(manifest_dir, 'manifest.json')\n\n metadata = []\n data = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples, num_examples))\n for n in range(num_examples):\n audio_filepath = f'manifest_{m}_audio_{n:02d}.wav'\n audio_file = os.path.join(manifest_dir, audio_filepath)\n # Write audio file\n sf.write(audio_file, data[:, n], sample_rate, 'float')\n # Update metadata\n metadata.append(\n {\n 'audio_filepath': audio_filepath,\n 'duration': data_duration,\n 'text': f'text for example {n:02d}',\n }\n )\n # Update audio files\n audio_files.append(audio_file)\n\n # Save manifest\n write_manifest(manifest_filepath, metadata)\n manifest_filepaths.append(manifest_filepath)\n\n # Cache location\n test_cache_dir = os.path.join(test_dir, 'cache')\n\n # Instead of using AIS, copy object from store dir to cache dir\n def fake_get(self):\n # Object path relative to store path\n object_path = os.path.relpath(self.store_path, start=test_store_dir)\n # Copy to fake local path\n self._local_path = os.path.join(test_cache_dir, object_path)\n os.makedirs(os.path.dirname(self.local_path), exist_ok=True)\n shutil.copy(self.store_path, self.local_path)\n # Return path as in the original get\n return self.local_path\n\n with mock.patch(\n 'nemo.collections.asr.data.audio_to_text.is_datastore_path', lambda x: True\n ), mock.patch.object(DataStoreObject, 'get', fake_get):\n # Use a single worker for this test to avoid failure with mock & multiprocessing (#5607)\n cache_datastore_manifests(manifest_filepaths, cache_audio=cache_audio, num_workers=1)\n\n # Manifests need to be compared\n store_files_to_compare = manifest_filepaths\n if cache_audio:\n # Audio needs to be compared\n store_files_to_compare += audio_files\n\n # Compare files\n for f_store in store_files_to_compare:\n f_cache = os.path.join(test_cache_dir, os.path.relpath(f_store, test_store_dir))\n assert filecmp.cmp(f_store, f_cache, shallow=False), f'Files {f_store} and {f_cache} do not match.'", "def generate_statistics():\r\n statistics = cache.get('statistics')\r\n if statistics is None:\r\n statistics = {}\r\n statistics['nr_hashtags'] = ('Number of Hashtags',\r\n get_number_hashtags())\r\n statistics['nr_tokens'] = ('Number of Tokens', get_token_count())\r\n statistics['media_storage_size'] = ('Storage Folder Size (MB)',\r\n str(get_folder_size(\r\n cfg['media_storage'])))\r\n\r\n cache.set('statistics', statistics,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return statistics", "def _read_update_time_from_cache(self):\n raise NotImplementedError(\"Implement in subclasses.\")", "def extract_metadata():\n\n create_output(ARGS.out)\n index = 
pre.pixz.read_index(ARGS.traffic)\n\n try:\n tmp = tempfile.mkdtemp(prefix=\"ictf2017_cache_\")\n print(\"Using temporary cache for extracted files at {}\".format(tmp))\n\n file_indexes = [i for i in range(len(index))\n if (i >= ARGS.start and i <= ARGS.stop)]\n\n # a wrapper which measures execution times and calculates eta\n eta = pre.timing.ETACalculator(len(file_indexes))\n\n for count, i in enumerate(file_indexes):\n print(\"\\nProcessing index {} from [{}, {}]\"\n .format(i, min(file_indexes), max(file_indexes)))\n\n def extract_read_append_remove():\n pcapfile = pre.pixz.extract_pcap(ARGS.traffic, index[i], tmp)\n metadata = pre.pcap.read(pcapfile)\n append_output(metadata, ARGS.out)\n os.remove(pcapfile)\n\n eta.execute(count, extract_read_append_remove)\n\n finally:\n shutil.rmtree(tmp)\n print(\"Cleaned up temporary cache {}\\n\\n\".format(tmp))", "def _metadata(self):\n metadata = {\n \"name\": self.name,\n \"hash\": self.hash,\n }\n if self.path is not None:\n metadata[\"path\"] = self.path\n return metadata", "def cache_duration(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cache_duration\")", "def test_cache2(self):\n\n retour = self.do_query('meta/cache2')\n\n assert(retour)\n assert(retour.json()['cache_time'] == 48)\n assert(retour.json()['cache_by_user'])", "def cache_info():\n return search_by_id.cache_info()", "def Metadata():\n def _CreateMetadata(unused_none):\n global _metadata\n if not _metadata:\n _metadata = _GCEMetadata()\n _metadata_lock.lock(function=_CreateMetadata, argument=None)\n _metadata_lock.unlock()\n return _metadata", "def cache_dict():\n return {}", "def cacheme( self ) :\n dofunc = lambda : self\n cachemgr = h.fromconfig( 'cachemgr' )\n cachenm = cachemgr.get_cache( cache_namespace )\n cachenm.remove_value( key=self.cache_key )\n return cachenm.get( key=self.cache_key, createfunc=dofunc )", "def get_cache_time(self, path, modified, mime_type):\r\n return self.CACHE_MAX_AGE if \"v\" in self.request.arguments else 0", "def stats():\n\n if __STATS[\"mod\"]:\n diff = timezone.now() - __STATS[\"mod\"]\n if diff.total_seconds() < settings.GLOBAL_STATS_CACHE_DURATION:\n return __STATS[\"data\"]\n\n gen_stats()\n return __STATS[\"data\"]", "def test_jsoncache():\n from tapis_cli.hashcache import jsoncache, lru_cache\n\n @jsoncache.mcache(lru_cache(maxsize=256))\n def timer_function(duration=1):\n sleep(duration)\n return 'function_response'\n\n delay = 2\n start_01 = seconds()\n resp = timer_function(delay)\n start_02 = seconds()\n resp = timer_function(delay)\n end_01 = seconds()\n\n delta_1 = round(start_02 - start_01)\n delta_2 = round(end_01 - start_02)\n\n assert delta_1 == delay\n assert delta_2 < delay", "def GetMetadata(self):\n return self.dict['meta']", "def metadata(self):\n return copy.deepcopy(self._metadata)", "def metadata_processor(self):\n counts = {key: int(value) for key, value in\n self.redis.hgetall(self.metadata_cache_key).iteritems()}\n\n counts['cached'] = len(self.tweet_cache)\n\n metadata = {'counts': counts}\n log.debug(metadata)\n\n if self.is_queuing:\n rqworker.enqueue(self.metadata_processor_fct, metadata)\n else:\n self.metadata_processor_fct(metadata)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enable and configure the TestShib SAML IdP as a third_party_auth provider
def _configure_testshib_provider(self, **kwargs): fetch_metadata = kwargs.pop('fetch_metadata', True) assert_metadata_updates = kwargs.pop('assert_metadata_updates', True) kwargs.setdefault('name', self.PROVIDER_NAME) kwargs.setdefault('enabled', True) kwargs.setdefault('visible', True) kwargs.setdefault("backend_name", "tpa-saml") kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG) kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID) kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL) kwargs.setdefault('icon_class', 'fa-university') kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName kwargs.setdefault('max_session_length', None) kwargs.setdefault('send_to_registration_first', False) kwargs.setdefault('skip_email_verification', False) saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member if fetch_metadata: assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata() if assert_metadata_updates: assert num_total == 1 # lint-amnesty, pylint: disable=no-member assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member assert num_updated == 1 # lint-amnesty, pylint: disable=no-member assert num_failed == 0 # lint-amnesty, pylint: disable=no-member assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member return saml_provider
[ "def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)", "def init_saml_auth(req):\n auth = OneLogin_Saml2_Auth(req, custom_base_path=app.config[\"SAML_PATH\"])\n return auth", "def add_tomcat7_idp():\n pass", "def setup_sa_auth_backend(self):\n defaults = {\n 'form_plugin': None\n }\n # The developer must have defined a 'sa_auth' section, because\n # values such as the User, Group or Permission classes must be\n # explicitly defined.\n config['sa_auth'] = defaults\n config['sa_auth'].update(self.sa_auth)", "def keystone_federation_setup_idp2():\n test_saml_idp_unit = zaza.model.get_units(\"test-saml-idp2\")[0]\n idp_remote_id = LOCAL_IDP_REMOTE_ID.format(\n zaza.model.get_unit_public_address(test_saml_idp_unit))\n\n keystone_federation_setup(\n federated_domain=\"federated_domain_idp2\",\n federated_group=\"federated_users_idp2\",\n idp_name=\"test-saml-idp2\",\n idp_remote_id=idp_remote_id)", "def saml_provider(self, saml_provider):\n\n self._saml_provider = saml_provider", "def setup_provider(self):\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def includeme(config):\n settings = config.get_settings()\n if \"sauropod.credentials.backend\" not in settings:\n default_backend = \"pysauropod.server.credentials.BrowserIDCredentials\"\n settings[\"sauropod.credentials.backend\"] = default_backend\n settings[\"sauropod.credentials.verifier\"] = \"vep:DummyVerifier\"\n plugin.load_and_register(\"sauropod.credentials\", config)", "def keystone_federation_setup_idp1():\n test_saml_idp_unit = zaza.model.get_units(\"test-saml-idp1\")[0]\n idp_remote_id = LOCAL_IDP_REMOTE_ID.format(\n zaza.model.get_unit_public_address(test_saml_idp_unit))\n\n keystone_federation_setup(\n federated_domain=\"federated_domain_idp1\",\n federated_group=\"federated_users_idp1\",\n idp_name=\"test-saml-idp1\",\n idp_remote_id=idp_remote_id)", "def rhsso_setup(lifecycle_hooks, rhsso_service_info):\n\n lifecycle_hooks.append(OIDCClientAuthHook(rhsso_service_info, credentials_location=\"query\"))", "def attach_saml_resources_idp2():\n _attach_saml_resources_local_idp(\n keystone_saml_mellon_app_name=\"keystone-saml-mellon2\",\n test_saml_idp_app_name=\"test-saml-idp2\")", "def enable_sso(DirectoryId=None, UserName=None, Password=None):\n pass", "def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"", "def enable_slep006():\n with config_context(enable_metadata_routing=True):\n yield", "def includeme(config):\n # Grab the pyramid-wide settings, to look for any auth config.\n settings = config.get_settings().copy()\n # Use the settings to construct an AuthenticationPolicy.\n authn_policy = SRPAuthenticationPolicy.from_settings(settings)\n config.set_authentication_policy(authn_policy)\n # Hook up a default 
AuthorizationPolicy.\n # You can't have one without the other, and ACLAuthorizationPolicy is\n # usually what you want. If the app configures one explicitly then this\n # will get overridden.\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n # Add forbidden view to challenge for auth credentials.\n config.add_view(authn_policy.challenge_view,\n context=\"pyramid.exceptions.Forbidden\")", "def attach_saml_resources_idp1():\n _attach_saml_resources_local_idp(\n keystone_saml_mellon_app_name=\"keystone-saml-mellon1\",\n test_saml_idp_app_name=\"test-saml-idp1\")", "def attach_saml_resources(application=\"keystone-saml-mellon\"):\n test_idp_metadata_xml = \"samltest.xml\"\n idp_metadata_xml_file = os.path.join(\n charm_lifecycle_utils.BUNDLE_DIR, test_idp_metadata_xml)\n\n idp_metadata_name = \"idp-metadata\"\n sp_private_key_name = \"sp-private-key\"\n sp_signing_keyinfo_name = \"sp-signing-keyinfo\"\n\n zaza.model.attach_resource(\n application, idp_metadata_name, idp_metadata_xml_file)\n\n (key, cert) = cert_utils.generate_cert('SP Signing Key')\n\n cert = cert.decode().strip(\"-----BEGIN CERTIFICATE-----\")\n cert = cert.strip(\"-----END CERTIFICATE-----\")\n\n with tempfile.NamedTemporaryFile(mode='w', suffix='.pem') as fp:\n fp.write(key.decode())\n fp.flush()\n zaza.model.attach_resource(application, sp_private_key_name, fp.name)\n\n with tempfile.NamedTemporaryFile(mode='w', suffix='.xml') as fp:\n fp.write(SP_SIGNING_KEY_INFO_XML_TEMPLATE.format(cert))\n fp.flush()\n zaza.model.attach_resource(\n application, sp_signing_keyinfo_name, fp.name)", "def setup_auth_turing(cluster):\n # Read in auth info\n azure_file = os.path.join(ABSOLUTE_HERE, \"secrets\", \"turing-auth-key-prod.json\")\n with open(azure_file, \"r\") as stream:\n azure = json.load(stream)\n\n # Login in to Azure\n login_cmd = [\n \"az\", \"login\", \"--service-principal\",\n \"--username\", azure[\"sp-app-id\"],\n \"--password\", azure[\"sp-app-key\"],\n \"--tenant\", azure[\"tenant-id\"]\n ]\n subprocess.check_output(login_cmd)\n\n # Set kubeconfig\n creds_cmd = [\n \"az\", \"aks\", \"get-credentials\",\n \"--name\", cluster,\n \"--resource-group\", \"binder-prod\"\n\n ]\n stdout = subprocess.check_output(creds_cmd)\n print(stdout.decode('utf-8'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure TestShib before running the login test
def test_login(self): self._configure_testshib_provider() self._test_login()
[ "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def _FakeLogin(self):\n self.testbed.setup_env(\n USER_EMAIL='user@example.com',\n USER_ID='123',\n overwrite=True)", "def setUp(self):\n self.login_handler = LoginHandler()", "def test_login_required():\n pass", "def setUp(self):\n self.config_stub = {\n 'general': {\n 'app_name': 'app_name',\n 'environment': 'test',\n 'log_path': '/var/www/log/itn_test.log'\n },\n 'db': {\n 'host': 'localhost',\n 'database': 'example',\n 'user': 'example-user',\n 'password': 'password',\n 'port': 6379\n }\n }", "def setUp(self):\n self.login(self.create_superuser())", "def setUp(self):\n self.app = create_app(config_name='testing')", "def login(test_app):\n from flask_monitoringdashboard import config\n\n with test_app.session_transaction() as sess:\n sess[config.link + '_logged_in'] = True\n sess[config.link + '_admin'] = True", "def __init__(self, test_config):\n super().__init__(test_config)\n\n self.sshpass_base_cmd = 'sshpass -p {} ssh -T {}@{}'.format(\n VW_PASSWORD, VW_USERNAME, VW_IP_ADDR)\n\n self.vw_auto = VW_AUTO\n self.tcl_pid = None", "def __init__(self):\r\n self.load_config()\r\n self.login()", "def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing", "def setUp(self):\n # Reset the database and start a test client. Disable error catching\n # during request handling so that you get better error reports when\n # performing test requests against the application.\n try:\n insta485db = sh.Command(\"./bin/insta485db\")\n insta485db(\"reset\")\n except sh.ErrorReturnCode as error:\n self.assertTrue(False, (\"Failed to run insta485db, \"\n \"output: \"\n \"{}\").format((error).decode('ascii')))\n insta485.app.config[\"TESTING\"] = True\n self.app = insta485.app.test_client()", "def test_get_login_flow(self):\n pass", "def set_test_items(self):\n app.config[\"TESTING\"] = True\n\n # create customer\n self.customer = Customer.get_internal_customer()\n\n # create test admin\n self.admin = Admin.get_test_admin()\n\n # create test user\n self.test_user = User.get_test_user()\n self.test_user.set_status(Status.active)\n\n # get test cohort\n self.test_cohort = Cohort.get_test_cohort()\n\n # login admin\n self.test_app = app.test_client()\n resp = self.test_app.post(\"/login\", data={\"email\": TEST_ADMIN_EMAIL,\n \"password\": TEST_ADMIN_PASSWORD})\n self.assertRedirect(resp, \"Bad login\")", "def setUp(self):\n self.user = {\n \"user\": {\n \"email\": \"chirchir@olympians.com\",\n \"username\": \"chirchir\",\n \"password\": \"test1234\"\n }\n }", "def test_set_init(config):\n\n global flow_mods_port_map\n global flow_mods_logger\n global flow_mods_config\n\n flow_mods_logger = logging.getLogger(\"flow_mods\")\n flow_mods_logger.info(\"Initializing test set\")\n flow_mods_port_map = config[\"port_map\"]\n flow_mods_config = config", "def setUp(self):\n self.verificationErrors = []\n self.configfile = ElementTree(file='..\\..\\conf\\conf.xml').getroot()\n self.weburl = self.configfile.find(\"weburl/url\").text\n self.seleserver = self.configfile.find('seleniumserver/server').text\n self.seleport = self.configfile.find('seleniumserver/port').text\n self.browsername = self.configfile.find('browser/name').text\n self.selenium = selenium(self.seleserver, self.seleport, self.browsername, self.weburl)\n self.selenium.start()\n self.selenium.window_maximize()\n self.selenium.open(\"/\")\n self.selenium.wait_for_page_to_load(\"5000\") \n self.username = 
self.configfile.find('userdata/username').text\n self.password = self.configfile.find('userdata/password').text", "def test_config():\n assert not create_app().testing\n assert create_app({'TESTING': True}).testing", "def test_create_browser_login_flow(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure TestShib before running the register test
def test_register(self): self._configure_testshib_provider() self._test_register()
[ "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def setUp(self):\n self.modules = {}", "def test_set_registration_configuration(self):\n pass", "def test_shell_manager_register(self):\n\n @self.shells.register\n class ATestShell(Shell):\n\n \"\"\"A test shell.\"\"\"\n\n def init(self):\n \"\"\"Initialize this test shell.\"\"\"\n self.session.send(\"you just inited me!\")\n\n type(self).shell_class = ATestShell\n assert \"ATestShell\" in self.shells", "def test_set_init(config):\n\n global flow_mods_port_map\n global flow_mods_logger\n global flow_mods_config\n\n flow_mods_logger = logging.getLogger(\"flow_mods\")\n flow_mods_logger.info(\"Initializing test set\")\n flow_mods_port_map = config[\"port_map\"]\n flow_mods_config = config", "def config_setup(self, config):\n super(PushGatewayApiV1TestCase, self).config_setup(config)\n config[\"apps\"][\"com.example.spqr\"] = {\n \"type\": \"tests.test_pushgateway_api_v1.TestPushkin\"\n }", "def test_init(self):\n pass", "def setUp(self):\n self.config_stub = {\n 'general': {\n 'app_name': 'app_name',\n 'environment': 'test',\n 'log_path': '/var/www/log/itn_test.log'\n },\n 'db': {\n 'host': 'localhost',\n 'database': 'example',\n 'user': 'example-user',\n 'password': 'password',\n 'port': 6379\n }\n }", "def before_test_run(self) -> None:", "def configure(self):", "def test_install(self):\n pass", "def prepare_test(self):\n pass", "def setUp(self):\n self.app = create_app(config_name='testing')", "def test_register(self):\n result = {\"retcode\": 0, \"stdout\": \"Successfully registered system\"}\n salt_mock = {\n \"cmd.run_all\": MagicMock(return_value=result),\n }\n with patch.dict(suseconnect.__salt__, salt_mock):\n self.assertEqual(\n suseconnect.register(\"regcode\"), \"Successfully registered system\"\n )\n salt_mock[\"cmd.run_all\"].assert_called_with(\n [\"SUSEConnect\", \"--regcode\", \"regcode\"]\n )", "def custom_setup(self):\r\n pass", "def initialize(cls, *args, **kwargs):\n cls.test_config.initialize(*args,**kwargs)", "def start_test_run(self):", "def configure(self):\n super(BaseSharingTests, self).configure()\n self.patch(config.Sharing, \"Enabled\", True)\n self.patch(config.Sharing.Calendars, \"Enabled\", True)\n self.patch(config.Authentication.Wiki, \"Enabled\", True)", "def setUp(self):\n self.supvisors = DummySupvisors()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table.
def test_login_records_attributes(self): self.test_login() record = UserSocialAuth.objects.get( user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG ) attributes = record.extra_data assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['Member@testshib.org', 'Staff@testshib.org'] assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I'] assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself'] assert attributes.get('urn:oid:2.5.4.20') == ['555-5555'] # Phone number
[ "def test_user_attributes(self):\n user_attributes = (\n \"first_name\",\n \"last_name\",\n \"username\",\n \"email\",\n \"id\",\n \"email\",\n \"links\",\n )\n fetched_user = requests.get(\n user_list_url, headers=prepare_auth_headers()\n ).json()[0]\n for attr in user_attributes:\n assert attr in fetched_user.keys()", "def test_get_user_details(self):\n edx_saml_identity_provider = EdXSAMLIdentityProvider('demo', **mock_conf)\n assert edx_saml_identity_provider.get_user_details(mock_attributes) == expected_user_details", "def test_attribute_authenticated_has_attributes(testapp, login_fixture, fill_the_db):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert len(response.html.find_all(\"img\")) == 2", "def test_InstancesAttributes(self):\n self.assertTrue(hasattr(self.new_user, \"email\"))\n self.assertTrue(hasattr(self.new_user, \"password\"))\n self.assertTrue(hasattr(self.new_user, \"first_name\"))\n self.assertTrue(hasattr(self.new_user, \"last_name\"))", "def test_get_social_auth(self):\n assert get_social_auth(self.user) == self.user.social_auth.get(provider=EdxOrgOAuth2.name)\n UserSocialAuthFactory.create(user=self.user, uid='other name')\n with self.assertRaises(MultipleObjectsReturned):\n get_social_auth(self.user)", "def test_activity_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"activity\"))\n if models.storage_t == 'db':\n self.assertEqual(student.activity, None)\n else:\n self.assertEqual(student.activity, \"\")", "def has_saml(self) -> bool:\n return bool(self.saml_entity_id) and bool(self.saml_acs_url) and bool(self.saml_x509_cert)", "def test_first_name_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"first_name\"))\n if models.storage_t == 'db':\n self.assertEqual(student.first_name, \"Joe\")\n else:\n self.assertEqual(student.first_name, \"Joe\")", "def assert_social_auth_exists_for_user(self, user, strategy):\r\n social_auths = strategy.storage.user.get_social_auth_for_user(\r\n user, provider=self.PROVIDER_CLASS.BACKEND_CLASS.name)\r\n self.assertEqual(1, len(social_auths))\r\n self.assertEqual(self.backend_name, social_auths[0].provider)", "def test_attribute_view_authenticated(testapp, fill_the_db, login_fixture):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert response.status_code == 200", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n 
num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_claims_supported_set(self):\n expected_claims = ['openid', 'email']\n\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], expected_claims)", "def test_read_user_identity_mapping(self):\n pass", "def test_user_associations(self):\n self.assertEqual(len(self.user.stashes), 1)\n self.assertEqual(self.user.stashes[0].name, \"test stash name\")\n self.assertEqual(len(self.user.songs), 1)\n self.assertEqual(self.user.songs[0].title, \"test title\")", "def test_attributes(self):\n a = self.model()\n for attr in self.attributes:\n self.assertTrue(hasattr(a, attr), 'Has attribute %s' % attr)", "def test_attribute_types(self):\n self.assertIsInstance(self.user_1.email, str)\n self.assertIsInstance(self.user_1.password, str)\n self.assertIsInstance(self.user_1.first_name, str)\n self.assertIsInstance(self.user_1.last_name, str)", "def test_create_user_identity_mapping(self):\n pass", "def test_get_user_attributes(self):\r\n\r\n _user_values = ([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \r\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0, 0, 0, 0, 0, 0, \r\n 0, 0, 0, 0], ['', '', '', '', ''])\r\n\r\n self.assertEqual(self.DUT._get_user_attributes(), _user_values)", "def test_attributeCopied(self):\n self.assertIdentical(\n self.store.findUnique(AMPConfiguration).loginSystem,\n self.store.findUnique(LoginSystem))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test SAML login logging with debug mode enabled or disabled
def test_debug_mode_login(self, debug_mode_enabled): self._configure_testshib_provider(debug_mode=debug_mode_enabled) with patch.object(saml_log, 'info') as mock_log: self._test_login() if debug_mode_enabled: # We expect that test_login() does two full logins, and each attempt generates two # logs - one for the request and one for the response assert mock_log.call_count == 4 expected_next_url = "/dashboard" (msg, action_type, idp_name, request_data, next_url, xml), _kwargs = mock_log.call_args_list[0] assert msg.startswith('SAML login %s') assert action_type == 'request' assert idp_name == self.PROVIDER_IDP_SLUG self.assertDictContainsSubset( {"idp": idp_name, "auth_entry": "login", "next": expected_next_url}, request_data ) assert next_url == expected_next_url assert '<samlp:AuthnRequest' in xml (msg, action_type, idp_name, response_data, next_url, xml), _kwargs = mock_log.call_args_list[1] assert msg.startswith('SAML login %s') assert action_type == 'response' assert idp_name == self.PROVIDER_IDP_SLUG self.assertDictContainsSubset({"RelayState": idp_name}, response_data) assert 'SAMLResponse' in response_data assert next_url == expected_next_url assert '<saml2p:Response' in xml else: assert not mock_log.called
[ "def test_get_login_flow(self):\n pass", "def test_logging(self):\n self._verify_logging()", "def test_successful_login(self):\n pass", "def test_login_required():\n pass", "def test_login(self):\n self._configure_testshib_provider()\n self._test_login()", "def test_login_invalid_saml():\n ve_test = VeTestApi(\"activation:test_login_invalid_saml\")\n ve_test.begin(login=ve_test.login_types.none)\n\n buttons = [RETRY_ACTION, HOME_ACTION]\n login_error(ve_test, user_name=INVALID_SAML_ACCOUNT_ID, password='123', error_title=ERROR_TITLE, error_msg=E_LOGIN_FAILED_ERROR_MSG, error_code=E_LOGIN_FAILED_ERROR_CODE, error_actions=buttons, focused_action=RETRY_ACTION)\n\n ve_test.end()", "def assertDebugOnly(self): # FIXME: when at python 3.10+ replace with assertNoLogs\n with self.assertLogs(\"qiskit.quantum_info.synthesis\", \"DEBUG\") as ctx:\n yield\n for i in range(len(ctx.records)):\n self.assertLessEqual(\n ctx.records[i].levelno,\n logging.DEBUG,\n msg=f\"Unexpected logging entry: {ctx.output[i]}\",\n )\n self.assertIn(\"Requested fidelity:\", ctx.records[i].getMessage())", "def test_valid_credentials_login(base_actions) -> None: \n page.login(literals.email, literals.pwd)\n assert page.element_visibility(le.header_settings), 'Not logged in'", "def test_professor_can_login_to_web_portal(professor):", "def test_aio_can_login_to_web_portal(aio):", "def test_view_login(self):\n response = self.make_request(\"/user/login/\", follow_redirects=True)\n self.assertEqual(200, response.status_code)\n self.assertIn(\"Login now to view premium content\", self.html)\n self.assertIn(\"Email Address\", self.html)\n self.assertIn(\"Password\", self.html)", "def test_login_session_check(self):\r\n\t\tprint(\"\")\r\n\t\tprint(\"`login_session_check` method tests\")\r\n\t\tprint(\"---------------------\")\r\n\t\tprint(\"Test: `login_session_check: logged in`\")\r\n\t\tpath = 'login'\r\n\t\twith requests_mock.mock() as m:\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":true,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-30\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\",\r\n\t\t\t\t\t\t\t\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == True\r\n\t\t\tassert session_check[1]['FORCE_PWD_CHANGE'] == True\r\n\t\t\tassert session_check[1]['LAST_ACCT'] == 1\r\n\t\t\tassert session_check[1]['NEXT_PWNED'] == None\r\n\t\t\tassert session_check[1]['ROOT'] == True\r\n\t\t\tassert session_check[1]['USER_ID'] == 2\r\n\t\t\tassert session_check[1]['USER'] == 'restuser'\r\n\t\t\tassert session_check[1]['expired_pwd'] == False\r\n\t\t\tprint(\"Passed!!!\")\r\n\t\t\tprint(\"Test: `login_session_check: not logged in`\")\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\t[],\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == False\r\n\t\t\tassert not session_check[1] # dictionary should be empty\r\n\t\tprint(\"Passed!!!\")", "def test_auth_xml(self):\n\n config = get_config()\n\n if 
config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey'),\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )", "def test_admin_can_login_to_web_portal(admin):", "def test_v1login(self):\n pass", "def debug_login():\n return main_page()", "def debug_check(server_sd: SessionDescriptor) -> None:\n assert (\n server_sd.json_object.get(\"debug\", False) is False\n ), \"Server was running in debug mode\"", "def test_v1logininitiate(self):\n pass", "def test_login(self):\n print(\"Test Login\")\n self.mock_api.return_value = LOGIN_RESPONSE\n self.manager.enabled = False\n assert self.manager.login()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.login, all_kwargs, None,\n self.write_api, self.overwrite)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that when we have a TPA provider which has an explicit maximum session length set, waiting for longer than that between requests results in us being logged out.
def test_login_with_testshib_provider_short_session_length(self): # Configure the provider with a 10-second timeout self._configure_testshib_provider(max_session_length=10) now = datetime.datetime.utcnow() with freeze_time(now): # Test the login flow, adding the user in the process self._test_login() # Wait 30 seconds; longer than the manually-set 10-second timeout later = now + datetime.timedelta(seconds=30) with freeze_time(later): # Test returning as a logged in user; this method verifies that we're logged out first. self._test_return_login(previous_session_timed_out=True)
[ "def test_server_timeouted_session(self):\n\n session = Mock()\n session.timeout = Mock()\n session.is_active = False\n session.inactivity = config.SESSION_TIMEOUT + 1\n\n self.app.sessions.running = Mock(return_value=[session])\n self.worker.start()\n time.sleep(1)\n session.timeout.assert_any_call()\n session._close()", "def test_inactive_session_timeout(self):\r\n email, password = self.STUDENT_INFO[0]\r\n self.login(email, password)\r\n\r\n # make sure we can access courseware immediately\r\n resp = self.client.get(reverse('dashboard'))\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n # then wait a bit and see if we get timed out\r\n time.sleep(2)\r\n\r\n resp = self.client.get(reverse('dashboard'))\r\n\r\n # re-request, and we should get a redirect to login page\r\n self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=' + reverse('dashboard'))", "def test_inactive_session_timeout(self):\r\n self.create_account(self.username, self.email, self.pw)\r\n self.activate_user(self.email)\r\n\r\n self.login(self.email, self.pw)\r\n\r\n # make sure we can access courseware immediately\r\n course_url = '/course/'\r\n resp = self.client.get_html(course_url)\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n # then wait a bit and see if we get timed out\r\n time.sleep(2)\r\n\r\n resp = self.client.get_html(course_url)\r\n\r\n # re-request, and we should get a redirect to login page\r\n self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=/course/')", "def test_get_timeouted_session(self):\n from core.db.models import Session\n session = Session('some_platform')\n session.selenium_session = '1'\n session.add_session_step = Mock()\n session.timeout()\n\n with patch(\n 'flask.current_app.database.get_session',\n Mock(return_value=session)\n ):\n response = get_session_request(self.vmmaster_client, session.id)\n self.assertIn(\n \"SessionException: Session {}(Session timeout. 
No activity since None) already closed earlier\".format(\n session.id), response.data\n )\n self.assertEqual(session.add_session_step.call_count, 2)\n self.assertTrue(session.closed)", "def test_timeout_invalid_start():\n connection = FakeBaseConnection(session_timeout=10)\n assert not connection._timeout_exceeded(start=0)", "def test_timeout_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time() - 11\n try:\n connection._timeout_exceeded(start)\n except NetmikoTimeoutException as exc:\n assert isinstance(exc, NetmikoTimeoutException)\n return\n\n assert False", "def testMaxLeaseSeconds(self):\n sub_key = Subscription.create_key_name(self.callback, self.topic)\n self.assertTrue(Subscription.get_by_key_name(sub_key) is None)\n\n self.verify_callback_querystring_template = (\n self.callback +\n '?hub.verify_token=the_token'\n '&hub.challenge=this_is_my_fake_challenge_string'\n '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'\n '&hub.mode=%s'\n '&hub.lease_seconds=864000')\n urlfetch_test_stub.instance.expect(\n 'get', self.verify_callback_querystring_template % 'subscribe', 200,\n self.challenge)\n self.handle('post',\n ('hub.callback', self.callback),\n ('hub.topic', self.topic),\n ('hub.mode', 'subscribe'),\n ('hub.verify', 'sync'),\n ('hub.verify_token', self.verify_token),\n ('hub.lease_seconds', '1000000000000000000'))\n self.assertEquals(204, self.response_code())\n sub = Subscription.get_by_key_name(sub_key)\n self.assertTrue(sub is not None)\n self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)\n self.verify_record_task(self.topic)", "def test_timeout(self):\n context = Context(SSLv23_METHOD)\n context.set_timeout(1234)\n assert context.get_timeout() == 1234", "def test_login_duration(app, client):\n # Clear data before running\n db_session = sessionmaker(bind=engine)()\n # Errors are not checked here if there is a database error I want to know\n # about it\n # Ignoring a failure to delete the DB may break this test\n db_session.query(Users).delete()\n db_session.query(Sessions).delete()\n load_time = datetime.utcnow()-timedelta(seconds=5)\n user = Users(\n name='testuser',\n password='$argon2id$v=19$m=102400,t=2,p=8$RClylCHXGWztAvf4yOsa+Q$tANnnmfpEy6FejlH7vVMow'\n )\n db_session.add(user)\n db_session.commit()\n\n start = datetime.utcnow()\n res = client.post(\n '/login',\n data='{\"user\":\"testuser\",\"pass\":\"password1234\"}',\n headers={'Content-Type': 'application/json'}\n )\n stop = datetime.utcnow()\n assert res.status_code == 200\n\n # Get all cookies set in the last request\n cookie_setters = [header[1] for header in res.headers if header[0] == 'Set-Cookie']\n # get all session cookies set\n session_ids = [cookie for cookie in cookie_setters if cookie.startswith('session=')]\n assert len(session_ids) == 1\n # get cookie value\n session_id = session_ids[0].replace('session=','').split(';')[0]\n expire = db_session.query(Sessions).get(session_id).session_expire\n\n assert expire-stop <= timedelta(hours=1)\n\n db_session.close()", "def test_keepalive(self):\n response = requests.get(uri + reverse('login-out', args=(sessionID,)), headers=headers)\n assert response.status_code == 200, \"response status code should be 200\"\n assert len(response.json()) == 2, \"response data's items should be 2\"\n assert response.json().get('status') == 0, \"if response succeeded, status is 0\"\n assert response.json().get('sessionTimeout') == 3600, \"sessionTimeout should be 3600\"", "def test_search_end_of_session(self):\n pass", "def 
test_disable_my_other_sessions(self):\n pass", "async def test_validate_session(api_client: TestClient, coresys: CoreSys):\n with patch(\"aiohttp.web_request.BaseRequest.__getitem__\", return_value=None):\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": \"non-existing\"},\n )\n assert resp.status == 401\n\n with patch(\n \"aiohttp.web_request.BaseRequest.__getitem__\",\n return_value=coresys.homeassistant,\n ):\n resp = await api_client.post(\"/ingress/session\")\n result = await resp.json()\n\n assert \"session\" in result[\"data\"]\n session = result[\"data\"][\"session\"]\n assert session in coresys.ingress.sessions\n\n valid_time = coresys.ingress.sessions[session]\n\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": session},\n )\n assert resp.status == 200\n assert await resp.json() == {\"result\": \"ok\", \"data\": {}}\n\n assert coresys.ingress.sessions[session] > valid_time", "def test_wait_for_page_in_timeout(self):\n start_time = datetime.now()\n with self.assertRaises(SpdbError):\n csdb = CacheStateDB(self.config_data)\n ch = csdb.create_page_in_channel()\n\n csdb.wait_for_page_in([\"MY_TEST_KEY1\", \"MY_TEST_KEY2\"], ch, 1)\n\n assert (datetime.now() - start_time).seconds < 3", "def get_session_timeout():\n return SESSION_TIMEOUT", "def check_correct_usage(no_datastore, cookie_only_threshold):\n def minitest_divider(test):\n logger.debug('\\n\\n' + '-'*50)\n logger.debug(test + ' (nd=%s cot=%s)' % (no_datastore, cookie_only_threshold))\n\n st = SessionTester(no_datastore=no_datastore, cookie_only_threshold=cookie_only_threshold)\n expected_num_sessions_in_db_if_db_used = lambda a,b=0 : generic_expected_num_sessions_in_db_if_db_used(st, no_datastore, cookie_only_threshold, a, b)\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('try doing nothing (no session should be started)')\n st.noop()\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('start a session with a single write')\n st.start_request()\n str(st)\n assert st.get_expiration()==0, \"no session yet => no expiration yet\"\n assert st.is_active() is False\n st['x'] = 7\n assert st.is_active() is True\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider('start another session')\n st2 = SessionTester(st=st)\n st2.start_request()\n assert not st2.is_active()\n assert st2.get('x') is None, \"shouldn't get other session's data\"\n assert not st2.is_active(), \"still shouldn't be active - nothing set yet\"\n st2['x'] = 'st2x'\n assert st2.is_active()\n st2.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider('each session should get a unique sid')\n assert st2.ss.sid != st.ss.sid\n\n minitest_divider('we should still have the values we set earlier')\n st.start_request()\n str(st)\n assert_equal(st['x'], 7)\n st.finish_request_and_check()\n st2.start_request()\n assert_equal(st2['x'], 'st2x')\n st2.finish_request_and_check()\n\n minitest_divider(\"check get session by sid, save(True), and terminate()\")\n if cookie_only_threshold == 0:\n data1 = st.ss.data\n data2 = st2.ss.data\n else:\n # data is being stored in cookie-only form => won't be in the db\n data1 = data2 = {}\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data1)\n resp = st2.get_url('/get_by_sid?sid=%s' % st2.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data2)\n expected_num_sessions_in_db_if_db_used(2)\n st.start_request()\n 
st['y'] = 9 # make the session dirty\n st.save(True) # force it to persist to the db even though it normally wouldn't\n st.finish_request_and_check()\n\n # now the data should be in the db\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), st.ss.data)\n expected_num_sessions_in_db_if_db_used(2, 1)\n st.start_request()\n st.terminate() # remove it from the db\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider(\"should be able to terminate() and then start a new session all in one request\")\n st.start_request()\n st['y'] = 'yy'\n assert_equal(st.get('y'), 'yy')\n st.terminate()\n assert_raises(KeyError, st.__getitem__, 'y')\n st['x'] = 7\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n st.regenerate_id()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(initial_expir, st._get_expiration(), \"expiration should not change\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test w/new expiration time\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n new_expir = initial_expir + 120 # something new\n st.regenerate_id(expiration_ts=new_expir)\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(new_expir, st._get_expiration(), \"expiration should be what we asked for\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"check basic dictionary operations\")\n st.start_request()\n st['s'] = 'aaa'\n st['i'] = 99\n st['f'] = 4.37\n assert_equal(st.pop('s'), 'aaa')\n assert_equal(st.pop('s'), None)\n assert_equal(st.pop('s', 'nil'), 'nil')\n assert st.has_key('i')\n assert not st.has_key('s')\n assert_equal(st.get('i'), 99)\n assert_equal(st.get('ii'), None)\n assert_equal(st.get('iii', 3), 3)\n assert_equal(st.get('f'), st['f'])\n del st['f']\n assert_raises(KeyError, st.__getitem__, 'f')\n assert 'f' not in st\n assert 'i' in st\n assert_equal(st.get('x'), 7)\n st.clear()\n assert 'i' not in st\n assert 'x' not in st\n st.finish_request_and_check()\n\n minitest_divider(\"add complex data (models and objects) to the session\")\n st.start_request()\n st['model'] = make_entity(0)\n st['dict'] = dict(a='alpha', c='charlie', e='echo')\n st['list'] = ['b', 'd', 'f']\n st['set'] = set([2, 3, 5, 7, 11, 13, 17, 19])\n st['tuple'] = (7, 7, 1985)\n st.finish_request_and_check()\n st.start_request()\n st.clear()\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: basic usage\")\n st.start_request()\n st.set_quick('msg', 'mc only!')\n assert_equal('mc only!', st['msg'])\n st.finish_request_and_check()\n st.start_request()\n assert_equal('mc only!', st.pop_quick('msg'))\n assert_raises(KeyError, st.__getitem__, 'msg')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache (value will be lost if not using 
cookies)\")\n st.start_request()\n st.set_quick('a', 1)\n st.set_quick('b', 2)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if cookie_only_threshold > 0:\n assert_equal(st['a'], 1)\n assert_equal(st['b'], 2)\n else:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'b')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache should have no impact if another mutator is also used (and this ISNT memcache-only)\")\n st.start_request()\n st['x'] = 24\n st.set_quick('a', 1)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if no_datastore and cookie_only_threshold == 0:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'x')\n else:\n assert_equal(st['a'], 1)\n assert_equal(st['x'], 24)\n st.set_quick('msg', 'hello')\n st['z'] = 99\n st.finish_request_and_check()", "def test_disconnected_session_timeout(self):\n conn = HS2TestSuite()\n conn.setup()\n open_session_req = TCLIService.TOpenSessionReq()\n open_session_resp = conn.hs2_client.OpenSession(open_session_req)\n HS2TestSuite.check_response(open_session_resp)\n conn.session_handle = open_session_resp.sessionHandle\n # Ren a query, which should succeed.\n conn.execute_statement(\"select 1\")\n\n # Set up another connection and run a long-running query with the same session.\n conn2 = HS2TestSuite()\n conn2.setup()\n conn2.session_handle = open_session_resp.sessionHandle\n execute_resp = conn2.execute_statement(\"select sleep(10000)\")\n\n # Close one connection and wait for longer than disconnected_session_timeout. The\n # session should still be available since there's still one active connection.\n conn2.teardown()\n sleep(5)\n conn.execute_statement(\"select 3\")\n\n # Close the other connection and sleep again. THe session shuold now be closed.\n conn.teardown()\n sleep(5)\n conn.setup()\n\n # Run another query, which should fail since the session is closed.\n conn.execute_statement(\"select 2\", expected_error_prefix=\"Invalid session id\",\n expected_status_code=TCLIService.TStatusCode.ERROR_STATUS)\n\n # Check that the query was cancelled correctly.\n query_id = operation_id_to_query_id(execute_resp.operationHandle.operationId)\n status = self.cluster.get_first_impalad().service.get_query_status(query_id)\n assert status == \"Session closed because it has no active connections\"", "def get_session_duration() -> int:\n config = get_application_config()\n timeout: str = config['CLASSIC_SESSION_TIMEOUT']\n return int(timeout)", "def test_max_cookie_length(self):\n storage = self.get_storage()\n response = self.get_response()\n\n for i in range(5):\n storage.add(str(i) * 900)\n unstored_messages = storage.update(response)\n\n cookie_storing = self.stored_messages_count(storage, response)\n self.assertEqual(cookie_storing, 4)\n\n self.assertEqual(len(unstored_messages), 1)\n self.assert_(unstored_messages[0].message == '0' * 900)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mock an error response when calling the OData API for user details.
def _mock_odata_api_for_error(self, odata_api_root_url, username): def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument """ Return a 500 error when someone tries to call the URL. """ headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d' headers['RequestNo'] = '[787177]' # This is the format SAPSF returns for the transaction request number return 500, headers, 'Failure!' fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy()) url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format( root_url=odata_api_root_url, user_id=username, fields=fields, ) httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json') return url
[ "def test_api_user_get(self):\n pass", "def test_api_auth_retrieve_user_details_success(self):\n self.client.credentials(HTTP_AUTHORIZATION=\"Token \" + self.token.key)\n response = self.client.get(self.user_details_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, self.user.name)\n self.assertContains(response, self.user.uuid)", "def test_user_data(self):\n response_payload = {\"login\": \"agrawalo\"}\n fake_session = mock.MagicMock(spec=Session)\n fake_session.get.return_value.json.return_value = response_payload\n ud = get_user_data(\"agrawalo\", fake_session)\n self.assertEqual(ud, 'agrawalo')", "def test_user_api(self):\n\n response = self.get(\n reverse('api-user-list'),\n expected_code=200\n )\n\n # Check the correct number of results was returned\n self.assertEqual(len(response.data), User.objects.count())\n\n for key in ['username', 'pk', 'email']:\n self.assertIn(key, response.data[0])\n\n # Check detail URL\n pk = response.data[0]['pk']\n\n response = self.get(\n reverse('api-user-detail', kwargs={'pk': pk}),\n expected_code=200\n )\n\n self.assertIn('pk', response.data)\n self.assertIn('username', response.data)", "def test_get_user_invalid_token(self):\n\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + self.invalid_token)\n response = self.get_user()\n self.assertEqual(response.status_code, 403)\n msg = \"Invalid token. Token decode failed\"\n self.assertEqual(response.data[\"detail\"], msg)", "def _aget_user_resp(self, response):\n if response.error is not None:\n raise UserNotFoundError(\"Exception happened: %s for request %s\" %\n (response.error, response.request))\n elif response.status != httplib.OK:\n raise UserNotFoundError(\"request: %s status: %s body: %s\" %\n (response.request, response.status,\n response.body))\n\n data = json.loads(response.body) # convert json string to dict\n return data", "async def test_bad_retrieve_user_data(self, m):\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await retrieve_user_data(\"bad_token\")", "def test_get_user_no_token(self):\n\n response = self.get_user()\n self.assertEqual(response.status_code, 403)\n msg = \"Authentication credentials were not provided.\"\n self.assertEqual(response.data[\"detail\"], msg)", "def test_get_user(self):\n\n self.token = self.get_user_token()\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token)\n response = self.get_user()\n self.assertEqual(response.data['email'], \"graceunah@gmail.com\")\n self.assertEqual(response.status_code, 200)", "def test_request_users_user(self):\n response = requests.get(self.url + '/users/John')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())", "def test_user_retrieve(self):\n self.client.force_authenticate(user=self.user)\n self.url = reverse('authentication:user-detail', kwargs={'pk': str(self.user.pk)})\n expected_result = {\n 'pk': str(self.user.pk),\n 'email': self.user.email,\n 'first_name': self.user.first_name,\n 'last_name': self.user.last_name,\n 'date_joined': self.user.date_joined.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n 'is_active': self.user.is_active,\n 'is_staff': self.user.is_staff,\n 'is_superuser': self.user.is_superuser,\n 'passport_photo': self.user.passport_photo,\n }\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, expected_result)", "def test_get_user_bad_id(self):\r\n res = self.backend.get_user(-1)\r\n\r\n self.assertIsNone(res)", "def 
test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def test_get_single_expense_incorrect_id(\n client, create_user, login_user, generate_headers\n):\n # user = create_user\n response = login_user\n\n access = response.data[\"access\"]\n headers = generate_headers(access)\n\n resp = client.get(f\"/api/expense/noId\", headers=headers)\n\n assert resp.status_code == 404", "def test_fetching_missing_profile(self):\n response = self.client.get(\"/api/profile/testuuuu/\", format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n assert \"errors\" in response.data", "def test_fetch_user(self):\n\n self.register_user()\n\n self.assertEqual(self.fetch_user_details().status_code, 200)\n\n self.assertTrue(self.fetch_user_details(\n ).json[\"data\"][0][\"username\"] == 'Bjorn')", "def test_users_list_fail_unauthenticated_user(self):\n self.url = reverse('authentication:users-list')\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data['error'], 'NotAuthenticated')", "def test_anonymous_user(self):\n with self.assertRaises(lookup.LookupError):\n lookup.get_person_for_user(self.anonymous_user)", "def test_backend_failure_without_idp_slug(self):\n user = auth.authenticate(\n request=self.request,\n username=self.user.username,\n is_handshake_successful=True,\n )\n self.assertIsNone(user)\n with self.assertRaises(AttributeError):\n self.request.sso_login_error" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it retrieves that way gets passed to the registration form. Check that value-mapping overrides work in cases where we override a value other than the one we're looking for, and when an empty override is provided (the expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_override_relevant_value(self): value_map = {'country': {'Australia': 'NZ'}} expected_country = 'NZ' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
[ "def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_resource_value_resolver_overrides_and_defaults(mocker):\n # The need to patch here will go away once we start using the resolveResource\n # schema option.\n patch_get_values = mocker.patch.object(uer.ResourceValueResolver, \"_get_values\")\n patch_get_values.return_value = {\n \"default_1\": \"default_data1\",\n \"default_2\": \"default_data2\",\n \"default_3\": \"default_data3\",\n }\n\n spec = ExternalResourceSpec(\n provision_provider=\"other\",\n provisioner={\"name\": \"some_account\"},\n resource={\n \"provider\": \"other\",\n \"identifier\": \"some-id\",\n \"field_1\": \"field_data1\",\n \"field_2\": \"field_data2\",\n \"field_3\": \"field_data3\",\n \"overrides\": json.dumps({\"default_2\": \"override_data2\"}),\n \"defaults\": \"/some/path\",\n },\n namespace={},\n )\n\n resolver = uer.ResourceValueResolver(spec)\n values = resolver.resolve()\n\n assert values == {\n \"field_1\": \"field_data1\",\n \"field_2\": \"field_data2\",\n \"field_3\": \"field_data3\",\n \"default_1\": \"default_data1\",\n \"default_2\": \"override_data2\",\n \"default_3\": \"default_data3\",\n }", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_set_registration_configuration(self):\n pass", "def test_override_implementation(self, name, 
expected_class):\n with mock.patch(self._load_setting_path, return_value=lambda: name):\n impl = UserNameGen.get_implementation()\n self.assertIsInstance(impl, expected_class)\n self.assertEqual(len(impl._loaded_occupied_name_sets), 0)", "def test_set_registration_instance_configuration(self):\n pass", "def setup_provider(self):\n pass", "def test_multi_registered_provider(self):\n config_provider.register_provider(DummyProvider)\n self.assertRaises(KeyError, config_provider.register_provider,\n Dummy2Provider)", "def test_update_setting(self):\n pass", "def test_default_value(self):\n self.assertEqual(config_utils._get_sanitizer(), 'address')", "def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_update_entry_application_preference(self):\n pass", "def mock_setting_data() -> SettingsData:\n return SettingsData(\n {\n \"default\": None,\n \"min\": None,\n \"access\": None,\n \"max\": None,\n \"unit\": None,\n \"type\": None,\n \"id\": \"data_id\",\n }\n )", "def mock_interface_settings_match(mock_interface_settings):\n mock_interface_settings.getClass.return_value = libusb.USB_DEVICE_CLASS\n mock_interface_settings.getSubClass.return_value = libusb.USB_DEVICE_SUBCLASS\n mock_interface_settings.getProtocol.return_value = libusb.USB_DEVICE_PROTOCOL\n return mock_interface_settings", "def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def test_update_reg_ex_config(self):\n pass", "def test_TC_44432_PATCH_Settings_Verify_Correct_Message_Displayed_Entering_Invalid_Values_Configured_Field(self, context):\n\n # Define a test step\n with pytest.allure.step(\"\"\" Verify user is getting correct message on entering invalid values (abcd) in 'configured' field using request PATCH /settings.\"\"\"):\n\n # Test case configuration\n settingsDetails = context.sc.SettingsDetails(\n configured=abcd,\n hostname='localhost',\n port=8080,\n sslEnabled=False,\n sslPort=8443)\n\n # prepare the request, so we can modify it\n request = context.cl.Settings.updateSettings(\n body=settingsDetails\n )\n\n ### Invalid JSON Error injection example\n ### Errors that result in valid JSON can be configured above.\n ### Otherwise, uncomment the code below 
(request.future....)\n\n # Get the generated payload and corrupt the metric\n # request.future.request.data = request.future.request.data.replace(\n # '\"metric\": 1,', '\"metric\":,'\n # )\n\n # updateSettings the Settings, and check we got the error we expect\n try:\n client, response = check(\n request,\n quiet=True, returnResponse=True\n )\n except (HTTPBadRequest, HTTPForbidden) as e: # 400, 403 error\n get_error_message(e) | expect.any(\n should.start_with('Unrecognized token'),\n should.start_with('NameError: global name')\n )\n else:\n raise Exception(\n \"Expected error message, got {} status code instead.\".format(\n response.status_code))", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it retrieves that way gets passed to the registration form. Check that value-mapping overrides work in cases where we override a value other than the one we're looking for, and when an empty override is provided (the expected behavior is that existing value maps will be left alone).
def test_register_sapsf_metadata_present_override_other_value(self): value_map = {'country': {'United States': 'blahfake'}} expected_country = 'AU' provider_settings = { 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/', 'sapsf_private_key': 'fake_private_key_here', 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/', 'odata_company_id': 'NCC1701D', 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB', } if value_map: provider_settings['sapsf_value_mappings'] = value_map self._configure_testshib_provider( identity_provider_type='sap_success_factors', metadata_source=TESTSHIB_METADATA_URL, other_settings=json.dumps(provider_settings) ) self._test_register(country=expected_country)
[ "def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)", "def test_resource_value_resolver_overrides_and_defaults(mocker):\n # The need to patch here will go away once we start using the resolveResource\n # schema option.\n patch_get_values = mocker.patch.object(uer.ResourceValueResolver, \"_get_values\")\n patch_get_values.return_value = {\n \"default_1\": \"default_data1\",\n \"default_2\": \"default_data2\",\n \"default_3\": \"default_data3\",\n }\n\n spec = ExternalResourceSpec(\n provision_provider=\"other\",\n provisioner={\"name\": \"some_account\"},\n resource={\n \"provider\": \"other\",\n \"identifier\": \"some-id\",\n \"field_1\": \"field_data1\",\n \"field_2\": \"field_data2\",\n \"field_3\": \"field_data3\",\n \"overrides\": json.dumps({\"default_2\": \"override_data2\"}),\n \"defaults\": \"/some/path\",\n },\n namespace={},\n )\n\n resolver = uer.ResourceValueResolver(spec)\n values = resolver.resolve()\n\n assert values == {\n \"field_1\": \"field_data1\",\n \"field_2\": \"field_data2\",\n \"field_3\": \"field_data3\",\n \"default_1\": \"default_data1\",\n \"default_2\": \"override_data2\",\n \"default_3\": \"default_data3\",\n }", "def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider", "def test_set_registration_configuration(self):\n pass", "def test_override_implementation(self, name, 
expected_class):\n with mock.patch(self._load_setting_path, return_value=lambda: name):\n impl = UserNameGen.get_implementation()\n self.assertIsInstance(impl, expected_class)\n self.assertEqual(len(impl._loaded_occupied_name_sets), 0)", "def test_set_registration_instance_configuration(self):\n pass", "def setup_provider(self):\n pass", "def test_multi_registered_provider(self):\n config_provider.register_provider(DummyProvider)\n self.assertRaises(KeyError, config_provider.register_provider,\n Dummy2Provider)", "def test_update_setting(self):\n pass", "def test_default_value(self):\n self.assertEqual(config_utils._get_sanitizer(), 'address')", "def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)", "def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)", "def test_update_entry_application_preference(self):\n pass", "def mock_setting_data() -> SettingsData:\n return SettingsData(\n {\n \"default\": None,\n \"min\": None,\n \"access\": None,\n \"max\": None,\n \"unit\": None,\n \"type\": None,\n \"id\": \"data_id\",\n }\n )", "def mock_interface_settings_match(mock_interface_settings):\n mock_interface_settings.getClass.return_value = libusb.USB_DEVICE_CLASS\n mock_interface_settings.getSubClass.return_value = libusb.USB_DEVICE_SUBCLASS\n mock_interface_settings.getProtocol.return_value = libusb.USB_DEVICE_PROTOCOL\n return mock_interface_settings", "def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])", "def test_update_reg_ex_config(self):\n pass", "def test_TC_44432_PATCH_Settings_Verify_Correct_Message_Displayed_Entering_Invalid_Values_Configured_Field(self, context):\n\n # Define a test step\n with pytest.allure.step(\"\"\" Verify user is getting correct message on entering invalid values (abcd) in 'configured' field using request PATCH /settings.\"\"\"):\n\n # Test case configuration\n settingsDetails = context.sc.SettingsDetails(\n configured=abcd,\n hostname='localhost',\n port=8080,\n sslEnabled=False,\n sslPort=8443)\n\n # prepare the request, so we can modify it\n request = context.cl.Settings.updateSettings(\n body=settingsDetails\n )\n\n ### Invalid JSON Error injection example\n ### Errors that result in valid JSON can be configured above.\n ### Otherwise, uncomment the code below 
(request.future....)\n\n # Get the generated payload and corrupt the metric\n # request.future.request.data = request.future.request.data.replace(\n # '\"metric\": 1,', '\"metric\":,'\n # )\n\n # updateSettings the Settings, and check we got the error we expect\n try:\n client, response = check(\n request,\n quiet=True, returnResponse=True\n )\n except (HTTPBadRequest, HTTPForbidden) as e: # 400, 403 error\n get_error_message(e) | expect.any(\n should.start_with('Unrecognized token'),\n should.start_with('NameError: global name')\n )\n else:\n raise Exception(\n \"Expected error message, got {} status code instead.\".format(\n response.status_code))", "def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_chain_by_id
def test_get_chain_by_id(self): pass
[ "def test_fetch_all_by_chain_id(self):\n chain = models.Chain.query.filter(models.Chain.pmatches('2P33/0/A')).first()\n self.assertPaginatedResult('fetch_all_by_chain_id',\n chain.chain_id, chain.biomolecule_id)", "def test_get_chains(self):\n pass", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def test_atom_chain(self):\n self.assertEqual(self.a[11].chain.id, \"A\")\n self.assertEqual(self.a[1100].chain.id, \"A\")", "def test_solareclipses_id_get(self):\n pass", "def test_get_ancestor_by_id(self):\n query_string = [('id', 'id_example'),\n ('level', 2),\n ('page', 56),\n ('pageSize', 56)]\n response = self.client.open(\n '/api/ontologies/terms/ancestors/byId{format}'.format(format='format_example'),\n method='GET',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_anatomicalstructures_id_get(self):\n pass", "def test_chainDeferredRecordsExplicitChain(self):\n a = defer.Deferred()\n b = defer.Deferred()\n b.chainDeferred(a)\n self.assertIs(a._chainedTo, b)", "def test_portfolio_account_id_ledger_get(self):\n pass", "def get_chain_sequence_and_numbering(self, chain_id, *args, **varargs):\n chain = self.structure[0][chain_id]\n return get_chain_sequence_and_numbering(chain, *args, **varargs)", "def test_get_block_by_hid_from_cache(chain_and_hids):\n chain, hids = chain_and_hids\n for i, hid in enumerate(hids):\n block = chain.get_block_by_hid(hid)\n assert block.payload == 'Block {}'.format(i)\n return block", "def test_lists_id_get(self):\n pass", "def get_chain(self, chain_id):\n endpoint = \"%s/%s\" % (STS_CHAINS, chain_id)\n\n response = self.client.get_json(endpoint)\n response.success = response.status_code == 200\n\n return response", "def test_workflows_id_get(self):\n pass", "def test_get_case_by_id(self):\n pass", "def test_liechtensteinsettlements_id_get(self):\n pass", "def test_coupledmodels_id_get(self):\n pass", "def test_books_id_get(self):\n pass", "def __check_okay_to_chain(self):\n if self.__retrieved or self.__id is not None:\n raise InvalidOperation(\"cannot set options after executing query\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_chains
def test_get_chains(self): pass
[ "def test_get_chain_by_id(self):\n pass", "def get_chains (structure):\n chains=[]\n for chain in structure[0]:\n chains.append(chain)\n return chains", "def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(list())", "def test_acyclic_chains():\n names = ['robot', 'box1', 'box2']\n chains = lambda: FactoredRandomGeometricGraph.acyclic_chains(names)\n\n expected_number = 16\n actual_number = sum(1 for _ in chains())\n assert actual_number == expected_number, \\\n \"Expected {} chains; actual value was {}\".format(\n expected_number, actual_number)\n\n assert all(\n FactoredRandomGeometricGraph.is_acyclic(chain)\n for chain in chains())", "def _test_chain(self, x, class_type_list, kwargs_list, y=None):\n chain, modules = self._create_chain(class_type_list, kwargs_list)\n\n chain = chain.fit(x, y=y)\n self.logger.info(\"Preprocessors chain:\\n{:}\".format(chain))\n\n x_chain = chain.forward(x)\n self.logger.info(\"Trasformed X (chain):\\n{:}\".format(x_chain))\n\n # Train the manual chain and transform\n x_manual = x\n for module in modules:\n module.fit(x_manual, y=y)\n x_manual = module.forward(x_manual)\n\n self.logger.info(\"Trasformed X (manual):\\n{:}\".format(x_manual))\n self.assert_allclose(x_chain, x_manual)\n\n return x_chain", "def get_chain(self, request, params):\n\n result_tables = load_chain(params[\"heads\"], params[\"tails\"])\n return self.return_ok(result_tables)", "def selectChains(self): \n \n #\n # TODO: Is this really necessary??!?\n #\n \n #\n # Check if anything left or set to all chains in chemical shift list\n # \n \n if self.atomMeasurements:\n \n if not self.chains and self.exportMode == 'atoms':\n\n #\n # Use all chains - note that output order depends on keys list... 
can change\n #\n\n resonanceToAtoms = self.atomMeasurements.keys()\n\n self.chains = [resonanceToAtoms[0].chain]\n\n for resonanceToAtom in resonanceToAtoms[1:]:\n\n if resonanceToAtom.chain not in self.chains:\n\n inserted = 0\n\n for i in range(0,len(self.chains)):\n\n if self.chains[i].code > resonanceToAtom.chain.code:\n\n self.chains.insert(i,resonanceToAtom.chain)\n inserted = 1\n break\n\n if not inserted:\n\n self.chains.append(resonanceToAtom.chain)\n\n else:\n \n #\n # If no measurements, set self.chains to [] so rest of function is ignored...\n #\n \n self.chains = []", "def test_get_option_chains(self):\n\n # Build a Query.\n option_chain_dict = {\n 'symbol': 'MSFT',\n 'contractType': 'CALL',\n 'expirationMonth': 'JUN',\n 'optionType': 'SC',\n 'range': 'ITM',\n 'includeQuotes': True\n }\n\n # Query the Options Data.\n options_data = self.service.get_option_chain(\n option_chain_dict=option_chain_dict\n )\n\n self.assertIn('numberOfContracts', list(options_data.keys()))", "def list_chains(self):\n response = self.client.get_json(STS_CHAINS)\n response.success = response.status_code == 200\n\n return response", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)", "def generate_chains(self) -> None:\n\n for size in self.input_sizes:\n print(\"Generating chains of size \" + str(size) + \"...\")\n prefix = \"../text_files/chain\"\n char_chain(\n DEFAULT_CHAIN_SOURCE_1,\n size,\n prefix + \"_size_\" + str(size) + \"_\" + str(1) + \".txt\",\n )\n char_chain(\n DEFAULT_CHAIN_SOURCE_2,\n size,\n prefix + \"_size_\" + str(size) + \"_\" + str(2) + \".txt\",\n )\n temp1 = char_chain(\n DEFAULT_CHAIN_SOURCE_1,\n floor(size / 2),\n prefix + \"_size_\" + str(size) + \"_temp\" + str(1) + \".txt\",\n )\n temp2 = char_chain(\n DEFAULT_CHAIN_SOURCE_2,\n ceil(size / 2),\n prefix + \"_size_\" + str(size) + \"_temp\" + str(2) + \".txt\",\n )\n temp3 = char_chain(\n DEFAULT_CHAIN_SOURCE_3,\n floor(size / 2),\n prefix + \"_size_\" + str(size) + \"_temp\" + str(3) + \".txt\",\n )\n temp1.concatenate(\n temp2, prefix + \"_size_\" + str(size) + \"_\" + str(3) + \".txt\"\n )\n temp2.concatenate(\n temp1, prefix + \"_size_\" + str(size) + \"_\" + str(4) + \".txt\"\n )\n temp2.concatenate(\n temp3, prefix + \"_size_\" + str(size) + \"_\" + str(5) + \".txt\"\n )\n temp3.concatenate(\n temp2, prefix + \"_size_\" + str(size) + \"_\" + str(6) + \".txt\"\n )\n temp1.delete_chain()\n temp2.delete_chain()\n temp3.delete_chain()", "def get_all_chains() -> List[ChainInfo]:\n return list(registry.chain_dict.values())", "def num_chains(self):\n return self._num_chains", "def chain():\n return eth_tester.EthereumTester(eth_tester.PyEVMBackend())", "def get_coref_chains(self):\r\n return self._coref_chains", "def f_chains(self) -> List[Callable[[], Chain]]:\n return [delayed_run_chain() for _ in range(self.n_chains)]", "def test_sort_chain_multiple_structure_random():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(len(data)):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with randomish values\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with randomish 
values\".format(len(data))", "def iter_all_chains(self):\n for model in self.model_list:\n for chain in model.chain_list:\n yield chain", "def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for post_chain
def test_post_chain(self): pass
[ "def test_post_chain_search(self):\n pass", "def test_post_transaction_pattern(self):\n pass", "def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)", "def test_post_foods(self):\n pass", "def test_post_party(self):\n pass", "def test_get_chains(self):\n pass", "def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})", "def _execute_post_hook(self, context, func_name, pre_hook_data,\n driver_action_results, *args, **kwargs):", "def test_posthardwares(self):\n pass", "def test_event_chain(self):\n def f1():\n self.runner.post_event(f2)\n\n def f2():\n self.runner.post_event(f3)\n\n def f3():\n pass\n\n self.runner.post_event(f1)\n self.assertEqual(self.handled_events, [f1, f2, f3])", "def test_chainDeferredRecordsExplicitChain(self):\n a = defer.Deferred()\n b = defer.Deferred()\n b.chainDeferred(a)\n self.assertIs(a._chainedTo, b)", "def test_tranform_chain() -> None:\n transform_chain = TransformChain(\n input_variables=[\"first_name\", \"last_name\"],\n output_variables=[\"greeting\"],\n transform=dummy_transform,\n )\n input_dict = {\"first_name\": \"Leroy\", \"last_name\": \"Jenkins\"}\n response = transform_chain(input_dict)\n expected_response = {\"greeting\": \"Leroy Jenkins says hello\"}\n assert response == expected_response", "def _test_chain(self, x, class_type_list, kwargs_list, y=None):\n chain, modules = self._create_chain(class_type_list, kwargs_list)\n\n chain = chain.fit(x, y=y)\n self.logger.info(\"Preprocessors chain:\\n{:}\".format(chain))\n\n x_chain = chain.forward(x)\n self.logger.info(\"Trasformed X (chain):\\n{:}\".format(x_chain))\n\n # Train the manual chain and transform\n x_manual = x\n for module in modules:\n module.fit(x_manual, y=y)\n x_manual = module.forward(x_manual)\n\n self.logger.info(\"Trasformed X (manual):\\n{:}\".format(x_manual))\n self.assert_allclose(x_chain, x_manual)\n\n return x_chain", "def _post(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr__post(self, *args, **kwargs)", "def test_handle_en_passant_side_effect(self):\n # TODO", "def test_chainDeferredRecordsImplicitChain(self):\n a = defer.Deferred()\n b = defer.Deferred()\n a.addCallback(lambda ignored: b)\n a.callback(None)\n self.assertIs(a._chainedTo, b)", "def forward_test(self, *args, **kwargs):\n pass", "def step(self):\n self.post.receive(self.w * self.pre.act)", "def test_workflows_post(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for post_chain_search
def test_post_chain_search(self): pass
[ "def test_post_chain(self):\n pass", "def test_search_process(self):\n pass", "def test_search_workflow(self):\n pass", "def test_search_workflow_step(self):\n pass", "def test_post_foods_search(self):\n pass", "def test_search_housekeeping(self):\n pass", "def test_search_transaction(self):\n pass", "def test_search_catering(self):\n pass", "def test_search(self):\n pass", "def test_search_entry_result(self):\n pass", "def test_search_systems_post(self):\n pass", "def test_search_validate_post(self):\n pass", "def test_search_portal_step(self):\n pass", "def test_search_portal_process(self):\n pass", "def test_search_transaction_dispute(self):\n pass", "def test_search_gl_posting(self):\n pass", "def test_search_entry_parcel(self):\n pass", "def test_search_web_transaction(self):\n pass", "def test_get_saved_search(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a networkx graph object from variables and relations.
def as_networkx_graph(variables, relations): graph = nx.Graph() # One node for each variables graph.add_nodes_from([v.name for v in variables]) for r in relations: for p in all_pairs([e.name for e in r.dimensions]): graph.add_edge(*p) return graph
[ "def as_networkx_bipartite_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables], bipartite=0)\n graph.add_nodes_from([r.name for r in relations], bipartite=1)\n\n for r in relations:\n for e in r.dimensions:\n graph.add_edge(r.name, e.name)\n return graph", "def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph", "def make_nx_graph(self):\n\n nxG = nx.Graph()\n if self.pg_base is not None:\n path_graph_to_use = self.pg_base\n else:\n path_graph_to_use = self.matrix_bins\n\n for node_id, node in path_graph_to_use.node.iteritems():\n # when saving a networkx object, numpy number types or lists, are not accepted\n nn = node.copy()\n for attr, value in nn.iteritems():\n if isinstance(value, np.int64):\n nn[attr] = int(value)\n elif isinstance(value, np.float64):\n nn[attr] = float(value)\n elif isinstance(value, list):\n nn[attr] = \", \".join([str(x) for x in value])\n\n nxG.add_node(node_id, **nn)\n\n matrix = self.matrix.tocoo()\n max_weight = matrix.data.max() + 1\n\n for u, v, weight in zip(matrix.row, matrix.col, matrix.data):\n if u == v:\n continue\n\n if u in path_graph_to_use.adj[v]:\n # u and v are neighbors\n nxG.add_edge(u, v, weight=float(max_weight))\n else:\n nxG.add_edge(u, v, weight=float(weight))\n\n return nxG", "def return_nx_graph(self, features=False, relations=False, dropna=\"none\"):\n\n import networkx as nx\n\n # create empty DiGraph\n nx_g = nx.DiGraph()\n\n # select features\n if features is False:\n vt = pd.DataFrame(index=self.v.index)\n elif features is True:\n vt = self.v\n elif _is_array_like(features):\n vt = self.v[features]\n else:\n vt = self.v[features].to_frame()\n\n # create nx compatible tuple, (index, weight_dict)\n vt = vt.to_dict(\"index\")\n vt = ((key, value) for key, value in vt.items())\n\n # add nodes\n nx_g.add_nodes_from(vt)\n\n # select relations\n if hasattr(self, \"e\"):\n if relations is False:\n et = pd.DataFrame(index=self.e.index)\n\n elif relations is True:\n et = self.e\n\n elif _is_array_like(relations):\n if dropna != \"none\":\n et = self.e[relations].dropna(how=dropna)\n else:\n et = self.e[relations]\n\n else:\n if dropna != \"none\":\n et = self.e[relations].to_frame().dropna(how=dropna)\n else:\n et = self.e[relations].to_frame()\n\n # create nx compatible tuple, (index, index, weight_dict)\n et = et.to_dict(\"index\")\n et = ((key[0], key[1], value) for key, value in et.items())\n\n # add edges\n nx_g.add_edges_from(et)\n\n return nx_g", "def make_graph(gfile):\n print('making networkx graph for OmicsIntegrator')\n graph = oi.Graph(gfile)\n return graph", "def build_computational_graph():\n pass", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return 
node_dict", "def _get_graph(nodes: Nodes, edges: np.ndarray):\n\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph", "def to_networkx_graph(graph: MolGraph) -> nx.Graph:\n G = nx.Graph(graph.adj_list)\n node_attrs = {num: {'element': element, 'xyz': xyz} for num, (element, xyz) in enumerate(graph)}\n nx.set_node_attributes(G, node_attrs)\n edge_attrs = {edge: {'length': length} for edge, length in graph.bond_lengths.items()}\n nx.set_edge_attributes(G, edge_attrs)\n return G", "def _build_graph(self):\n\n graph = nx.DiGraph()\n self._graph = graph\n for table in self.metadata.tables.values():\n if not is_active(table):\n continue\n\n self._graph.add_node(table.fullname)\n neighbors = self.find_neighbor_tables(table)\n for neighbor in neighbors:\n self._graph.add_node(neighbor.table.fullname)\n self._graph.add_edge(\n table.fullname,\n neighbor.table.fullname,\n join_fields=neighbor.join_fields,\n )", "def from_networkx(nx_graph, features=None):\n out = NodeGraphTensor()\n for node in nx_graph.nodes:\n feats = []\n if not features:\n feats = [1.0]\n else:\n for key in features:\n feats.append(nx_graph.nodes[node][key])\n feats = torch.Tensor(feats)\n out.add_node(feats)\n for source, target in nx_graph.edges:\n out.add_edge(source, target)\n return out", "def convert_to_networkx(query_graph):\n graph = nx.Graph()\n for node,node_props in query_graph['nodes'].items():\n graph.add_node(node,bound='ids' in node_props)\n for edge,edge_props in query_graph['edges'].items():\n graph.add_edge(edge_props['subject'],edge_props['object'],edge_id=edge)\n return graph", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def build_graph(bags):\n G = nx.DiGraph()\n\n for b, c in bags.items():\n for cost, node in c:\n G.add_edge(b, node, cost=cost)\n\n return G", "def get_graph(edges):\n\n # Without Networkx\n if cm.USE_NETWORKX is not True:\n return 'graph \"\" { ' + \\\n ' '.join(Edge.get_formatted_nodes(edges)) + \\\n ' '.join(Edge.get_formatted_edges(edges)) + \\\n '}'\n\n # With Networkx\n # Counter of pseudos\n all_nodes = Edge.get_nodes(edges)\n # Counter of edges\n all_edges = Counter((edge.pseudo1, edge.pseudo2) for edge in 
edges)\n\n # Add nodes automatically by adding weighted edges directly\n # Problem : this creates weight attribute but Vis uses value attribute..\n G = nx.Graph()\n G.add_weighted_edges_from([(e[0], e[1], all_edges[e]) for e in all_edges])\n\n # Set edges titles (according to weights)\n # PS: labels are always displayed on graph,\n # whereas titles are displayed on mouse hover.\n # PS2: to be resized, elements must have values value instead of weight.\n [nx.set_edge_attributes(G, 'title',\n {edge : str(weight) + \" message(s)\"})\n for edge, weight in nx.get_edge_attributes(G, 'weight').items()]\n [nx.set_edge_attributes(G, 'value',\n {edge : weight})\n for edge, weight in nx.get_edge_attributes(G, 'weight').items()]\n # Add weights on nodes\n [nx.set_node_attributes(G, 'value',\n {node : all_nodes[node]})\n for node in G.nodes_iter()]\n [nx.set_node_attributes(G, 'title',\n {node : str(all_nodes[node]) + \" message(s)\"})\n for node in G.nodes_iter()]\n\n# print(nx.get_edge_attributes(G, 'weight'))\n# print(nx.get_edge_attributes(G, 'label'))\n# print(nx.get_node_attributes(G, 'weight'))\n\n # Write into file => ULGYYY\n # https://networkx.github.io/documentation/latest/_modules/networkx/drawing/nx_pydot.html\n # Save dot file\n# nx.drawing.nx_pydot.write_dot(G, \"test.dot\")\n return nx.drawing.nx_pydot.to_pydot(G).to_string().replace('\\n', ' ')", "def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. 
if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph", "def build_graph(self):\n self._create_audio_model()\n self._create_placeholders()\n self._create_embedding()\n self._initialize_embedding()\n self._create_recursive_net()\n self._create_output_layers()\n self._create_optimizer()\n self._create_summary()", "def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a networkx graph object from variables and relations.
def as_networkx_bipartite_graph(variables, relations): graph = nx.Graph() # One node for each variables graph.add_nodes_from([v.name for v in variables], bipartite=0) graph.add_nodes_from([r.name for r in relations], bipartite=1) for r in relations: for e in r.dimensions: graph.add_edge(r.name, e.name) return graph
[ "def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph", "def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph", "def make_nx_graph(self):\n\n nxG = nx.Graph()\n if self.pg_base is not None:\n path_graph_to_use = self.pg_base\n else:\n path_graph_to_use = self.matrix_bins\n\n for node_id, node in path_graph_to_use.node.iteritems():\n # when saving a networkx object, numpy number types or lists, are not accepted\n nn = node.copy()\n for attr, value in nn.iteritems():\n if isinstance(value, np.int64):\n nn[attr] = int(value)\n elif isinstance(value, np.float64):\n nn[attr] = float(value)\n elif isinstance(value, list):\n nn[attr] = \", \".join([str(x) for x in value])\n\n nxG.add_node(node_id, **nn)\n\n matrix = self.matrix.tocoo()\n max_weight = matrix.data.max() + 1\n\n for u, v, weight in zip(matrix.row, matrix.col, matrix.data):\n if u == v:\n continue\n\n if u in path_graph_to_use.adj[v]:\n # u and v are neighbors\n nxG.add_edge(u, v, weight=float(max_weight))\n else:\n nxG.add_edge(u, v, weight=float(weight))\n\n return nxG", "def return_nx_graph(self, features=False, relations=False, dropna=\"none\"):\n\n import networkx as nx\n\n # create empty DiGraph\n nx_g = nx.DiGraph()\n\n # select features\n if features is False:\n vt = pd.DataFrame(index=self.v.index)\n elif features is True:\n vt = self.v\n elif _is_array_like(features):\n vt = self.v[features]\n else:\n vt = self.v[features].to_frame()\n\n # create nx compatible tuple, (index, weight_dict)\n vt = vt.to_dict(\"index\")\n vt = ((key, value) for key, value in vt.items())\n\n # add nodes\n nx_g.add_nodes_from(vt)\n\n # select relations\n if hasattr(self, \"e\"):\n if relations is False:\n et = pd.DataFrame(index=self.e.index)\n\n elif relations is True:\n et = self.e\n\n elif _is_array_like(relations):\n if dropna != \"none\":\n et = self.e[relations].dropna(how=dropna)\n else:\n et = self.e[relations]\n\n else:\n if dropna != \"none\":\n et = self.e[relations].to_frame().dropna(how=dropna)\n else:\n et = self.e[relations].to_frame()\n\n # create nx compatible tuple, (index, index, weight_dict)\n et = et.to_dict(\"index\")\n et = ((key[0], key[1], value) for key, value in et.items())\n\n # add edges\n nx_g.add_edges_from(et)\n\n return nx_g", "def make_graph(gfile):\n print('making networkx graph for OmicsIntegrator')\n graph = oi.Graph(gfile)\n return graph", "def build_computational_graph():\n pass", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def _get_graph(nodes: Nodes, edges: np.ndarray):\n\n graph = 
nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph", "def to_networkx_graph(graph: MolGraph) -> nx.Graph:\n G = nx.Graph(graph.adj_list)\n node_attrs = {num: {'element': element, 'xyz': xyz} for num, (element, xyz) in enumerate(graph)}\n nx.set_node_attributes(G, node_attrs)\n edge_attrs = {edge: {'length': length} for edge, length in graph.bond_lengths.items()}\n nx.set_edge_attributes(G, edge_attrs)\n return G", "def _build_graph(self):\n\n graph = nx.DiGraph()\n self._graph = graph\n for table in self.metadata.tables.values():\n if not is_active(table):\n continue\n\n self._graph.add_node(table.fullname)\n neighbors = self.find_neighbor_tables(table)\n for neighbor in neighbors:\n self._graph.add_node(neighbor.table.fullname)\n self._graph.add_edge(\n table.fullname,\n neighbor.table.fullname,\n join_fields=neighbor.join_fields,\n )", "def from_networkx(nx_graph, features=None):\n out = NodeGraphTensor()\n for node in nx_graph.nodes:\n feats = []\n if not features:\n feats = [1.0]\n else:\n for key in features:\n feats.append(nx_graph.nodes[node][key])\n feats = torch.Tensor(feats)\n out.add_node(feats)\n for source, target in nx_graph.edges:\n out.add_edge(source, target)\n return out", "def convert_to_networkx(query_graph):\n graph = nx.Graph()\n for node,node_props in query_graph['nodes'].items():\n graph.add_node(node,bound='ids' in node_props)\n for edge,edge_props in query_graph['edges'].items():\n graph.add_edge(edge_props['subject'],edge_props['object'],edge_id=edge)\n return graph", "def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new", "def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph", "def build_graph(bags):\n G = nx.DiGraph()\n\n for b, c in bags.items():\n for cost, node in c:\n G.add_edge(b, node, cost=cost)\n\n return G", "def get_graph(edges):\n\n # Without Networkx\n if cm.USE_NETWORKX is not True:\n return 'graph \"\" { ' + \\\n ' '.join(Edge.get_formatted_nodes(edges)) + \\\n ' '.join(Edge.get_formatted_edges(edges)) + \\\n '}'\n\n # With Networkx\n # Counter of pseudos\n all_nodes = Edge.get_nodes(edges)\n # Counter of edges\n all_edges = Counter((edge.pseudo1, edge.pseudo2) for edge in edges)\n\n # Add nodes automatically by adding weighted edges directly\n # 
Problem : this creates weight attribute but Vis uses value attribute..\n G = nx.Graph()\n G.add_weighted_edges_from([(e[0], e[1], all_edges[e]) for e in all_edges])\n\n # Set edges titles (according to weights)\n # PS: labels are always displayed on graph,\n # whereas titles are displayed on mouse hover.\n # PS2: to be resized, elements must have values value instead of weight.\n [nx.set_edge_attributes(G, 'title',\n {edge : str(weight) + \" message(s)\"})\n for edge, weight in nx.get_edge_attributes(G, 'weight').items()]\n [nx.set_edge_attributes(G, 'value',\n {edge : weight})\n for edge, weight in nx.get_edge_attributes(G, 'weight').items()]\n # Add weights on nodes\n [nx.set_node_attributes(G, 'value',\n {node : all_nodes[node]})\n for node in G.nodes_iter()]\n [nx.set_node_attributes(G, 'title',\n {node : str(all_nodes[node]) + \" message(s)\"})\n for node in G.nodes_iter()]\n\n# print(nx.get_edge_attributes(G, 'weight'))\n# print(nx.get_edge_attributes(G, 'label'))\n# print(nx.get_node_attributes(G, 'weight'))\n\n # Write into file => ULGYYY\n # https://networkx.github.io/documentation/latest/_modules/networkx/drawing/nx_pydot.html\n # Save dot file\n# nx.drawing.nx_pydot.write_dot(G, \"test.dot\")\n return nx.drawing.nx_pydot.to_pydot(G).to_string().replace('\\n', ' ')", "def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. 
if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph", "def build_graph(self):\n self._create_audio_model()\n self._create_placeholders()\n self._create_embedding()\n self._initialize_embedding()\n self._create_recursive_net()\n self._create_output_layers()\n self._create_optimizer()\n self._create_summary()", "def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the variables and relations as a graph, using networkx and matplotlib.
def display_graph(variables, relations): graph = as_networkx_graph(variables, relations) # Do not crash if matplotlib is not installed try: import matplotlib.pyplot as plt nx.draw_networkx(graph, with_labels=True) # nx.draw_random(graph) # nx.draw_circular(graph) # nx.draw_spectral(graph) plt.show() except ImportError: print("ERROR: cannot display graph, matplotlib is not installed")
[ "def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()", "def plot_graph(self) -> None:", "def Show_Network_and_Outputs(t,Nets,dat,positons=0):\r\n\r\n \r\n max_neur = 0\r\n for i in range(len(Nets)):\r\n if Nets[i].number_of_nodes() >max_neur:\r\n max_neur = Nets[i].number_of_nodes()\r\n \r\n #Generate the colours for each node and activation\r\n #Its probably easier in general to calculate the colours for the largest\r\n #network and then for smaller nets just use the colours needed, as opposed\r\n #to creating a colour vector for each neuron\r\n col = []\r\n for i in range(max_neur):\r\n col.append('C'+str(i%10))\r\n \r\n \r\n #main loop for plotting\r\n #plots network in left column, data in right\r\n plt.figure(1)\r\n for i in range(len(Nets)):\r\n #edgewidth = [ d['weight'] for (u,v,d) in Nets[1].edges(data=True)]\r\n #Generate the numbers for each neuron\r\n labels = {}\r\n for j in range(Nets[i].number_of_nodes()):\r\n labels[j] = str(j)\r\n \r\n #positions for each node, kamada is the shortest path algorithm, it looks the best\r\n if type(positons) is not list or not tuple:\r\n pos = nx.kamada_kawai_layout(Nets[0])\r\n else:\r\n pos = positons[i]\r\n \r\n #plot network in left column\r\n plt.subplot( len(Nets)*100 + 20 + 2*i + 1)\r\n \r\n \r\n nx.draw(Nets[i],pos,node_size = 40, node_color=col)#,width=edgewidth)\r\n nx.draw_networkx_labels(Nets[i], pos, labels, font_size=6)\r\n \r\n \r\n\r\n #plot data in right column\r\n plt.subplot( len(Nets)*100 + 20 + 2*i + 2)\r\n #plt.ylim((-100, 40))\r\n for j in range(np.size(dat[i],0)):\r\n plt.plot(t,dat[i][j,:],col[j])\r\n \r\n plt.show()", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def draw( self, **kwargs ):\n\t\t\n\t\tnetworkx.draw(self.graph, **kwargs)\n\t\tpyplot.show()", "def plot_graph(self):\r\n x = []\r\n y = []\r\n\r\n for n in self.graph.get_all_v().values():\r\n if(n.get_pos() != None):\r\n x.append(n.get_pos().get_x())\r\n y.append(n.get_pos().get_y())\r\n else:\r\n x_random = random.random()\r\n y_random = random.random()\r\n n.set_pos(x_random, y_random, 0)\r\n x.append(x_random)\r\n y.append(y_random)\r\n fig, ax = plt.subplots()\r\n ax.scatter(x, y, 60, \"red\")\r\n for xi in self.graph.get_all_v().values():\r\n for yi in 
self.graph.all_out_edges_of_node(xi.get_key()):\r\n src = (xi.get_pos().get_x(), xi.get_pos().get_y())\r\n dest = (self.graph.get_node(yi).get_pos().get_x(), self.graph.get_node(yi).get_pos().get_y())\r\n plt.annotate(\"\", dest, src, arrowprops=dict(edgecolor=\"black\", arrowstyle=\"->\"))\r\n\r\n plt.title(\"OOP - Ex3\")\r\n plt.xlabel(\"x axis\")\r\n plt.ylabel(\"y axis\")\r\n plt.show()", "def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()", "def plot_mpl(self, prog=\"dot\"):\n nx.draw(self.graph, self.pretty_plot_coordinates(prog=prog))\n plt.show()", "def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it", "def draw_graph(self):\n plt.figure(figsize=(10, 10), dpi=200) # set the size of figure\n pos = nx.kamada_kawai_layout(self.__DiGraph) # use a layout function to build a dict of positions\n # the positions of nodes are not accurate to the fact and the visualization is just an abstract illustration\n attractions = Navigation.get_all_attractions(self) # retrieve the list of all 
attractions\n labels = {node:node for node in attractions} # create the dict of labels of all attractions\n nx.draw_networkx(self.__DiGraph, pos=pos, with_labels=True, node_color='b', node_size=50, labels=labels, font_size=14)\n path_edges = [(self.__path[n], self.__path[n+1]) for n in range(len(self.__path)-1)] # build the edges list from the lists of all attractions\n nx.draw_networkx_edges(self.__DiGraph, pos=pos, edgelist=path_edges, edge_color='r', width=8, arrows=False) # specify the style of the shortest path\n plt.xticks([]) # remove ticks\n plt.yticks([])\n return plt", "def plot_environment_graph(self) -> None:\n nx.draw(self.network,\n with_labels=True,\n node_color=[n['data'].value\n for i, n in\n self.network.nodes.items()],\n cmap=plt.cm.Oranges) # type:ignore", "def nx_plot(self, **kwargs):\n nx.draw(self.graph, node_size=500, with_labels=True, node_color=\"white\", **kwargs)", "def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()", "def plot_nodes(nodes):\n x = [node.x for node in nodes]\n y = [node.y for node in nodes]\n plt.plot(x, y, 'k.')\n# plot_nodes_id(nodes)\n plot_nodes_energy(nodes)", "def drawGraph(A):\n m,n = A.shape\n labels = {}\n for i in range(n):\n labels[i]=str(i)\n gr = nx.from_numpy_matrix(A.T,create_using=nx.DiGraph())\n nx.draw(gr,arrows=True,node_color='#15b01a',labels=labels)\n plt.show()", "def draw(deps):\n graph = nx.DiGraph()\n for node in deps:\n graph.add_node(node)\n for node, neighbors in deps.items():\n for neighbor in neighbors:\n graph.add_edge(node, neighbor)\n pos = exploded_layout(graph.nodes(), graph.edges())\n nx.draw_networkx_nodes(\n graph,\n pos,\n node_color=[\n 'r' if deps.get(node) else 'b'\n for node in graph.nodes()\n ],\n alpha=0.8,\n edge_color='w',\n )\n nx.draw_networkx_edges(graph, pos, alpha=0.5)\n nx.draw_networkx_labels(graph, pos)\n plt.show()", "def plot_netlist(\n self, with_labels: bool = True, font_weight: str = \"normal\"\n ) -> nx.Graph:\n netlist = self.get_netlist()\n connections = netlist[\"connections\"]\n placements = netlist[\"placements\"]\n\n G = nx.Graph()\n\n G.add_edges_from(\n [\n (\",\".join(k.split(\",\")[:-1]), \",\".join(v.split(\",\")[:-1]))\n for k, v in connections.items()\n ]\n )\n\n pos = {k: (v[\"x\"], v[\"y\"]) for k, v in placements.items()}\n labels = {k: \",\".join(k.split(\",\")[:1]) for k in placements.keys()}\n nx.draw(\n G,\n with_labels=with_labels,\n font_weight=font_weight,\n labels=labels,\n pos=pos,\n )\n return G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the graph diameter(s). If the graph contains several independent sub-graphs, returns a list of the diameters of each of the subgraphs.
def graph_diameter(variables, relations): diams = [] g = as_networkx_graph(variables, relations) components = (g.subgraph(c).copy() for c in nx.connected_components(g)) for c in components: diams.append(nx.diameter(c)) return diams
[ "def diameter(self):\n \n v = self.vertices() \n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_paths(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_paths.append(smallest)\n\n smallest_paths.sort(key=len)\n\n # longest path is at the end of list, \n # i.e. diameter corresponds to the length of this path\n diameter = len(smallest_paths[-1])\n return diameter", "def graph_diameter():\n # you must replace this value with the graph diameter\n return 8", "def graph_diameter():\r\n # you must replace this value with the graph diameter\r\n return 8", "def topo_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n distance.append(len(pathlist[k]) - 1)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.topodiameter = Temp", "def diameter(graph):\r\n max_distance = 0\r\n for vertex in graph:\r\n new_dist = max_dist(graph, vertex)\r\n if new_dist > max_distance:\r\n max_distance = new_dist\r\n return max_distance", "def spatial_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n Temp2 = 0\n for m in range(len(pathlist[k]) - 1):\n Temp2 += self.Dismatrix[pathlist[k][m], pathlist[k][m+1]]\n distance.append(Temp2)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.diameter = Temp", "def show_diameters(self) -> None:\n for circle in self._circles:\n print(f\"A diameter of {circle} is {circle.diameter()}\")", "def diameter(self):\n return self._diameter", "def diameter(self):\n return self._diameter.get_waarde()", "def diameter(shape: Shape) -> float:\n max_distance = 0\n for x1, y1, z1 in shape.get_vertices():\n for x2, y2, z2 in shape.get_vertices():\n distance = np.sqrt(((x1-x2)**2)+((y1-y2)**2)+((z1-z2)**2))\n if distance > max_distance:\n max_distance = distance\n\n return max_distance", "def diameter(G, e=None, usebounds=False):\n if usebounds is True and e is None and not G.is_directed():\n return extrema_bounding(G, compute=\"diameter\")\n if e is None:\n e = eccentricity(G)\n return max(e.values())", "def num_of_subgraphs(self):\n \n G = self.to_undirected_graph()\n \n count = G.num_subgraph()\n \n print('The number of disconnected components in the graph is ', count)", "def volume(nodes, graph):\n edges = []\n #Get all edges\n for node in nodes:\n for neighbor in graph.neighbors(node):\n edges.append(tuple(sorted([neighbor,node])))\n #Remove duplicate edges\n edges = list(set(edges))\n return len(edges)", "def find_disconnected_subgraphs(self):\n\n disconnected_parts = []\n\n # Find nodes disconnected from the main part of the graph\n unvisited_nodes = set(self.nodes)\n\n # Initialize graph traversal\n if self.index is not None:\n start_node = self.index\n elif self.ltop is not None:\n start_node = self.ltop\n else:\n start_node = next(iter(self.nodes))\n\n explore_set = set(self.get_adjacent_nodes(start_node))\n unvisited_nodes.remove(start_node)\n\n # Iteratively visit a node and update the explore set with neighbouring nodes until explore set is empty\n while explore_set:\n node = explore_set.pop()\n unvisited_nodes.remove(node)\n 
explore_set.update(set(self.get_adjacent_nodes(node)) & unvisited_nodes)\n\n # If no nodes were unvisited, the graph has no disconnected parts\n if len(unvisited_nodes) == 0:\n return disconnected_parts\n\n # Collect disconnected parts into a list of parts by repeating the above procedure on unvisited nodes\n while unvisited_nodes:\n\n disconnected_part = set()\n\n # Initialize graph traversal\n start_node = next(iter(unvisited_nodes))\n explore_set = set(self.get_adjacent_nodes(start_node))\n unvisited_nodes.remove(start_node)\n disconnected_part.add(start_node)\n\n # Iteratively visit a node and update the explore set with neighbouring nodes until explore set empty\n while explore_set:\n node = explore_set.pop()\n unvisited_nodes.remove(node)\n disconnected_part.add(node)\n explore_set.update(set(self.get_adjacent_nodes(node)) & unvisited_nodes)\n\n disconnected_parts.append(disconnected_part)\n\n return disconnected_parts", "def get_molecule_sizes(geoms):\n molecule_sizes = []\n for geom in geoms:\n #print(geom)\n #print(type(geom))\n distances = geom.get_positions()\n molecule_sizes.append(len(distances))\n return molecule_sizes", "def get_pupil_diameter(dlc):\r\n diameters = []\r\n # Get the x,y coordinates of the four pupil points\r\n top, bottom, left, right = [np.vstack((dlc[f'pupil_{point}_r_x'], dlc[f'pupil_{point}_r_y']))\r\n for point in ['top', 'bottom', 'left', 'right']]\r\n # First compute direct diameters\r\n diameters.append(np.linalg.norm(top - bottom, axis=0))\r\n diameters.append(np.linalg.norm(left - right, axis=0))\r\n\r\n # For non-crossing edges, estimate diameter via circle assumption\r\n for pair in [(top, left), (top, right), (bottom, left), (bottom, right)]:\r\n diameters.append(np.linalg.norm(pair[0] - pair[1], axis=0) * 2 ** 0.5)\r\n\r\n # Ignore all nan runtime warning\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\r\n return np.nanmedian(diameters, axis=0)", "def diameter(points):\n if len(points[0]) == 2:\n return bounds2D(points)[0]\n else:\n return diameter3D(points)", "def diameter(self):\n return self._radius * 2", "def diameter(self):\n return self.radius * 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate all possible pairs from the list of given elements.
def all_pairs(elements): if len(elements) < 2: return [] elif len(elements) == 2: return [(elements[0], elements[1])] else: new_pairs = [] for elt in elements[1:]: new_pairs.append((elements[0], elt)) return all_pairs(elements[1:]) + new_pairs
[ "def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]", "def all_pairs(iterable):\n return itertools.combinations(iterable, 2)", "def calc_rdf_tup(elements: List) -> List:\n if len(elements) != 2:\n raise ValueError(\"Element must be of length 2\")\n return [list(p) for p in itertools.combinations_with_replacement(elements, 2)]", "def pairings(lst):\r\n\t\t\tif len(lst) == 2:\r\n\t\t\t\treturn [ [tuple(lst)] ]\r\n\t\t\tif len(lst) == 0:\r\n\t\t\t\treturn [ [] ]\r\n\t\t\tfirst = lst[0]\r\n\t\t\tres = []\r\n\t\t\tfor i, second in enumerate(lst[1:]):\r\n\t\t\t\tres += [[(first, second)] + pairing for pairing in pairings(lst[1:i+1] + lst[i+2:])]\r\n\t\t\treturn res", "def _gen_pairs(items):\n assert len(items) % 2 == 0\n items = iter(items)\n while True:\n try:\n yield next(items), next(items)\n except StopIteration:\n return", "def pair_iterator(lst):\n temp = iter(lst)\n return zip(temp, temp)", "def list_to_pairs(l):\n return {(l[2*i], l[2*i+1]) for i in range(len(l)/2)}", "def split_in_pairs(arg: Iterable) -> Iterable[Tuple]:\n # We are using zip_longest with one clever hack:\n # https://docs.python.org/3/library/itertools.html#itertools.zip_longest\n # We create an iterator out of the list and then pass the same iterator to\n # the function two times. Thus the function consumes a different element\n # from the iterator each time and produces the desired result.\n iterator = iter(arg)\n return zip_longest(iterator, iterator)", "def get_pairs(terms):\n return itertools.combinations(terms, 2)", "def pairwise(lst):\r\n if not lst: return\r\n\r\n for i in range(len(lst)-1):\r\n yield lst[i], lst[i+1]\r\n yield lst[-1], None", "def get_pairs(inp):\n return list(zip(inp[0::1], inp[1::1])) or None", "def pairs(lst):\n i = iter(lst)\n prev = next(i)\n for item in i:\n yield prev, item\n prev = item", "def pairwise(lst):\n if not lst: \n return\n length = len(lst)\n for i in range(length-1):\n yield lst[i], lst[i+1]\n yield lst[-1], None", "def pair(self, people_list: list, probabilities: list):\n # 以指定的概率获取元素 以一个列表为基准概率,从一个列表中随机获取元素\n people = []\n for ii in range(7):\n people.append(self.random_pick(people_list, probabilities))\n pair_list = []\n for pair_i in combinations(people, 2): # 抽出2个,看看总共有多少种组合\n num_a = pair_i[0]\n num_b = pair_i[1]\n if num_a != num_b:\n # 如下调整,确保每个元组,一定是小的数字在前面,大的数字在后面。方便后面去重\n if num_a > num_b:\n aa = num_b\n num_b = num_a\n ee = (aa, num_b)\n pair_list.append(ee)\n else:\n pair_list.append(pair_i)\n return pair_list", "def pairs(group):\n return map(frozenset, combinations(group, 2))", "def generate_pairs():\n\td1 = [1,2,3,4,5,6]\n\td2 = [1,2,3,4,5,6]\n\tpairs = []\n\tfor i in d1:\n\t\tfor j in d2:\n\t\t\tpairs.append([i,j])\n\treturn pairs", "def get_pairs(frequents):\n return list(combinations(frequents, 2))", "def pairwise(lst):\n if not len(lst): return\n #yield None, lst[0]\n for i in range(len(lst)-1):\n yield lst[i], lst[i+1]\n yield lst[-1], None", "def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serialize the internal key-value pair to a byte array, pickling objects only when necessary.
def serialize(self): byte_array = bytearray() header = ( self.sequence_number | (1 << 63) if self.type == KeyType.PUT else self.sequence_number ) # append header first byte_array.extend(byte_utils.integer_to_n_bytes_array(header, 8)) pickle_key = pickle.dumps(self.key) # key length byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_key))) # key byte array byte_array.extend(pickle_key) # it is a put operation, value is needed if self.type == KeyType.PUT: pickle_value = pickle.dumps(self.value) # value length byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_value))) # value byte array byte_array.extend(pickle_value) return bytes(byte_array)
[ "def serialize(self, value):\r\n return pickle.dumps(value, protocol=self.protocol)", "def serialize(self, value) -> bytes:\n pass", "def _encode_value(self, value):\n return pickle.dumps(value)", "def encode(obj):\n byte_string = pickle.dumps(obj)\n return byte_string", "def serialize_bitarray(ba):\n return base64.encodebytes(ba.tobytes()).decode('utf8')", "def dump_object(self, value):\n return pickle.dumps(value)", "def __bytes__(self: BinarySerializer) -> bytes:\n return self.bytesink", "def pickle_it(data: SerialisableType) -> bytes:\n return pickle.dumps(data)", "def serialize(self, value: VALUE) -> bytes:\n raise NotImplementedError", "def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)", "def serialize(self) -> bytes:\n pass", "def _serialize(self, obj):\n pb = protobuf.serde._bufferize(self.serialize_worker, obj)\n return pb.SerializeToString()", "def get_dict_of_bytes2(self):\n pass", "def to_pickle(self, **kwargs):\n return self._encode(self.dict(), \"pickle\", **kwargs)", "def serialize_state(state: object) -> bytearray:\n return dumps(state)", "def b64pickle(obj):\n return base64.b64encode(pickle.dumps(obj, -1)).decode(\"ascii\")", "def dic_pickle_dumps_and_b64(data):\n for i in data:\n data[i] = base64.b64encode(pickle.dumps(data[i]))\n return data", "def encode(self):\r\n # Create dict from attributes. Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))", "def readSerializable(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import ASHRAE data from a directory containing the .csv files.
def import_data(ashrae_dir, filenames=const.NAMES): print('Importing data from csv') ashrae_dir = pathlib.Path(ashrae_dir) data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv')) for name in filenames} return data
[ "def from_csv(self, folder, sep=','):\n os.chdir(folder)\n for f in glob.glob(\"*.csv\"):\n name = f[:-4]\n with open(f) as ps:\n for line in ps:\n args = tuple(line.replace(' ', '').replace('\\n', '').split(sep))\n self.add_predicate(name, args)", "def import_PAIPR(input_dir):\n\n data = pd.DataFrame()\n for file in input_dir.glob(\"*.csv\"):\n data_f = pd.read_csv(file)\n data = data.append(data_f)\n return data", "def import_csv():\n # print(listdir)\n path_to_csv = './accounts/all.csv'\n with open(path_to_csv) as f:\n reader = csv.reader(f)\n for row in reader:\n obj, created = Account.objects.get_or_create(\n alias=row[0],\n mailbox=row[1],\n type=row[2],\n whitelist_ip=row[3],\n inbox=row[4],\n outbox=row[5],\n host_url=row[6],\n server=row[7],\n customer=row[8],\n )", "def open_csv():\n if not os.path.exists(csv_path):\n raise FileNotFoundError('Cannot find AIA data CSV file.')\n return pd.read_csv(csv_path, index_col=0)", "def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test", "def loadCSV(input_file):", "def from_csv(filename):\n df = pd.read_csv(filename, parse_dates=True, index_col=\"datetime\")\n print(f\"Succesfully read {len(df)} rows from file {filename}\")\n return AltimetryData(df)", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def load_data_from_folder(self, home_dir):\r\n dataset_dir = os.path.join(home_dir, \"data\", self.dataset)\r\n csv_dir = os.path.join(dataset_dir, \"data.csv\")\r\n \r\n self.load_data_from_csv(csv_dir)\r\n \r\n self.add_noise()\r\n #self.add_gauss_noise()\r\n #self.add_cat_noise()\r", "def import_data(data_files, msfile, config, config_raw, logger):\n logger.info('Starting import vla data')\n sum_dir = './summary/'\n cf.makedir(sum_dir,logger)\n cf.rmdir(msfile,logger)\n logger.info('Input files: {}'.format(data_files))\n logger.info('Output msfile: {}'.format(msfile))\n command = \"importvla(archivefiles = {0}, vis = '{1}')\".format(data_files, msfile)\n logger.info('Executing command: '+command)\n exec(command)\n cf.check_casalog(config,config_raw,logger,casalog)\n logger.info('Completed import vla data')", "def import_data(path, header_file):\n\n # Read header\n headers = np.loadtxt(header_file, dtype=str)\n\n # Format data\n data_list = []\n\n for file in glob.glob(path + \"*\"):\n data = pd.read_csv(file, sep=\" \", header=None)\n # drop the 32nd column which only contains NaN values\n data.dropna(axis=1, inplace=True)\n # rename the columns\n data.columns = headers\n # only keep the 10 sensors data columns\n data = data[v3_sensors]\n print(data.info())\n data_list.append(data)\n \n return data_list", "def import_from_csv(self, file_name):\n with open(file_name) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n next(reader, None)\n for row in reader:\n self.add_entry(row[0], row[1], row[2])", "def from_csv(self):\n timestamp_logname = \"from_csv_\" + datetime.today().strftime('%Y_%m_%d_%H_%M_%S')\n csv_files = [f for f in self.args.files if f.endswith('.csv')]\n if not csv_files:\n self.logger.error(\"No CSV files found.\")\n return False\n\n # Create an instance of the Ingestor class with common options set.\n ingestor = Ingestor(**self.options)\n\n # Ingest from each CSV file.\n 
for csv_file in csv_files:\n data_groups = Ingestor.process_csv(csv_file)\n for mask, routes, deployment_number in data_groups:\n ingestor.load_queue(mask, routes, deployment_number)\n ingestor.ingest_from_queue()\n\n # Write out any failed ingestions from the entire batch to a new CSV file.\n if ingestor.failed_ingestions:\n ingestor.write_failures_to_csv(timestamp_logname)\n\n self.logger.info('')\n self.logger.info(\"Ingestion completed.\")\n return True", "def load_csv(apps, path):\n Area = apps.get_model('model_api', 'Area')\n Covid19PredictionDataPoint = apps.get_model(\n 'model_api', 'Covid19PredictionDataPoint')\n\n with open(path, 'r') as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n data = []\n\n for row in reader:\n area = None\n\n # Determine the country / state.\n if 'global' in path:\n country = row[1]\n state = ''\n elif 'us' in path:\n country = 'US'\n state = row[1]\n else:\n msg = \"\"\"Could not determine country/state from:\n row = %s\n path = %s\"\"\" % (str(row), path)\n raise RuntimeError(msg)\n\n # Try to find the corresponding area.\n try:\n area = Area.objects.get(country=country, state=state)\n except Area.DoesNotExist:\n msg = \"Could not find the area for country '{0}'\".format(\n country)\n if state:\n msg += \" and state '{0}'\".format(state)\n area = Area(state=state, country=country)\n area.save()\n msg += ' in model_api_area. New area created.'\n print(msg)\n\n except Area.MultipleObjectsReturned:\n msg = \"Found multiple areas for country '{0}'\".format(\n country)\n if state:\n msg += \" and state '{0}'\".format(state)\n msg += ' in model_api_area. Skip this area.'\n print(msg)\n continue\n\n # Load the predictions.\n for i in range(2, len(header)):\n date = header[i]\n\n # Skip invalid values.\n raw_val = row[i]\n if raw_val in ['NaN', '-Inf', 'Inf']:\n continue\n\n # Skip negative values.\n val = int(float(raw_val))\n if val < 0:\n continue\n\n data.append(Covid19PredictionDataPoint(\n area=area,\n date=date,\n val=val,\n ))\n\n return data", "def import_csv(\n self,\n file_path: str,\n overwrite_samples: bool = True,\n clip_to_space: bool = True,\n infer_space: bool = True,\n ) -> None:\n samples_df = pd.read_csv(file_path, index_col=0)\n self.from_dataframe(\n samples_df=samples_df,\n overwrite_samples=overwrite_samples,\n clip_to_space=clip_to_space,\n infer_space=infer_space,\n )\n pass", "def test_import_data(self):\n self.assertEqual(import_data(src_dir, 'product_file.csv', 'customer_file.csv', 'rental_file.csv'), ((9999, 9999, 9999), (1, 0, 1)))", "def import_course_csv(csv_path):\n with open(csv_path, 'r', newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile)\n return list(reader)\n # for row in reader:\n # print(row)\n # c_code = row[0]\n # c_lo = row[1]\n # print(c_code, c_lo)", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def import_csv(self, file_path):\n file_extension = os.path.splitext(file_path)[1]\n if not os.path.isfile(file_path) or file_extension != \".csv\": #Do nothing if file doesn't exist or is not csv\n return\n\n temp_df = pd.read_csv(file_path)\n col_list = temp_df.columns.tolist()\n\n if col_list != self.Music_cols: #do nothing if columns don't match\n return\n else:\n self.Music = temp_df\n\n has_error = False\n\n #get all tags\n song_paths = self.Music['path'].tolist()\n for music_path in song_paths:\n file_extension = 
os.path.splitext(music_path)[1]\n if os.path.isfile(music_path) and file_extension in self.supported_format:\n tag = TinyTag.get(music_path)\n self.tags[music_path] = tag\n else: #file doesn't exist or not supported format\n has_error = True\n self.Music = self.Music[self.Music['path'] != music_path]\n\n self.clear_all_youtube_links()\n\n if has_error:\n print(\"Warning: Some music files found in .csv are missing/modified\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import ASHRAE data with an optional caching mechanism.
def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES): cache_file = pathlib.Path(cache_file) if cache_file is not None and cache_file.exists(): data = import_dict_from_cached(cache_file, filenames) else: data = import_data(ashrae_dir) _cache_data(data, cache_file) # Sanity check: the set of building ids should be the same in the train and test sets. assert set(data['train'].building_id) == set(data['test'].building_id) return data
[ "def load_aws_data(self):\n pass", "def load_azure_data(self):\n pass", "def import_(self, data):\n return self.__import(data)", "def load_data(self) -> None:", "def load_data(data_set_key: str, a2e_data_path: str = '../../../a2e-data/data', cache_dir: str = None) -> BearingDataSet:\n\n if a2e_data_path is not None and not a2e_data_path.startswith('http') and not a2e_data_path.startswith('file://'):\n if os.path.isabs(a2e_data_path):\n a2e_data_path = 'file://' + os.path.abspath(a2e_data_path)\n else:\n bearing_module_path = pathlib.Path(__file__).parent.absolute()\n absolute_data_path = os.path.abspath(os.path.join(bearing_module_path, a2e_data_path))\n if os.name == 'nt':\n absolute_data_path = f'/{absolute_data_path}'.replace('\\\\', '/')\n\n a2e_data_path = 'file://' + absolute_data_path\n\n if not os.path.isdir(a2e_data_path.replace('file://', '')):\n a2e_data_path = 'https://github.com/maechler/a2e-data/raw/master/data/'\n\n if cache_dir is None:\n cache_dir = os.path.join(Path.home(), '.a2e')\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n a2e_data_path = a2e_data_path.rstrip('/') + '/'\n data_set_description_origin = f'{a2e_data_path}{data_set_key}.yaml'\n data_set_origin = f'{a2e_data_path}{data_set_key}.csv.gz'\n data_set_description_path = get_file(data_set_key + '.yaml', origin=data_set_description_origin, cache_dir=cache_dir, cache_subdir='datasets/bearing')\n windows = {}\n\n with open(data_set_description_path) as data_set_description_file:\n data_set_description = yaml.load(data_set_description_file, Loader=yaml.FullLoader)\n data_set_path = get_file(data_set_key + '.csv.gz', origin=data_set_origin, cache_dir=cache_dir, cache_subdir='datasets/bearing', file_hash=data_set_description['data']['md5_hash'], hash_algorithm='md5')\n\n with gzip.open(data_set_path, mode='rt') as data_set_file:\n data_frame = pd.read_csv(data_set_file, parse_dates=[data_set_description['data']['index_column']], date_parser=lambda x: timestamp_to_date_time(float(x)), quotechar='\"', sep=',')\n data_frame = data_frame.set_index(data_set_description['data']['index_column'])\n\n for window_key, window_description in data_set_description['windows'].items():\n windows[window_key] = {\n 'mask': (data_frame.index > window_description['start']) & (data_frame.index <= window_description['end']),\n 'label': window_description['label'],\n }\n\n return BearingDataSet(data_set_key, data_frame, windows)", "def import_acref_data(self):\n acref_col_dict = {\n 'CODE' : 'CODE',\n 'NO-SEATS' : 'NO_SEATS',\n 'MFR' : 'MFR',\n 'MODEL' : 'MODEL',\n 'AC-WEIGHT' : 'AC_WEIGHT',\n 'SPEED' : 'SPEED',\n 'TYPE-ACFT' : 'TYPE_AC'\n }\n acref_import_cols = ['CODE', 'NO-SEATS', 'MFR', 'MODEL', 'AC-WEIGHT','SPEED','TYPE-ACFT']\n \n acref_load_file_path = os.path.join(self.raw_path,'AcftRef.txt')\n \n self.csv_loader(acref_load_file_path,acref_import_cols,acref_col_dict,'ac_ref')", "def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return", "def import_data(ashrae_dir, filenames=const.NAMES):\n print('Importing data from csv')\n ashrae_dir = pathlib.Path(ashrae_dir)\n data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv')) for name in filenames}\n\n return data", "def load_data(self):", "def importAbcAsset ():\n\n help(importAbcAsset)\n\n import hou\n import os\n \n \n #set path\n hipPath = hou.expandString('$HIP')\n path = hipPath + \"/abc/\"\n print (path)\n \n listPath = os.listdir(path)\n \n obj = hou.node(\"/obj\")\n alembicImport= 
obj.createNode (\"geo\",\"alembicImport\")\n \n file1 = hou.node(\"/obj/alembicImport/file1\")\n file1.destroy()\n \n for n in listPath:\n print (n)\n currentFile=alembicImport.createNode(\"alembic\",n)\n #set fileName\n currentFile.setParms({\"fileName\":\"$\"+\"HIP/abc/\"+n})\n\n #reload geo callback\n #prepa param\n parm_group = alembicImport.parmTemplateGroup()\n parm_folder = hou.FolderParmTemplate(\"folder\",\"reload\")\n #button run code\n button=hou.ButtonParmTemplate(\"reload\",\"Reload\")\n button.setTags({\"script_callback_language\":\"python\",\"script_callback\":\"import y \\ny.reloadAlembic()\"})\n parm_folder.addParmTemplate(button)\n #append param\n parm_group.append(parm_folder)\n alembicImport.setParmTemplateGroup(parm_group)", "def import_archive(self):\n if self.archive:\n archive = IrkruTildaArchive(self.archive, material=self)\n archive.process()", "def load_openshift_data(self):\n pass", "def make_test_data(self):\n import data", "def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()", "def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data", "def import_all_data(self, aliases):\n for alias in aliases:\n self.ho.add_edf_file(os.path.join(self.base_directory, 'raw', alias + '.edf'))\n self.ho.edf_message_data_to_hdf(alias=alias)\n self.ho.edf_gaze_data_to_hdf(alias=alias)", "def create_import_data(properties):\n # if using ue2rigify un-hide the source rig\n if properties.use_ue2rigify:\n set_source_rig_hide_value(False)\n\n # get the mesh and rig objects from their collections\n mesh_objects = get_from_collection(properties.mesh_collection_name, 'MESH', properties)\n rig_objects = get_from_collection(properties.rig_collection_name, 'ARMATURE', properties)\n\n # if the combine meshes option is on, get only meshes with unique armature parents\n mesh_objects = utilities.get_unique_parent_mesh_objects(rig_objects, mesh_objects, properties)\n\n # get the asset data for all the mesh objects\n mesh_data = create_mesh_data(mesh_objects, rig_objects, properties)\n\n # get 
the asset data for all the actions on the rig objects\n action_data = create_action_data(rig_objects, properties)\n\n # if using ue2rigify re-hide the source rig\n if properties.use_ue2rigify:\n set_source_rig_hide_value(True)\n\n return mesh_data + action_data", "def _load_data(\n data_dir: Path,\n script_dir: Path,\n host: str = CLICKHOUSE_HOST,\n port: int = CLICKHOUSE_PORT,\n user: str = CLICKHOUSE_USER,\n password: str = CLICKHOUSE_PASS,\n database: str = IBIS_TEST_CLICKHOUSE_DB,\n **_,\n ) -> None:\n clickhouse_driver = pytest.importorskip(\"clickhouse_driver\")\n\n client = clickhouse_driver.Client(\n host=host, port=port, user=user, password=password\n )\n\n client.execute(f\"DROP DATABASE IF EXISTS {database}\")\n client.execute(f\"CREATE DATABASE {database} ENGINE = Atomic\")\n\n client.execute(\"DROP DATABASE IF EXISTS tmptables\")\n client.execute(\"CREATE DATABASE tmptables ENGINE = Atomic\")\n\n client.execute(f\"USE {database}\")\n client.execute(\"SET allow_experimental_object_type = 1\")\n client.execute(\"SET output_format_json_named_tuples_as_objects = 1\")\n\n with open(script_dir / 'schema' / 'clickhouse.sql') as schema:\n for stmt in filter(None, map(str.strip, schema.read().split(\";\"))):\n client.execute(stmt)", "def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of missing timestamps.
def count_missing_timestamps(df): no_of_timestamps = len(df.timestamp) no_of_sites = len(set(df.site_id)) full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H') no_of_missing_timestamps = no_of_sites * len(full_date_range) - no_of_timestamps print(f'There are {no_of_timestamps} timestamps in the data. The full date range is {len(full_date_range)} long and' f' there are {no_of_sites} sites so there should be {no_of_sites * len(full_date_range)} ' f'timestamps in the data. There are therefore {no_of_missing_timestamps} missing. ') return no_of_missing_timestamps
[ "def get_num_of_timestamps(self):\n return len(self)", "def missing_row_cnt(df: EDAFrame) -> Any:\n nulls = df.nulls\n rst = nulls.sum(1)\n rst = rst[rst > 0]\n\n return (rst > 0).sum()", "def check_no_missing_timesteps(timesteps, verbose=True):\n timesteps = _check_timesteps(timesteps)\n # Check if there are data\n if timesteps.size == 0:\n raise ValueError(\"No data available !\")\n # Check if missing timesteps\n dt = np.diff(timesteps)\n dts, counts = np.unique(dt, return_counts=True)\n if verbose:\n print(\" --> Starting at\", timesteps[0])\n print(\" --> Ending at\", timesteps[-1])\n if len(counts) > 1:\n print(\"Missing data between:\")\n bad_dts = dts[counts != counts.max()]\n for bad_dt in bad_dts:\n bad_idxs = np.where(dt == bad_dt)[0]\n bad_idxs = [b.tolist() for b in bad_idxs]\n for bad_idx in bad_idxs:\n tt_missings = timesteps[bad_idx : (bad_idx + 2)]\n print(\"-\", tt_missings[0], \"and\", tt_missings[1])\n raise ValueError(\"The process has been interrupted\")\n return", "def calculateMissing(odf):\n df = odf.copy()\n # Calculate last minute of operation for each day in `df`\n df.loc[:, 'time'] = np.nan\n df.loc[:, 'time'] = df.index.astype(np.int64)//10**9 # (to unix timestamp) from nano seconds 10*9 to seconds\n days = df.groupby(df.index.date)['time'].agg(['min', 'max', 'count']) # aggreagate on groupby\n # total number of minutes on the day\n totalminday = (days['max']-days['min'])//60\n # minutes with data by day\n countminday = days['count'] # -1 due count is +1\n missminday = totalminday-countminday\n percmissminday = missminday/totalminday\n\n # print('not working on daemon just on jupyter notebook!!!')\n return np.mean(percmissminday) # average of missing minutes", "def test_count_when_data_is_not_present(self):\n\n temp_data = []\n\n tt = TemperatureTracker()\n result = tt.count_from(temp_data)\n self.assertEqual(result, 0)", "def timestamp_length(self) -> int:\n timestamps = self.timestamps_sorted_list()\n base_length = computation.num_digits(timestamps[0]) if len(timestamps) > 0 else -1\n indexes = [1, 2, 3, 4, 5, -1, -2, -3, -4] if len(timestamps) > 10 else list(range(1, len(timestamps)))\n for n in indexes:\n length = computation.num_digits(timestamps[n])\n if length != base_length:\n return -1\n return base_length", "def get_number_of_items_with_missing_information(list_of_records):\n counter = 0\n #count the number of items that have 0 value\n for record in list_of_records:\n if record == 0:\n counter += 1\n \n return counter", "def count_missing_stats(manifest):\n num_missing = 0\n for element in manifest:\n if element.missing_stats():\n num_missing += 1\n return num_missing", "def _count_nulls(series):\n return series.isnull().sum()", "def missing_number(arr:list):\n n = len(arr) + 1\n sum_of_n_consecutive_numbers = int(n * (n + 1)/2)\n return sum_of_n_consecutive_numbers - sum(arr)", "def getUnseenCount():", "def missing_col_cnt(df: EDAFrame) -> Any:\n nulls = df.nulls\n rst = nulls.sum(0)\n rst = rst[rst > 0]\n\n return (rst > 0).sum()", "def _find_non_increasing_timestamps(timestamps: List[Fraction]) -> List[bool]:\n if len(timestamps) == 0:\n return []\n is_non_increasing = np.zeros(\n shape=len(timestamps),\n dtype=bool,\n )\n max_timestamp = timestamps[0] - 1\n for i, timestamp in enumerate(timestamps):\n if timestamp > max_timestamp:\n max_timestamp = timestamp\n else:\n is_non_increasing[i] = True\n\n return list(is_non_increasing)", "def count_placeholders(series):\n count = 0\n\n for i in range(series.size-1, -1, -1):\n if pd.isnull(series[i]) 
or series[i] == 0:\n count += 1\n else:\n break\n\n return count", "def get_missing_test_numbers(a_dict, logged_test_numbers):\n tnum_list = []\n for tname, t_dict in a_dict.iteritems():\n for tpin, r_dict in t_dict.iteritems():\n tnum_list.append(int(r_dict[\"Test number\"]))\n missing = list(set(tnum_list) - set(logged_test_numbers))\n return missing, tnum_list", "def test_timestamp_not_found(self, l):\n extract_columns(data=self.data, columns=['a'], timestamps=['timestamp'])\n l.check(\n ('pynts.util', 'WARNING', \"Couldn't find timestamps '['timestamp']' in data, using 'ts' instead\"),\n )", "def get_num_nonnull_values(column_profile_summary: ColumnProfileSummary) -> int:\n return column_profile_summary[COUNTS_N] - column_profile_summary[COUNTS_NULL]", "def valid_points(tx: List) -> int:\n return sum(v != MISSING_VALUE for v in tx)", "def _count_null(s: \"pandas.Series\") -> int:\n return s.isna().sum()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add missing timestamps to weather data and interpolate to fill in the data. Return a df with the missing times and weather data filled in.
def add_missing_weather_data(df): full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H') sites = list(set(df.site_id)) full_data_site_range = pd.DataFrame(itertools.product(sites, full_date_range), columns=['site_id', 'timestamp']) df_all_dates = full_data_site_range.merge(df, on=['site_id', 'timestamp'], how='left') df_all_dates = df_all_dates.groupby('site_id').apply(lambda group: group.interpolate(limit_direction='both')) return df_all_dates
[ "def fill_missing_time(row):\n\n row.index = pd.to_datetime(row.index)\n return row.interpolate(method=\"time\").fillna(method=\"backfill\")", "def auto_fillna(ts: TimeSeries,\n **interpolate_kwargs) -> TimeSeries:\n\n ts_temp = ts.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if 'limit_direction' not in interpolate_kwargs:\n interpolate_kwargs['limit_direction'] = 'both'\n interpolate_kwargs['inplace'] = True\n ts_temp.interpolate(**interpolate_kwargs)\n\n return TimeSeries.from_times_and_values(ts.time_index(), ts_temp.values)", "def _auto_fill(series: TimeSeries, **interpolate_kwargs) -> TimeSeries:\n\n series_temp = series.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if 'limit_direction' not in interpolate_kwargs:\n interpolate_kwargs['limit_direction'] = 'both'\n interpolate_kwargs['inplace'] = True\n series_temp.interpolate(**interpolate_kwargs)\n return TimeSeries.from_times_and_values(series.time_index(), series_temp, series.freq())", "def fill_missing_data_points(data):\n return data.interpolate()", "def interpolate(df):\n for x in df.columns:\n if x == \"date\":\n continue\n df[x] = df[x].interpolate(method='linear', axis=0).ffill().bfill()\n return df", "def interpolate(data_frame, time):\n\n total_index = np.sort(np.hstack((data_frame.index.values,\n time)))\n reindexed_data_frame = data_frame.reindex(total_index)\n interpolated_data_frame = \\\n reindexed_data_frame.apply(pandas.Series.interpolate,\n method='values').loc[time]\n\n # If the first or last value of a series is NA then the interpolate\n # function leaves it as an NA value, so use backfill to take care of\n # those.\n interpolated_data_frame = \\\n interpolated_data_frame.fillna(method='backfill')\n # Because the time vector may have matching indices as the original\n # index (i.e. 
always the zero indice), drop any duplicates so the\n # len() stays consistent\n return interpolated_data_frame.drop_duplicates()", "def _auto_fill(series: TimeSeries, **interpolate_kwargs) -> TimeSeries:\n\n series_temp = series.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if \"limit_direction\" not in interpolate_kwargs:\n interpolate_kwargs[\"limit_direction\"] = \"both\"\n interpolate_kwargs[\"inplace\"] = True\n series_temp.interpolate(**interpolate_kwargs)\n return TimeSeries.from_dataframe(\n series_temp,\n freq=series.freq,\n static_covariates=series.static_covariates,\n hierarchy=series.hierarchy,\n )", "def interpolate(df,timestampField,fieldstoInterpolate):\n #get datetime from timestamp string\n df['datetime']=df[timestampField].apply(lambda x: datetime.strptime(x,'%Y-%m-%d %H:%M:%S'))\n #sort by timestamp\n df=df.sort_values(by=timestampField,ascending=True).reset_index()\n #get the index for change in hour by iterating till the second last eleement of sorted timestamp\n for i in range(len(df)-1):\n thisTime, nextTime=df.iloc[i]['datetime'],df.iloc[i+1]['datetime']\n if thisTime.hour != nextTime.hour:\n #get the difference in their hour value and adjust for change of day\n differencehour=nextTime.hour -thisTime.hour\n #address for change of hour from 23 to 0 at midnight\n if differencehour<0:\n differencehour=24+differencehour\n #interpolate only for two successive hours\n if differencehour<=2:\n #Initiate first hour to start interpolation from\n initialtime=thisTime-timedelta(minutes=thisTime.minute,seconds=thisTime.second)\n #interpolate value for each full hour value between them\n for n in range(1,differencehour+1):\n interpolateTime=initialtime+timedelta(hours=n)\n #initiate dictionary to write to interpolated value\n dictToWrite={timestampField:interpolateTime.strftime('%Y-%m-%d %H:%M:%S'),\"tag_ident\":df.iloc[i]['tag_ident'],\"Gender\":df.iloc[i]['Gender'],\"datetime\":interpolateTime}\n #interpolate values\n referenceInterval=nextTime-thisTime\n interpolateInterval=interpolateTime -thisTime\n #iterate through whole list of fields to be interpolated\n for fieldtoInterpolate in fieldstoInterpolate:\n interpolatedValue=(interpolateInterval/referenceInterval)*(df.iloc[i+1][fieldtoInterpolate]-df.iloc[i][fieldtoInterpolate])+df.iloc[i][fieldtoInterpolate]\n #Update dictionary with interpolated value\n dictToWrite.update({fieldtoInterpolate:interpolatedValue})\n #Appedn data to the original dataframe\n df=df.append(dictToWrite, ignore_index=True)\n #sort data with interpolated value and reset index\n df=df.sort_values(by=\"timestamp\").reset_index(drop=True)\n return df", "def fill_weather_forecast_columns(df):\n\n filled_df = df.copy()\n filled_df.loc['2018-01-01','temp_KC':'wind_north_SD'] = filled_df.loc['2018-01-02','temp_KC':'wind_north_SD'].values\n filled_df.loc['2018-02-06','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-05','temp_KC':'wind_north_SD'].values\n filled_df.loc['2019-02-05','temp_KC':'wind_north_SD'] = filled_df.loc['2019-02-04','temp_KC':'wind_north_SD'].values\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='bfill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n\n any_nans = filled_df.isna().sum(axis=0)\n \n if any_nans.sum(axis=0) != 0:\n print('The function did not convert all NaNs. 
Some NaNs still exist.')\n\n return filled_df", "def storm_interpolator(my_df):\n # df_resampled = my_df.resample(\n # '1T', loffset=datetime.timedelta(seconds=30.)).mean()\n df_resampled = my_df.interpolate(method='linear', limit=15)\n df_resampled.dropna(axis='index', how='any', inplace=True)\n\n return df_resampled", "def clean_meteo_data(self, df):\n for col in df.columns:\n df[col] = df[col].str.replace(',', '.').astype(\"float\")\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n df=df.fillna(method='ffill')\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n# print(\"shape selected sensor data:\",df.shape)\n df=df.dropna()\n df=df.resample(\"10T\").mean()\n df=df.reset_index()\n df['dag']=df['datetime'].dt.day\n return df", "def smart_gas_nan_checker(smart, gas, weather, dwelling_id):\n\n print('Resampling smart, gas, weather')\n # For more resampling info see: https://pandas.pydata.org/pandas-docs/stable/api.html#id41\n # Makes missing gaps appear as NaN, these are the general raw dataframes to work with\n smart_10s = smart.resample('10s').mean()\n gas_h = gas.resample('H').mean()\n weather_10min = weather.resample('10min').mean()\n\n \"\"\"\n Create a dataframe with a 1 hour sample rate\n \"\"\"\n gas_h['gasPower'] = gas_h['gasMeter'].diff() # Calculate gasPower column\n gas_h['gasPower'][0] = gas_h['gasPower'][1] # Replace 1st entry (NaN) with 2nd entry\n\n smart_h = smart_10s.resample('H').mean() # Down sample smart\n weather_h = weather_10min.resample('H').mean() # Down sample weather\n\n # Combine gas, smart, weather\n df_hour = pd.merge(smart_h, gas_h, left_index=True, right_index=True)\n df_hour = pd.merge(df_hour, weather_h, left_index=True, right_index=True)\n\n \"\"\"\n Create smartmeter dataframe with a 10s sample rate\n \"\"\"\n gas_10s = gas_h.resample('10s').ffill() # Up sample gas to 10s\n # Calculate gasPower column, is this rhe right way? 
Or should we ffill it?\n # Currently this code makes it so there is one gasPower value per hour, we could ffill this also?\n gas_10s['gasPower'] = gas_10s['gasMeter'].diff()\n gas_10s['gasPower'][0] = gas_10s['gasPower'][1] # Replace 1st entry (NaN) with 2nd entry\n\n weather_10s = weather_10min.resample('10s').ffill() # forward fill because the raw data is the 10 minute mean\n\n # Combine gas, smart, weather\n df_10s = pd.merge(smart_10s, gas_10s, left_index=True, right_index=True)\n df_10s = pd.merge(df_10s, weather_10s, left_index=True, right_index=True)\n\n \"\"\"\n Do NaN analysis on the 10s and hour sample rate dataframes\n \"\"\"\n print('Length of combined df_10s: %s' % len(df_10s))\n print('df_nan_fig_10s')\n df_nan_fig_10s = plot_nans(df_10s, dwelling_id+' 10s sample rate')\n print('df_nan_table_10s')\n df_nan_table_10s = df_nan_checker(df_10s, 0)\n\n print('Length of combined df_hour: %s' % len(df_hour))\n print('df_nan_fig_hour')\n df_nan_fig_hour = plot_nans(df_hour, dwelling_id+' 1 hour sample rate')\n print('df_nan_table_hour')\n df_nan_table_hour = df_nan_checker(df_hour, 0)\n\n return df_10s, df_hour, df_nan_table_10s, df_nan_table_hour, df_nan_fig_hour, df_nan_fig_10s", "def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y", "def test_interpolate_values_1_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), 235.0),\n ]\n forcing_processor.interpolate_values('air_temperature', 1, 1)\n expected = (datetime.datetime(2011, 9, 25, 10, 0, 0), 225.0)\n assert forcing_processor.data['air_temperature'][1] == expected", "def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)", "def interpolate_missing_time(time, cadence_no=None, fill_value=\"extrapolate\"):\n if cadence_no is None:\n cadence_no = np.arange(len(time))\n\n is_finite = np.isfinite(time)\n num_finite = np.sum(is_finite)\n if num_finite < 2:\n raise ValueError(\n \"Cannot interpolate time with fewer than 2 finite values. 
Got \"\n \"len(time) = {} with {} finite values.\".format(len(time), num_finite))\n\n interpolate_fn = scipy.interpolate.interp1d(\n cadence_no[is_finite],\n time[is_finite],\n copy=False,\n bounds_error=False,\n fill_value=fill_value,\n assume_sorted=True)\n\n return interpolate_fn(cadence_no)", "def _interpolate_temp_hum_wind(\n dates,\n xs,\n ys,\n zs,\n temps,\n rel_hums,\n wind_speeds,\n temp_config,\n hum_config,\n wind_speed_config,\n):\n num_stations = len(xs)\n\n for date_num, date in enumerate(dates):\n nan_pos = np.isnan(temps[date_num, :])\n num_nan = nan_pos.sum()\n if num_nan > 0 and num_nan < num_stations:\n temps[date_num, nan_pos] = meteo.interpolate_param(\n 'temp',\n date,\n temp_config,\n temps[date_num, ~nan_pos],\n xs[~nan_pos],\n ys[~nan_pos],\n zs[~nan_pos],\n xs[nan_pos],\n ys[nan_pos],\n zs[nan_pos],\n )\n\n nan_pos = np.isnan(rel_hums[date_num, :])\n num_nan = nan_pos.sum()\n if num_nan > 0 and num_nan < num_stations:\n rel_hums[date_num, nan_pos] = meteo.interpolate_param(\n 'rel_hum',\n date,\n hum_config,\n rel_hums[date_num, ~nan_pos],\n xs[~nan_pos],\n ys[~nan_pos],\n zs[~nan_pos],\n xs[nan_pos],\n ys[nan_pos],\n zs[nan_pos],\n temps=temps[date_num, ~nan_pos],\n target_temps=temps[date_num, nan_pos],\n )\n\n nan_pos = np.isnan(wind_speeds[date_num, :])\n num_nan = nan_pos.sum()\n if num_nan > 0 and num_nan < num_stations:\n wind_speeds[date_num, nan_pos] = meteo.interpolate_param(\n 'wind_speed',\n date,\n temp_config,\n wind_speeds[date_num, ~nan_pos],\n xs[~nan_pos],\n ys[~nan_pos],\n zs[~nan_pos],\n xs[nan_pos],\n ys[nan_pos],\n zs[nan_pos],\n )", "def fillna(ts: TimeSeries, fill: float = 0) -> TimeSeries:\n\n return TimeSeries.from_times_and_values(ts.time_index(), ts.pd_dataframe().fillna(value=fill))", "def test_interpolate_values_2_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 12, 0, 0), 230.0),\n ]\n forcing_processor.interpolate_values('air_temperature', 1, 2)\n expected = (datetime.datetime(2011, 9, 25, 10, 0, 0), 220.0)\n assert forcing_processor.data['air_temperature'][1] == expected\n expected = (datetime.datetime(2011, 9, 25, 11, 0, 0), 225.0)\n assert forcing_processor.data['air_temperature'][2] == expected" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Join together the meter data, weather data and building metadata into one df
data = dict of df's (keys are 'building_metadata', 'weather_train', 'weather_test', 'train', 'test')
dataset_name = 'train' or 'test'
returns a merged df which includes building_metadata, weather_train (or weather_test) and train (or test)
def join_input_data_and_multi_index(data, dataset_name):
    meter_df = data[dataset_name]
    building_df = data['building_metadata']
    weather_df = data['weather_' + dataset_name]
    # join meter and weather data
    building_n_meter = meter_df.merge(building_df, on='building_id', how='left')
    joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left')
    # Add time related columns
    joined_data['hour'] = joined_data['timestamp'].dt.hour
    joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek
    joined_data['week_number'] = joined_data['timestamp'].dt.week
    joined_data['month'] = joined_data['timestamp'].dt.month
    joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if x in [0, 6] else 0)
    # multi index on building id and timestamp
    joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index()
    return joined_data
[ "def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join", "def createMasterData():\n cpi = readCPI()\n exports, imports = readExportsImports()\n consumption = readConsumption()\n population = readPopulation()\n gsptse = readGSPTSE()\n housing = readHousingStarts()\n jobless = readInitialJobless()\n ippi = readIPPI()\n gdp = readGDP()\n retail = readRetailTrade()\n unemployment = readUnemployment()\n wti = readWCS()\n cadusd = readCADUSD()\n wages = readWages()\n target, tenyearbond = readBOC()\n crime = readCrimeSeverity()\n google_trends = readGoogleTrends()\n investment = readInvestment()\n median_age = readMedianAge()\n manufacturing = readManufacturing()\n savings_rate, disposable_income = readSavingsRate()\n\n all_series_no_gdp = [exports, imports, consumption, investment, wages,\n population, median_age, cpi, ippi, wti,\n housing, unemployment, retail, manufacturing,\n gsptse, cadusd, tenyearbond, target,savings_rate, disposable_income]\n \n # Using repeated joins to maximize data retention.\n for df in all_series_no_gdp:\n gdp = gdp.join(df)\n \n master_data = gdp.copy()\n master_data_no_na = master_data.dropna(how='any')\n master_data_some_na = master_data.dropna(how='all')\n\n return master_data, master_data_some_na, master_data_no_na", "def combine_load_weather_df(county_load_df_rolling, weather_features_hourly, save=None):\n\n del weather_features_hourly.columns.name\n del weather_features_hourly.index.name\n\n load_and_weather_data = pd.merge(\n weather_features_hourly,\n county_load_df_rolling['Load'],\n how='outer',\n left_index=True,\n right_index=True)\n\n load_and_weather_data.columns = ['temperature', 'dewpoint', 'load']\n\n # interpolate any missing values linearly.\n load_and_weather_data = load_and_weather_data.interpolate(\n method='linear', axis=0).ffill().bfill()\n\n # Build date and time features.\n load_and_weather_data['year'] = load_and_weather_data.index.map(lambda x: x.year)\n load_and_weather_data['month'] = load_and_weather_data.index.map(lambda x: x.month)\n load_and_weather_data['day'] = load_and_weather_data.index.map(lambda x: x.day)\n load_and_weather_data['weekday'] = load_and_weather_data.index.map(lambda x: x.weekday)\n load_and_weather_data['hour'] = load_and_weather_data.index.map(lambda x: x.hour)\n\n # Build lagged weather predictors.\n for ix in range(8):\n load_and_weather_data['temperature_d' + str(ix)] = load_and_weather_data['temperature'].shift(24*ix)\n load_and_weather_data['dewpoint_d' + str(ix)] = load_and_weather_data['dewpoint'].shift(24*ix)\n\n # Next day's load values.\n load_and_weather_data['load_tomorrow'] = load_and_weather_data['load'].shift(-24)\n\n load_and_weather_data = load_and_weather_data.fillna(0)\n\n if save:\n joblib.dump(load_and_weather_data, os.path.join(save, 'load_and_weather_data'))\n\n return load_and_weather_data", "def load_data(data_links_list=(\n 
'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df", "def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")", "def create_features(overwrite=False):\n \n data_dict = pd.read_pickle('complete_dataset.pickle') # upload dictionary of tickers\n ticker_dict = data_dict['Raw_Data']\n\n # initialize dataframe\n first_key = list(ticker_dict.keys())[0] # find the first ticker\n df1 = ticker_dict[first_key].copy() # df for first ticker\n first_df_cols = df1.columns.tolist()\n df2 = aggregate_from_daily_ml(df1, first_key) # aggregate to monthly level\n j=0\n for key, value in ticker_dict.items(): # for each ticker, aggregate then concat to master df\n if key==first_key: continue\n if first_df_cols != value.columns.tolist(): print('bad columns for {}!'.format(key))\n df3 = aggregate_from_daily_ml(value, key)\n \n df2 = pd.concat([df2, df3])\n if j%(round(len(ticker_dict)/10))==0: print('Fraction done: {}'.format(round(j/len(ticker_dict),5)))\n j+=1\n df2 = df2.sort_index(level=[0,1])\n \n df2.columns = [col[0] + '_' + str(col[1]) if str(col[1])!='NA' else col[0] for col in df2.columns.tolist()]\n\n df3 = create_target(df2, threshold=0.0)\n df3.columns = [col[0] + '_' + str(col[1]) if str(col[1])!='NA' else col[0] for col in df3.columns.tolist()]\n\n\n if overwrite:\n print('Saving to data.pkl')\n df3.to_pickle('data.pkl')\n else:\n print('File not being saved. 
To save, use overwrite=True')\n\n return df3", "def load_data() -> pd.DataFrame:\n index_cols = [\"level\", \"country\", \"region\", \"sub_region\", \"date\"]\n ts_df = time_series_formatter(fetch_time_series())\n ts_df = ts_df.set_index(index_cols)\n # drop duplicated rows\n ts_df = ts_df[~ts_df.index.duplicated()]\n\n mobility_df = fetch_mobility_data()\n metrics = [\n \"retail_recreation\",\n \"grocery_pharmacy\",\n \"parks\",\n \"transit_stations\",\n \"workplaces\",\n \"residential\",\n ]\n\n mobility_df.loc[mobility_df[\"region\"].isnull(), \"level\"] = \"country\"\n mobility_df.loc[\n (~mobility_df[\"region\"].isnull())\n & (mobility_df[\"sub_region\"].isnull()),\n \"level\",\n ] = \"region\"\n mobility_df.loc[\n ~mobility_df[\"sub_region\"].isnull(), \"level\"\n ] = \"sub_region\"\n\n mobility_df = mobility_df.set_index(index_cols)[metrics]\n mobility_df.columns = [\"mobility_\" + x for x in metrics]\n\n # treat missing values in mobility data\n mobility_df = _treat_mobility_missing_values(mobility_df)\n\n # Incorporate mobility data\n enriched_ts_df = pd.concat([ts_df, mobility_df], axis=1, join=\"inner\")\n return enriched_ts_df.reset_index()", "def fullMetadata():\n derivedData = pd.read_excel(str(paths.derivedData))\n metadata = pd.read_excel(str(paths.metadata))\n derivedData.columns = derivedData.columns.str.lower()\n derivedData.rename(columns= {\"park\": \"unit\", \"code\": \"site\", \"site\": \"title\"}, inplace= True)\n metadata.rename(columns= {\"code\": \"site\", \"site\": \"title\"}, inplace= True)\n\n # Strip whitespace from string columns\n for df in [metadata, derivedData]:\n stringCols = df.dtypes[ df.dtypes == np.object ].index\n df[stringCols] = df[stringCols].apply(lambda col: col.str.strip())\n\n full = pd.merge(metadata, derivedData, on=['unit', 'site', 'year'], how= \"outer\", suffixes= (\"_meta\", \"_derived\"))\n \n # Resolve overlapping columns, tie goes to metadata\n dupCols = derivedData.columns.intersection(metadata.columns).difference(['unit', 'site', 'year'])\n for col in dupCols:\n full[col+\"_meta\"].replace(to_replace= np.nan, value= full[col+\"_derived\"], inplace= True)\n full.rename(columns= {col+\"_meta\": col}, inplace= True)\n full.drop(col+\"_derived\", axis= 1)\n\n # Set index to siteIDs\n ids = full.apply(lambda site: paths.siteID(site.unit, site.site, site.year), axis= 1)\n full.index = ids\n\n # Convert boolean-looking columns to boolean datatype\n for col in full:\n unique = full[col].unique()\n if len(unique) <= 3:\n unique = set( unique[ ~pd.isnull(unique) ] )\n if unique == {1, 0}:\n full[col] = full[col].astype(bool)\n\n # full.winter_site = full.winter_site.astype(bool)\n\n return full", "def merge_data(agg_cases, lk_info, geolocation_data):\n merged_df = pd.merge(agg_cases, lk_info, left_on='IdLandkreis', right_on = 'Key')\n merged_df[\"RelativFall\"] = merged_df[\"AnzahlFall\"] / merged_df[\"Bev Insgesamt\"]\n merged_df[\"RelativTodesfall\"] = merged_df[\"AnzahlTodesfall\"] / merged_df[\"Bev Insgesamt\"]\n merged_df = pd.merge(merged_df, geolocation_data, left_on=\"Key\", right_on=\"cca_2\")\n return merged_df", "def build_jersey_wearers_dataframe(year_links_num_dict):\n year_range = list(reversed(list(year_links_num_dict.keys())))\n\n jersey_wearers = pd.DataFrame()\n\n for year in year_range:\n filepath = 'data/' + str(year) + '/' + str(year) + '_jersey_wearers.csv'\n new_df = pd.read_csv(filepath)\n new_df['year'] = year\n jersey_wearers = pd.concat([jersey_wearers, new_df], sort='False')\n jersey_wearers = 
jersey_wearers[['year','stage_num','yellow_jersey','green_jersey','polka_dot_jersey','polka-dot_jersey','white_jersey']]\n jersey_wearers['polka_dot_jersey'] = jersey_wearers['polka_dot_jersey'].fillna(jersey_wearers['polka-dot_jersey'])\n jersey_wearers = jersey_wearers.drop('polka-dot_jersey', axis=1)\n jersey_wearers = jersey_wearers.reset_index(drop=True)\n save_pickle(jersey_wearers, 'jersey_wearers_all')", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def _fetch_dataframe(self):\n\n def reshape(training_summary):\n # Helper method to reshape a single training job summary into a dataframe record\n out = {}\n for k, v in training_summary[\"TunedHyperParameters\"].items():\n # Something (bokeh?) gets confused with ints so convert to float\n try:\n v = float(v)\n except (TypeError, ValueError):\n pass\n out[k] = v\n out[\"TrainingJobName\"] = training_summary[\"TrainingJobName\"]\n out[\"TrainingJobStatus\"] = training_summary[\"TrainingJobStatus\"]\n out[\"FinalObjectiveValue\"] = training_summary.get(\n \"FinalHyperParameterTuningJobObjectiveMetric\", {}\n ).get(\"Value\")\n\n start_time = training_summary.get(\"TrainingStartTime\", None)\n end_time = training_summary.get(\"TrainingEndTime\", None)\n out[\"TrainingStartTime\"] = start_time\n out[\"TrainingEndTime\"] = end_time\n if start_time and end_time:\n out[\"TrainingElapsedTimeSeconds\"] = (end_time - start_time).total_seconds()\n if \"TrainingJobDefinitionName\" in training_summary:\n out[\"TrainingJobDefinitionName\"] = training_summary[\"TrainingJobDefinitionName\"]\n return out\n\n # Run that helper over all the summaries.\n df = pd.DataFrame([reshape(tjs) for tjs in self.training_job_summaries()])\n return df", "def concatenateData(self):\n self.data = pd.concat([tr.data for tr in self.getTestRuns()])", "def train_test_data_df(train_data_file, test_data_file):\n dtype_dict = {\n \"age\": np.int32,\n \"education-num\": np.int32,\n \"capital-gain\": np.int32,\n \"capital-loss\": np.int32,\n \"hours-per-week\": np.int32\n }\n cols = [i for i in range(15) if i != 2]\n train_data = pd.read_csv(train_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n train_data = train_data.dropna(axis=0, how=\"any\")\n test_data = pd.read_csv(test_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n test_data = test_data.dropna(axis=0, how=\"any\")\n return train_data, test_data", "def read_data(train_path, test_path, attributes_path, descriptions_path):\n\n def aggregate_attributes(val):\n return \". 
\".join([row['name'] + \"\\t\" + row['value']\n for _, row in val[['name', 'value']].iterrows()])\n\n train_data = pd.read_csv(train_path, encoding=\"ISO-8859-1\")\n test_data = pd.read_csv(test_path, encoding=\"ISO-8859-1\")\n attributes_data = pd.read_csv(attributes_path, encoding=\"ISO-8859-1\").dropna()\n brand_data = attributes_data[attributes_data['name'] == 'MFG Brand Name'][[\"product_uid\", \"value\"]].rename(\n columns={\"value\": \"brand\"})\n attributes_data['attributes'] = \" . \" + attributes_data['name'] + \" . \" + attributes_data['value'] + \" \\n \"\n attributes_data.drop(['name', 'value'], axis=1, inplace=True)\n attributes_data = attributes_data.groupby('product_uid', as_index=False).aggregate(np.sum)\n descriptions_data = pd.read_csv(descriptions_path, encoding=\"ISO-8859-1\")\n\n all_data = pd.concat((train_data, test_data), axis=0, ignore_index=True)\n all_data = pd.merge(all_data, descriptions_data, how='left', on='product_uid')\n all_data = pd.merge(all_data, brand_data, how='left', on='product_uid')\n all_data = pd.merge(all_data, attributes_data, how='left', on='product_uid')\n return all_data.iloc[:len(train_data)], all_data.iloc[len(train_data):]", "def predict_energy_consumption(buildings):\n forecasts = [forecast_for_building(building) for i, building in buildings.iterrows()]\n df = pd.concat(forecasts)\n df.drop(columns=\"id\", inplace=True)\n df = buildings.merge(df, left_on=\"id\", right_on=\"building_id\")\n df[\"meter\"] = 0\n df[\"floor_count\"] = df[\"floorcount\"]\n df[\"air_temperature\"] = df[\"temp\"]\n df[\"relative_humidity\"] = df[\"humidity\"]\n df[\"dew_temperature\"] = df[\"air_temperature\"] - ((100 - df[\"relative_humidity\"]) / 5)\n df[\"precip_depth_1_hr\"] = np.nan\n df[\"timestamp\"] = pd.to_datetime(df[\"date\"])\n df[\"wind_direction\"] = df[\"deg\"]\n df[\"wind_speed\"] = df[\"speed\"]\n\n df.drop(columns=[\"id\", \"name\", \"floorcount\", \"latitude\", \"longitude\", \"user_id\", \"temp\", \"feels_like\", \"temp_min\",\n \"temp_max\", \"pressure\", \"sea_level\", \"grnd_level\", \"humidity\", \"temp_kf\", \"main\", \"description\",\n \"icon\", \"speed\", \"deg\", \"date\"], inplace=True)\n\n df_temp = df.copy(deep=True)\n for i in range(1, 4):\n df_temp[\"meter\"] += 1\n df = pd.concat([df, df_temp])\n del df_temp\n\n cfg = {\n 'circular_timestamp_encoding': False,\n 'log_transform_square_feet': True,\n 'log_transform_area_per_floor': True,\n 'label_square_feet_outlier': True,\n 'label_area_per_floor_outlier': True,\n 'encode_wind_direction': False,\n 'include_feels_like': True,\n 'fill_na_with_zero': False,\n 'add_lag_features': True,\n 'lag_columns': ['air_temperature', 'dew_temperature', 'cloud_coverage'],\n 'lag_windows': [6, 24],\n }\n [df] = build_features(df, cfg=cfg)\n\n df.reset_index(inplace=True, drop=True)\n building_ids = df[\"building_id\"]\n timestamps = df[\"timestamp\"]\n df.drop(columns=[\"timestamp\", \"month\", \"wind_direction\", \"wind_speed\", \"building_id\"], inplace=True)\n\n model_endpoint = \"http://model:5001/predict\"\n data = df.to_json()\n response = requests.get(model_endpoint, json=data).json()\n\n predictions = pd.DataFrame({\"reading\": response[\"prediction\"],\n \"building_id\": building_ids,\n \"meter\": df[\"meter\"],\n \"timestamp\": timestamps,\n \"air_temperature\": df[\"air_temperature\"]})\n return predictions", "def getFullData(rapport, UsableVideos, mergeddf):\r\n #first, I create a dictionary for each video (keys) that has the ratings for all slices (values)\r\n rapportvideo = {}\r\n 
for _, row in rapport.iterrows():\r\n if row['Video'] in rapportvideo:\r\n rapportvideo[row['Video']].append(row['Rating'])\r\n else:\r\n rapportvideo[row['Video']] = list()\r\n rapportvideo[row['Video']].append(row['Rating'])\r\n\r\n #create a data set from this dictionary, with one participant (or video) in each row and all the individual slice ratings as the columns:\r\n rapport_dataset= pd.DataFrame.from_dict(rapportvideo, orient='index').reset_index(level=0)\r\n rapport_dataset= rapport_dataset.add_prefix('AMT_Slice_')\r\n rapport_dataset = rapport_dataset.rename(columns={'AMT_Slice_index':'Participant'})\r\n\r\n #get the average of all the slices, and put them in a new column (Note: I had to remove the participant column first to do this)\r\n select=rapport_dataset.drop('Participant', axis=1)\r\n rapport_dataset.insert(83, 'AMT_Rapport_Average', select.mean(axis=1))\r\n\r\n #To merge rapport_database with the previous data (i.e. usablevideos)\r\n full_usabledatabase = UsableVideos.merge(rapport_dataset, on='Participant')\r\n\r\n #to save the Excelsheets:\r\n rapport_dataset.to_excel(\"WoZ_2019_AMTRatings.xlsx\", index=False)\r\n\r\n with pd.ExcelWriter('WoZ_2019_FullData.xlsx') as writer: \r\n mergeddf.to_excel(writer, sheet_name='AllVideos', index=False)\r\n full_usabledatabase.to_excel(writer, sheet_name='UsableVideos', index=False)\r\n\r\n return full_usabledatabase", "def load_data():\n data_path = os.path.join('qual-o-mat-data', 'data', '2019', 'europa')\n data_keys = [\"answer\", \"comment\", \"opinion\", \"party\", \"statement\"]\n raw_data = dict()\n all_data = dict()\n\n # Create a dictionary of type <string, DataFrame> that contains the data from all JSON files\n for dk in data_keys:\n json_file = os.path.join(data_path, dk + \".json\")\n with open(json_file, \"r\") as fh:\n raw_data[dk] = json.load(fh)\n all_data[dk] = pd.DataFrame(raw_data[dk])\n\n\n # Based on the opinion data, merge all other data frames on their ID fields to get usable names instead of just ID numbers\n merged_df = all_data[\"opinion\"].copy()\n for to_merge in [\"party\", \"statement\", \"comment\", \"answer\"]:\n merged_df = merged_df.merge(all_data[to_merge], how='inner', left_on=[to_merge], right_on=['id'])\n\n #print(mdf.head())\n return merged_df, all_data, raw_data", "def build_useful_data():\n\n # 读取蛋白质数据\n with timer(\"Loading and merging data\"):\n protein_train = pd.read_csv('datas/df_protein_train.csv')\n\n protein_test = pd.read_csv('datas/df_protein_test.csv')\n\n protein_all = pd.concat([protein_train, protein_test])\n\n # 添加蛋白质序列长度作为特征\n protein_all['seq_len'] = protein_all['Sequence'].apply(len)\n\n # 读取分子数据\n mol_train = pd.read_csv('datas/df_molecule.csv')\n\n aff_train = pd.read_csv('datas/df_affinity_train.csv')\n\n aff_test = pd.read_csv('datas/df_affinity_test_toBePredicted.csv')\n\n # 初始化待预测的Ki值为-11\n aff_test['Ki'] = -11\n\n aff_all = pd.concat([aff_train, aff_test])\n\n data = aff_all.merge(mol_train, on=\"Molecule_ID\", how='left')\n data = data.merge(protein_all, on='Protein_ID', how='left')\n\n # 获取蛋白质ID\n PID = list(protein_all[\"Protein_ID\"])\n with timer(\"Processing wordcount1\"):\n # word_length = 1时的wordcount特征\n _, word_counts1 = tfidf_and_wordcounts(protein_all, PID, word_length=1, stride=1)\n\n # word_length = 2时的wordcount特征\n with timer(\"Processing wordcount2\"):\n _, word_counts2 = tfidf_and_wordcounts(protein_all, PID, word_length=2, stride=1)\n\n word_counts1_2 = word_counts1.merge(word_counts2, on=\"Protein_ID\", how=\"left\")\n # 保存特征文件,以供后期训练\n 
word_counts1_2.to_csv(\"datas/1and2_1_421_protein_std.csv\", index=False)\n\n del word_counts1_2, word_counts1, word_counts2\n\n with timer(\"Processing wordcount3\"):\n _, word_count3 = tfidf_and_wordcounts(protein_all, PID, word_length=3, stride=1)\n\n word_count3_features = list(word_count3.columns) # 8000维的数据,需要降维\n word_count3_features.remove(\"Protein_ID\")\n\n # 利用标准差进行降维,设置标准差阈值为0.42,去掉标准差小于0.42的特征\n new_word_count3 = reduce_dims_with_std(word_count3, word_count3_features, std_threshold=0.3)\n # 保存特征文件,以供后期训练\n new_word_count3.to_csv(\"datas/3_1_protein_std_0.3.csv\", index=False)\n del new_word_count3\n\n for i in range(len(word_count3_features) // 1000):\n # 每次划分1000个特征,并保存在特征文件里,以供后期训练\n file = word_count3[[\"Protein_ID\"] + word_count3_features[i * 1000:(i + 1) * 1000]]\n file_name = \"3_1_1000_protein_\" + str(i)\n file.to_csv(\"datas/\" + file_name + \".csv\", index=False)\n\n del word_count3, word_count3_features\n\n with timer(\"Processing wordcount4\"):\n gc.collect()\n _, word_count4 = tfidf_and_wordcounts(protein_all, PID, word_length=4, stride=1)\n\n word_count4_features = list(word_count4.columns) # 140000+ 维的数据,需要降维\n word_count4_features.remove(\"Protein_ID\")\n\n new_word_count4 = reduce_dims_with_pca(word_count4, word_count4_features, n_conponents=1000)\n new_word_count4.to_csv(\"datas/wordcount4_pca.csv\", index=False)\n\n # 利用标准差进行降维,设置标准差阈值为0.15,去掉标准差小于0.15的特征\n new_word_count4 = reduce_dims_with_std(word_count4, word_count4_features, std_threshold=0.15)\n new_word_count4.to_csv(\"datas/4_1_protein_std_0.15.csv\", index=False)\n\n # 利用标准差进行降维,设置标准差阈值为0.12,去掉标准差小于0.12的特征\n new_word_count4 = reduce_dims_with_std(word_count4, word_count4_features, std_threshold=0.12)\n\n word_count4_features = list(new_word_count4.columns)\n word_count4_features.remove(\"Protein_ID\")\n\n for i in range(len(word_count4_features) // 1000):\n # 每次划分500个特征,并保存在特征文件里,以供日后训练\n file = new_word_count4[[\"Protein_ID\"] + word_count4_features[i * 1000:(i + 1) * 1000]]\n file_name = \"4_1_1000_protein_\" + str(i)\n file.to_csv(\"datas/\" + file_name + \".csv\", index=False)\n\n del new_word_count4, word_count4\n\n # 以下特征是蛋白质的词向量特征, 来自技术圈, 谢谢"小武哥"同学.但我们的最终提交版本没用这些特征\n \"=====================================词向量特征===========================================\"\n # feat2 = protein_embedding(protein_all, word_length = 2)\n # data = data.merge(feat2, on=\"Protein_ID\", how=\"left\")\n # del feat2\n # feat3 = protein_embedding(protein_all, word_length = 3)\n # data = data.merge(feat3, on=\"Protein_ID\", how=\"left\")\n # del feat3\n # feat4 = protein_embedding(protein_all, word_length = 4)\n # data = data.merge(feat4, on=\"Protein_ID\", how=\"left\")\n # del feat4\n \"================================================================================\"\n\n with timer(\"分子指纹展开\"):\n mol_fingerprints = list(mol_train[\"Fingerprint\"].apply(lambda x: list(np.array(x.split(',')).astype(int))))\n mol_fingerprints = pd.DataFrame(mol_fingerprints, columns=[\"Fingerprint_\" + str(i) for i in range(167)])\n mol_fingerprints[\"Molecule_ID\"] = mol_train[\"Molecule_ID\"]\n\n del PID\n \"==================================================================================================\"\n\n with timer(\"加入分子指纹和描述符\"):\n data = data.merge(mol_fingerprints, on=\"Molecule_ID\", how='left')\n mol_ECFP4 = pd.read_csv(\"datas/df_mol_ECFP4s_1024.csv\")\n data = data.merge(mol_ECFP4, on=\"Molecule_ID\")\n del mol_fingerprints, mol_ECFP4\n del data[\"Sequence\"], protein_train, protein_test, mol_train\n\n 
data.reset_index(drop=True, inplace=True)\n data.to_csv(\"datas/original_data.csv\", index=False)\n\n del data\n print(\"Useful data have builded\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split the joined data into a dict with a df for each meter type
def split_on_meter_type(joined_data, meter_types):
    joined_data_dict = {meter_type: joined_data[joined_data['meter_type'] == meter_type]
                        for meter_type in meter_types}
    return joined_data_dict
[ "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df", "def get_MeasurementDataSet(self, intDataInfo):\n MSdataSet ={}\n for i, dbinfo in enumerate(intDataInfo['db_info']):\n db_name = dbinfo['db_name']\n ms_name = dbinfo['measurement']\n self.switch_MS(db_name, ms_name)\n bind_params = {'end_time': dbinfo['end'], 'start_time': dbinfo['start']}\n MSdataSet[i] =self.get_data_by_time(bind_params, db_name, ms_name)\n MSdataSet[i].index.name ='datetime'\n\n return MSdataSet", "def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result", "def separateMeasures(self, ex_list):\n return_dict = {}\n return_dict['Spessore'] = []\n return_dict['Larghezza'] = []\n return_dict['Lunghezza'] = []\n if self.values['-SPLIT MEASURES-']:\n excel_list = ex_list\n for df in excel_list:\n for row in self.getRowListDF(df, True):\n for cell in row:\n if not cell:\n continue\n if ' ' not in str(cell):\n continue\n cell_list = str(cell).split(' ')\n measures_found = False\n for word in cell_list:\n if measures_found:\n break\n word_index = cell_list.index(word)\n if 'x' not in word and 'Ø' not in word and 'L=' not in word:\n continue\n measure_list = []\n if word == 'x':\n measure_list = [cell_list[word_index - 1], cell_list[word_index + 1]]\n if cell_list[word_index + 2] == 'x':\n measure_list.append(cell_list[word_index + 3])\n measures_found = True\n elif 'x' in word:\n measure_list = word.split('x')\n\n for measure in measure_list:\n i = measure_list.index(measure)\n if measure.startswith('Ø'):\n measure = measure[1:len(measure)]\n elif measure.startswith('L='):\n measure 
= measure[2:len(measure)]\n measure_list[i] = measure\n\n if measure_list:\n if len(measure_list) == 2:\n return_dict['Lunghezza'].append(measure_list[0])\n return_dict['Larghezza'].append(measure_list[1])\n return_dict['Spessore'].append('')\n elif len(measure_list) == 3:\n return_dict['Lunghezza'].append(measure_list[0])\n return_dict['Larghezza'].append(measure_list[1])\n return_dict['Spessore'].append(measure_list[2])\n elif 'Ø' in word:\n return_dict['Lunghezza'].append(word[1:len(word)])\n return_dict['Spessore'].append('')\n elif 'L=' in word:\n return_dict['Larghezza'].append(word[2:len(word)])\n return return_dict", "def create_splits(self) -> dict[str, pd.DataFrame]:\n if self.data is None:\n raise ValueError('Data is not loaded yet. run \"FolkTables.load_data\"')\n splits = {}\n if self.split_type == \"random\":\n remainder_df = self.data.copy()\n original_size = remainder_df.shape[0]\n for key, value in self.splits.items():\n adjusted_frac = (original_size / remainder_df.shape[0]) * value\n sample = remainder_df.sample(frac=adjusted_frac, random_state=self.seed)\n splits[key] = sample\n sample_indexes = sample.index\n remainder_df = remainder_df.drop(sample_indexes)\n\n elif self.split_type == \"predefined\":\n for key, value in zip([\"train\", \"validation\", \"test\"], self.data):\n splits[key] = value\n\n return splits", "def make_info_dataframes(self):\n col_sel = [self.key_key, self.title_key, self.units_key]\n self.question_info_df = self.question_df[col_sel].drop_duplicates()\n\n if not self.section_df.empty:\n col_sel = [self.parent_id_key, self.title_key]\n self.module_info_df = self.section_df[col_sel].drop_duplicates()", "def splitRawData(self, rawdata):\n data = {}\n times = {}\n for probe, probeData in rawdata.iteritems():\n data[probe] = probeData['data']\n times[probe] = probeData['time']\n \n return data, times", "def prep_csv(data):\n df = pd.read_csv(io.BytesIO(data), header=[0,1])\n df.dropna(inplace=True)\n df.rename(columns=lambda s: s.strip(), inplace=True)\n units = dict(zip(df.columns.get_level_values(0).values, \n df.columns.get_level_values(1).values))\n df.columns = df.columns.droplevel(1)\n return df, units", "def split_data_in_chunks(data: pd.DataFrame,\r\n time_lenght=pd.to_timedelta(7, unit='d')\r\n ) -> Dict[str, pd.DataFrame]:\r\n one_minute = pd.to_timedelta(1, unit='m')\r\n output = {}\r\n # for each hour in the dataset\r\n for timestep in data.index.ceil('H').unique():\r\n # if there is not enough available data\r\n # to complete the window, skip it\r\n if timestep < time_lenght:\r\n continue\r\n # create a chunk of the last 7 days of data up until (t - 1) minutes\r\n output[timestep] = data.loc[timestep-time_lenght: timestep-one_minute, :]\r\n return output", "def fill_data(self):\n\n df_list = list()\n for typed_data_set in self.typed_data_set:\n\n # loop over all the variables of the current block (belonging to one dimension value,\n # such as 'Bedrijven van 10 en groter'\n values = list()\n for key, data in typed_data_set.items():\n if key == \"ID\":\n # the first value is always an ID, Make a copy of the question dataframe to\n # a new data frame which we can fill with values for this dimension\n logger.debug(f\"Collecting data of {data}\")\n df = self.question_df.copy()\n elif key in list(self.dimensions.keys()):\n # the next rows contain dimension properties. Get the values and store those\n # in the dimension column of are question dataframe. 
Store both the Title\n # and the short key\n df.loc[:, key] = self.dimensions[key].loc[data, self.title_key]\n df.loc[:, key + \"_\" + self.key_key] = data\n else:\n # the rest of the rows in this block are the values belonging to the questions\n # store them in a list\n values.append(data)\n\n # now copy the whole list of values to our question data frame and add them to a list\n df.loc[:, self.value_key] = values\n df_list.append(df)\n\n # we have create a dataframe for each dimension. Now append all the data frames to one\n # big one\n logger.info(\"Merging all the dataframes\")\n self.question_df = pd.concat(df_list, axis=0)", "def collect_data():\n\n \"Aqui va el codigo de alberto para recoger los datos que puede venir en forma de diccionario\"\n #TODO: Función para recoger los datos de los bms y meterlos en diccionarios (Alberto jr.)\n\n bms1 = dict()\n bms2 = dict()\n bms3 = dict()\n general = dict()\n\n\n # Ejemplos de datos para meter en los diccionarios\n\n temperature = 35.5\n voltage1 = 15.2\n voltage2 = 14.8\n date = time.strftime(\"%Y-%m-%d\") # Current date\n t = time.strftime(\"%H:%M:%S\") # Current time\n\n return bms1, bms2, bms3, general", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def join_survey_data(survey, deezer):\n\n\n df = survey.rename(columns={'Age': 'user_age', 'Gender': 'user_gender',\n 'deezer_id': 'media_id'})\n\n for index, row in df.iterrows():\n if pd.isnull(row['time']):\n continue\n time = row['time'].split(',')\n if row['user_gender'] == 'Male':\n user_gender = 1\n else:\n user_gender = 0\n if time == None:\n if row['rating'] == 0:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0,\n 999, 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date',\n 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id',\n 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n elif 'Anytime' in time:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n else:\n t_dict = {'Morning': 0, 'Afternoon': 0, 
'Evening': 0}\n for t in time:\n t_dict[t] = 1\n for i in [('Morning', 1480513129), ('Afternoon', 1479067262),\n ('Evening', 1478675619)]:\n new = pd.DataFrame(np.array([[999999, i[1], row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], t_dict[i[0]]]]),\n columns=['genre_id',\n 'ts_listen',\n 'media_id',\n 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n\n return deezer", "def get_data(self, sensortype=None, head=None, tail=None, diff='default', resample='min', unit='default'):\n sensors = self.get_sensors(sensortype)\n series = [sensor.get_data(head=head, tail=tail, diff=diff, resample=resample, unit=unit) for sensor in sensors]\n\n # workaround for https://github.com/pandas-dev/pandas/issues/12985\n series = [s for s in series if not s.empty]\n\n if series:\n df = pd.concat(series, axis=1)\n else:\n df = pd.DataFrame()\n\n # Add unit as string to each series in the df. This is not persistent: the attribute unit will get\n # lost when doing operations with df, but at least it can be checked once.\n for s in series:\n try:\n df[s.name].unit = s.unit\n except:\n pass\n\n return df", "def load_data(self, types, data_dir):\n\n data = {}\n for split in ['train', 'val', 'test']:\n if split in types:\n sentences_file = os.path.join(data_dir, split, 'sentences.txt')\n labels_file = os.path.join(data_dir, split, 'labels.txt')\n data[split] = {}\n self.load_sentences_labels(\n sentences_file, labels_file, data[split])\n\n return data", "def split_sales_records(record):\n for interaction in record['prices']:\n # date = pd.to_datetime(interaction['date'][:11]) # <- took up too much memory (I think)\n date = time.mktime(datetime.datetime.strptime(interaction['date'][:11], \"%b %d %Y\").timetuple())\n if date > 1.530335e+09: # This will occur when we go from daily records to hourly. 
Just taking records before Jul 01 2018\n break\n yield {\n 'median_sell_price': interaction['median_sell_price'],\n 'quantity': int(interaction['quantity']),\n 'date': date,\n 'app': record['app'],\n 'item_name': record['item_name']}", "def groupedData( self ):\n output = {}\n values = self._data.values()\n values.sort( lambda x, y: cmp(x.name, y.name) )\n for data in values:\n dtype = '%s %s' % (data.privacy, data.dataType)\n output.setdefault(dtype, [])\n output[dtype].append(data)\n \n return output", "def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def splitter (data1, data2):\n flow_data = list()\n fare_record_data = list()\n\n for line in data1:\n line = [line[2:6],line[6:10],line[10:15],line[15:18],line[18],line[19],line[36:39],line[20:28],line[28:36],line[42:49]]\n flow_data.append(line)\n\n flow = pd.DataFrame(flow_data, columns=[\"ORIGIN_CODE\",\"DESTINATION_CODE\",\"ROUTE_CODE\",\"STATUS_CODE\",\"USAGE_CODE\",\"DIRECTION\",\"TOC\",\"VALID_UNTIL\",\"VALID_FROM\",\"FLOW_ID\"])\n flow['ROUTE_CODE'] = flow['ROUTE_CODE'].astype(object)\n flow.index.name=\"flow_idx\"\n\n for line in data2:\n line=[line[2:9],line[9:12],line[12:20]]\n fare_record_data.append(line)\n\n fare_record = pd.DataFrame(fare_record_data, columns=[\"FLOW_ID\",\"TICKET_CODE\",\"FARE\"])\n fare_record.index.name = \"fare_record_idx\"\n\n return flow,fare_record" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the state of the environment for a new episode
`setup` is used to let the reset function know when we're calling it from `setup`. If we don't, the 'random' init scheme should reset to the randomly chosen position instead of picking a new random one.
def reset(self, setup=False):
    self._done = False
    self._nbSteps = 0
    x = None
    if (self.startPosX == 'random' and setup) or (
            self.startPosX == 'episodeRandom'):
        x = random.randint(0, self._width - 1)
    elif (self.startPosX == 'random' and not setup):
        x = self._initState[0]
    elif self.startPosX == 'center':
        x = self._width - 1
    else:
        x = int(self.startPosX)
    y = None
    if (self.startPosX == 'random' and setup) or (
            self.startPosX == 'episodeRandom'):
        y = random.randint(0, self._height - 1)
    elif (self.startPosY == 'random' and not setup):
        y = self._initState[1]
    elif self.startPosX == 'center':
        y = self._height - 1
    else:
        y = int(self.startPosX)
    self._currentPos = (x, y)
    self._trajectory = [(x, y)]
    return (x, y)
[ "def _did_reset(self):\n # use this method to access the RAM of the emulator\n # and perform setup for each episode.\n # the method returns None\n pass", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", "def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def reset_environment(self, env):\r\n\r\n state = env.reset()\r\n #state = state[0]\r\n #state = np.expand_dims(state, axis=0)\r\n\r\n for _ in range(np.random.randint(self.start_random_steps)):\r\n action = env.action_space.sample() # sample random action\r\n next_state, _, _, _ = env.step(action)\r\n #next_state = next_state[0]\r\n #next_state = np.expand_dims(next_state, axis=0)\r\n state = next_state\r\n\r\n return state", "def reset_environment(self, env):\r\n\r\n state = env.reset()\r\n state = np.expand_dims(state, axis=1)\r\n\r\n for _ in range(np.random.randint(self.start_random_steps)):\r\n action = env.action_space.sample() # sample random action\r\n next_state, _, _, _ = env.step(action)\r\n next_state = np.expand_dims(next_state, axis=1) # this is required because it is 1 x traffic state size\r\n state = next_state\r\n\r\n return state", "def reset(self):\n self.tracker.reset()\n self.episode += 1\n self.episode_step = 0", "def reset(self):\n self.episode_obs, self.episode_labels = self.get_environment_data()\n obs, self.done = self._next_observation()\n self.done = False\n return obs", "def _reset_seeds(self) -> None:\n self._seeds = [None for _ in range(self.num_envs)]", "def _reset_(self):\n print(\"Resetting\")\n\n x_target = self._target_generator_.__next__()\n np.copyto(self._x_target_, x_target)\n self._target_ = self._x_target_[self._end_effector_indices]\n\n self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)\n self._cmd_prev_ = np.zeros(len(self._action_low)) # to be used with derivative control of velocity\n if self._reset_type != 'none':\n if self._reset_type == 'random':\n reset_angles, _ = self._pick_random_angles_()\n elif self._reset_type == 'zero':\n reset_angles = self._q_ref[self._joint_indices]\n self._reset_arm(reset_angles)\n\n rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(\n self._rand_obj_.get_state()\n )\n np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))\n\n print(\"Reset done\")", "def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")", "def _reset_(self):\n print(\"Resetting\")\n\n self._q_target_, x_target = self._pick_random_angles_()\n np.copyto(self._x_target_, x_target)\n if self._target_type == 'position':\n self._target_ = self._x_target_[self._end_effector_indices]\n elif self._target_type == 'angle':\n self._target_ = self._q_target_\n self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)\n self._cmd_prev_ = np.zeros(len(self._action_low)) # to be used with derivative control of velocity\n if self._reset_type != 'none':\n if self._reset_type == 'random':\n reset_angles, _ = self._pick_random_angles_()\n elif self._reset_type == 'zero':\n reset_angles = self._q_ref[self._joint_indices]\n self._reset_arm(reset_angles)\n\n rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(\n self._rand_obj_.get_state()\n )\n 
np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))\n\n print(\"Reset done\")", "def check_reset_seed(env: gym.Env):\n signature = inspect.signature(env.reset)\n if \"seed\" in signature.parameters or \"kwargs\" in signature.parameters:\n try:\n obs_1 = env.reset(seed=123)\n assert obs_1 in env.observation_space\n obs_2 = env.reset(seed=123)\n assert obs_2 in env.observation_space\n assert data_equivalence(obs_1, obs_2)\n seed_123_rng = deepcopy(env.unwrapped.np_random)\n\n # Note: for some environment, they may initialise at the same state, therefore we cannot check the obs_1 != obs_3\n obs_4 = env.reset(seed=None)\n assert obs_4 in env.observation_space\n\n assert (\n env.unwrapped.np_random.bit_generator.state\n != seed_123_rng.bit_generator.state\n )\n except TypeError as e:\n raise AssertionError(\n \"The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. \"\n \"This should never happen, please report this issue. \"\n f\"The error was: {e}\"\n )\n\n if env.unwrapped.np_random is None:\n logger.warn(\n \"Resetting the environment did not result in seeding its random number generator. \"\n \"This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. \"\n \"If you do not use the python-level random number generator, this is not a problem.\"\n )\n\n seed_param = signature.parameters.get(\"seed\")\n # Check the default value is None\n if seed_param is not None and seed_param.default is not None:\n logger.warn(\n \"The default seed argument in reset should be `None`, \"\n \"otherwise the environment will by default always be deterministic\"\n )\n else:\n raise error.Error(\n \"The `reset` method does not provide the `return_info` keyword argument\"\n )", "def reset(self):\n self.observation = None\n self.episode_done = True", "def reset(self):\n if self.init_velocity == 'slow':\n self.velocity = self.rng.randint(1, 3)\n elif self.init_velocity == 'fast':\n self.velocity = self.rng.randint(5, 10)\n else:\n self.velocity = self.init_velocity\n\n self.waypoint = self.random_waypoint()\n self.pausing = False\n self.curr_pause = 0\n self.log.debug('Reset movement', new_velocity=self.velocity, new_waypoint=str(self.waypoint))", "def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)", "def seed_random():\n random.seed(0)", "def reset(self):\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n p.setTimeStep(self._time_step)\n p.setGravity(0, 0, -9.8)\n\n # load plane\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"), [0, 0, 0])\n # load robot\n self._darwin = DarwinopEnv()\n\n # Let the world run for a bit\n for _ in range(20):\n p.stepSimulation()", 
"def reset(self) -> None:\n self._rng = random.default_rng(self.seed)", "def reset_game():\n clearscreen()\n initialize()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method of the Labyrinthe class initializes a labyrinth from a txt file and the associated map number.
def telecharger(self, path, numero_lab):
    with open(path, "r") as f:
        # initialize the counters used to fill in the map
        i = 1
        ii = str(i)
        j = 1
        jj = str(j)
        # go through the characters of the txt file containing the labyrinth one by one
        for line in f.readlines():
            for letter in line:
                # check that the letter is an expected character
                if re.search(r"^[ O.XU]$", letter) is None:
                    break
                else:
                    self.carte[ii, jj] = letter
                    if letter == "X":
                        self.robot = [i, j]
                    elif letter == "U":
                        self.sortie = [i, j]
                    j += 1
                    jj = str(j)
            self.largeur = j
            i += 1
            ii = str(i)
            j = 1
            jj = str(j)
        self.longueur = i
        self.numero = numero_lab
[ "def __init__(self, filename=None, allLines=None):\n self.filename = str(filename)\n self.allLines = allLines #stores all lines from file", "def __init__(self):\n self.file_name = 'moes_tavern_lines.txt'\n self.path_to_file = abspath(join(getcwd(), '../data',\n self.file_name))", "def __init__(self, file_path, label):\n self.file_path = file_path\n self.label = label\n super(LabeledFileRecord, self).__init__()", "def __init__(self, ecran, text, police, taille, pos, couleurText = (0,0,0), couleurFond = (255,255,255)):\n self.ecran = ecran\n self.police = police\n self.taille = taille\n self.pos = pos\n self.couleurText = couleurText\n self.couleurFond = couleurFond\n self.msg = text\n self.txt = self.msg\n self.lignes = [\n Ligne(self.ecran, self.txt ,self.police, self.taille, self.pos, self.couleurText, self.couleurFond)\n ]\n self.dim = self.lignes[0].dim\n self.actif = False\n self.hLigne = self.dim[1]", "def __init__(self, fileName, fpga):\n self.fileHandle = open(fileName + '.tcl', 'a+')\n self.fpga = fpga", "def __init__(self, nombre):\n\n try:\n # Intentar abrir el archivo\n self.f = open(nombre, 'r')\n self.nombre = nombre\n except:\n # Si no se puede abrir el archivo, entonces se termina el programa\n print('No se puede abrir el archivo', nombre)\n exit()", "def __init__(self, path, text_field, label_field,\n fine_grained=False, **kwargs):\n fields = [('text', text_field), ('label', label_field)]\n examples = []\n\n def get_label_str(label):\n return label.split(':')[0] if not fine_grained else label\n label_field.preprocessing = data.Pipeline(get_label_str)\n\n for line in open(os.path.expanduser(path), 'rb'):\n # there is one non-ASCII byte: sisterBADBYTEcity; replaced with space\n label, _, text = line.replace(b'\\xf0', b' ').decode().partition(' ')\n examples.append(data.Example.fromlist([text, label], fields))\n\n super(TREC, self).__init__(examples, fields, **kwargs)", "def abrir_fichero_txt(self, nombre_fichero_txt, codificacion=\"utf-8\"):\n #print (\"Abriendo -{0}-\".format ( nombre_fichero_txt ) )\n descriptor=open(nombre_fichero_txt, \"r\", encoding=codificacion)\n self.lineas_fichero=descriptor.readlines()\n self.num_columna=0\n self.num_fila=0\n self.FIN_DE_FICHERO=False\n self.MAX_FILAS=len(self.lineas_fichero) - 1\n #print(self.MAX_FILAS)\n descriptor.close()", "def __init__(self, x_labels, y_labels, title, filename, data_title=''):\r\n self.x_labels = x_labels\r\n self.y_labels = y_labels\r\n self.title = title\r\n self.filename = filename\r\n self.data_title = data_title", "def pl_create_txt(self):\n\t\tprint()\n\t\tprint('pl_create_txt')\n\n\t\t# Content - This !!!\n\t\tself.content = pl_lib_exp.get_file_content(self)\n\t\t#print(self.content)", "def pl_create_txt(self):\n\t\t#print()\n\t\t#print('Pl - Create Txt')\n\n\t\t# Content - This !!!\n\t\tself.content = pl_lib_exp.get_file_content(self)\n\t\t\n\t\t#print(self.content)", "def __init__( self, filename, ifno, skip_channelaveraged=False ):\n super(SimpleTsysFiller,self).__init__( filename )\n self.ifno = ifno\n print 'IFNO to be processed: %s'%(self.ifno)\n self.skip_channelaveraged = skip_channelaveraged\n if skip_channelaveraged is True:\n print 'SimpleTsysFiller: skip channel averaged spws (%s)'%(self.ifno + 1)", "def __init__(self,fpath='LANDUSE.TBL'):\n with open(fpath,'r') as f:\n name = f.readline().strip()\n while not name == '':\n print('Reading',name)\n self.__setitem__(name, self._read_def(f))\n name = f.readline().strip()", "def __init__(self,dataset):\n \n self.ct = ContentUtil()\n self.AI_pre 
= dataset + \"/AI/\"\n self.Not_AI_pre = dataset + \"/NOT/\"\n if not os.path.exists(self.AI_pre):\n os.makedirs(self.AI_pre)\n if not os.path.exists(self.Not_AI_pre):\n os.makedirs(self.Not_AI_pre)\n self.filenames = []\n self.dataset_folder = dataset\n self.dataset = dict()\n for file in os.listdir(dataset):\n if '.txt' in file:\n self.filenames.append(file)", "def __init__(self, file):\n self.HTML = \"\"\n self.FILE = file\n self.load_data()", "def Template(Fenetre_largeur,Fenetre_hauteur):\r\n li= Select_ligne(\"Nombre de lignes: \",Fenetre_largeur,Fenetre_hauteur)\r\n nom=\"Template\"\r\n fich=\"Template\"\r\n version=0\r\n while Path(\"stages/\"+fich+\".txt\").is_file() == True:\r\n version+=1\r\n fich=nom+str(version)\r\n fichier=open(\"stages/\"+fich+\".txt\",'w')\r\n fichier.write(str(li))\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n for i in range(li):\r\n for j in range(10):\r\n fichier.write(\"0,0|\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"gauche: resistance, droite: bonus\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"resistance max: 3\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"6=barre+\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"7=score+\")\r\n fichier.close()", "def __init__(self, file_path, text_grid=None):\n # TODO\n check = ''\n if file_path is check and text_grid is None:\n text_grid = [\"..+..++\", \"++.B..+\", \".....++\", \"++.....\", \".T....+\"]\n elif file_path is check:\n self.text_grid = text_grid\n else:\n text_grid = self.open_grid(file_path).readlines()\n for i in range(len(text_grid)):\n text_grid[i] = text_grid[i][:-2]\n self.text_grid = text_grid\n self.width = len(self.text_grid[0])\n self.height = len(self.text_grid)\n self.dict = {\n 'NW': (-1, -1),\n 'N': (0, -1),\n 'NE': (1, -1),\n 'E': (1, 0),\n 'SE': (1, 1),\n 'S': (0, 1),\n 'SW': (-1, 1),\n 'W': (-1, 0)\n }\n self.map = [[0 for i in range(self.height)] for j in range(self.width)]\n for x in range(self.width):\n for y in range(self.height):\n if text_grid[y][x] == '.':\n self.map[x][y] = Node(True, x, y)\n continue\n if text_grid[y][x] == '+':\n self.map[x][y] = Node(False, x, y)\n continue\n if text_grid[y][x] == 'B':\n self.map[x][y] = Node(True, x, y)\n self.boat = Node(True, x, y)\n self.boat.in_path = True\n continue\n if text_grid[y][x] == 'T':\n self.map[x][y] = Node(True, x, y)\n self.treasure = Node(True, x, y)\n self.treasure.in_path = True\n continue\n pass", "def __init__(self, text):\n self.text = text\n self.train_vec = np.load('feat.npy')\n self.train_output = pickle.load(open('mylist.pkl', 'rb'))\n self.vec = pickle.load(open('vector.pkl', 'rb'))", "def __init__(self,filename=''):\n try:\n receiver_h,receiver_v = np.loadtxt(filename, unpack=True)\n except IOError:\n receiver_h = np.array([[900.,2000],[15.,15.]])\n receiver_v = np.array([[900.,2000],[15.,15.]])\n warnings.warn('Warning: Failed to load Receiver models, setting models to 15 K ')\n #Assume Provided models are a function of zenith angle & frequency\n T_H = fit.Spline1DFit(degree=1)\n T_V = fit.Spline1DFit(degree=1)\n T_H.fit(receiver_h[0],receiver_h[1])\n T_V.fit(receiver_v[0],receiver_v[1])\n self.rec = {}\n self.rec['HH'] = T_H # The HH and VV is a scape thing\n self.rec['VV'] = T_V" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function lets the player, if they wish, save the labyrinth and load it in the next game. It saves the labyrinth in the cartes folder located in the working directory.
def enregistrer(self, dir_path, nom_du_fichier):
    path = dir_path + "/cartes/{}.txt".format(nom_du_fichier)
    with open(path, "w") as labyrinthe_en_cours:
        labyrinthe_en_cours.write(str(self))
[ "def enregistrementPickle(dicoDesValeurs,pfxPickle=\"\"):\n\tprint(\"ENREGISTREMENT EN FORMAT BINAIRE PICKLE\")\n\n\tdicoPourEnregistrer = {\n\t\t\t'lesAttributs':dicoDesAttributs,\n\t\t\t'lesValeurs':dicoDesValeurs,\n\t}\n\n\tif not os.path.exists(os.path.normpath(DIR_STRUCT)):\n\t\tos.makedirs(os.path.normpath(DIR_STRUCT))\n\n\tnomFichier = os.path.normpath(os.path.join(\n\t\t\tDIR_STRUCT,pfxPickle+NOM_PICKLE))\n\n\twith open(nomFichier, 'wb') as fichierPkl:\n\t pickle.dump(\n\t\t\t\tdicoPourEnregistrer,\n\t\t\t\tfichierPkl,\n\t\t\t\tprotocol=pickle.HIGHEST_PROTOCOL)\n\n\t# test de rechargement\n\tdicoReCharge = chargementPickle(pfxPickle,silencieux=True)\n\n\t# test d'intégrité\n\tassert(dicoReCharge['lesAttributs'] ==\n\t\tdicoPourEnregistrer['lesAttributs'])\n\n\tfor k1 in dicoReCharge['lesValeurs'].keys():\n\t\tfor k2 in dicoReCharge['lesValeurs'][k1].keys():\n\t\t\tif dicoReCharge['lesAttributs'][k2]['dimensions']>1:\n\t\t\t\tassert(numpy.all(dicoReCharge['lesValeurs'][k1][k2]==\n\t\t\t\t\t dicoPourEnregistrer['lesValeurs'][k1][k2]))\n\t\t\telse:\n\t\t\t\tassert(dicoReCharge['lesValeurs'][k1][k2]==\n\t\t\t\t\t dicoPourEnregistrer['lesValeurs'][k1][k2])", "def remplissageTableInstallation(nomFichier) :\n\taffichageMessage(\"remplissageTableInstallation start\")\n\n\tconnexion()\n\n\ttry :\n\t\tfichier = open(nomFichier,'r')\n\n\t\tlecteurCsv = csv.reader(fichier, delimiter=',', quotechar='\"')\n\n\t\tlist_already_added = []\n\n\t\tfor row in lecteurCsv:\n\t\t\tif(row[0]!=\"Nom usuel de l'installation\" and row[1] not in list_already_added):\n\t\t\t\tif(row[1]!=\"\"): \tnumero\t\t\t\t\t\t= row[1] #numero\n\t\t\t\telse: \t\t\t\tnumero\t\t\t\t\t\t= 'null'\n\t\t\t\tif(row[0]!=\"\"):\t\tnom \t\t\t\t\t\t= \"'\" + row[0].replace(\"'\",\"\") + \"'\" #nom\n\t\t\t\telse: \t\t\t\tnom\t\t\t\t\t\t\t= 'null'\n\t\t\t\tif(row[2]!=\"\"): \tville\t\t\t\t\t\t= \"'\" + row[2].replace(\"'\",\"\")+ \"'\"#ville\n\t\t\t\telse: \t\t\t\tville\t\t\t\t\t\t= 'null'\n\t\t\t\tif(row[6]!=\"\"):\t\tnumVoie\t\t\t\t\t\t= row[6].replace(\"'\",\"\")#numVoie pour adresse\n\t\t\t\telse:\t\t\t\tnumVoie\t\t\t\t\t\t= 'null'\n\t\t\t\tif(row[7]!=\"\"):\t\tnomVoie\t\t\t\t\t\t= row[7].replace(\"'\",\"\")#nomvoie pour adresse\n\t\t\t\telse:\t\t\t\tnomVoie\t\t\t\t\t\t= 'null'\n\t\t\t\tif(row[4]!=\"\"):\t\tcodePostal\t\t\t\t\t= \"'\" + row[4].replace(\"'\",\" \") + \"'\"\n\t\t\t\telse:\t\t\t\tcodePostal\t\t\t\t\t= 'null'\n\t\t\t\tif(row[10]!=\"\"):\tlatitude\t\t\t\t\t= row[10] \n\t\t\t\telse:\t\t\t\tlatitude \t\t\t\t\t= 'null'\n\t\t\t\tif(row[9]!=\"\"):\t\tlongitude \t\t\t\t\t= row[9] \n\t\t\t\telse:\t\t\t\tlongitude \t\t\t\t\t= 'null'\n\t\t\t\tif(row[12] !=\"\"):\taccesHandiSenso\t\t\t\t= \"'\" + row[12].replace(\"'\",\" \") + \"'\"\n\t\t\t\telse:\t\t\t\taccesHandiSenso\t\t\t\t= 'null'\n\t\t\t\tif(row[11] !=\"\"):\taccesHandiMot\t\t\t\t= \"'\" + row[11].replace(\"'\",\" \") + \"'\"\n\t\t\t\telse:\t\t\t\taccesHandiMot\t\t\t\t= 'null'\n\n\t\t\t\tif(numVoie!='null'): adresse = \"'\" + numVoie + \" \"\n\t\t\t\telse : adresse = \"'\"\n\t\t\t\tif(nomVoie!='null'):adresse = adresse + nomVoie + \"'\"\n\t\t\t\telse : adresse = 'null'\n\t\t\t\t\t\n\t\t\t\tlist_already_added.append(row[1])\n\t\t\t\trequete=\"INSERT INTO `Installation` VALUES (\" + numero + \",\" + Util_String.convertString(nom) + \",\" + adresse + \",\" + codePostal + \", \" + Util_String.convertString(ville) + \", \" + latitude + \", \" + longitude + \",\" + accesHandiSenso+ \",\" + accesHandiMot 
+\")\"\n\t\t\t\tcursor.execute(requete)\n\n\t\tdb.commit()\n\t\tdeconnexion()\n\n\texcept ConnexionException as e:\n\t\tprint(\"Connexion Exception : \"+e)\n\n\taffichageMessage(\"remplissageTableInstallation fini\")", "def add_to_cart(self):\r\n course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')\r\n CourseModeFactory(course_id=course.id)\r\n cart = Order.get_cart_for_user(self.user)\r\n PaidCourseRegistration.add_to_order(cart, course.id)", "def definir_partie(self):\r\n self.indice_joueur_courant = None\r\n self.phase = InterfaceGraphique.TROUVER_PREMIER\r\n self.lancer_passer_control_var = BooleanVar(value=False)\r\n self.choix = None\r\n\r\n # ---- Afficher la fenêtre des options ----\r\n afficheur_doptions = Parametres_partie(self)\r\n self.nombre_joueurs, self.as_sont_joker, self.nom_joueurs = afficheur_doptions.get_values_saved()\r\n self.nb_victoires = 0\r\n first = True\r\n self.joueurs = [JoueurInterface(self.frame_de_droit, nom, first) for nom in self.nom_joueurs]\r\n self.afficher_partie()\r\n self.jouer()", "def enregistrer_grille(self, numero_grille):\n self.clean()\n\n label = Label(self, text=\"Dans quel fichier voulez-vous enregistrer cette grille ?\")\n entry = Entry(self, textvariable=self.parent.sortie)\n bouton = Button(self, text=\"Choisir parmi les fichiers\",\n command=lambda: self.parent.entree.set(choisir_fichier(\"Choisir le fichier où écrire la grille\")))\n btn_lancement = Button(self, text=\"Lancer l'écriture\",\n command=lambda: self.parent.ecrire_grille(numero_grille))\n label.grid(column=0, row=0, columnspan=3)\n entry.grid(column=0, row=1, columnspan=2)\n bouton.grid(column=2, row=1)\n btn_lancement.grid(column=0, row=2, columnspan=3)", "def init_jeu():\n\n global jeu\n global memoire\n global liste_vehicules\n global score_nmouv\n global victoire\n score_nmouv = 0 # On remet à zéro le nombre de mouvements\n victoire = False # ON indique que le joureur n'a pas encore gagné\n ##------- Tableau de mémoire -------##\n memoire = []\n for ligne in range(1,7):\n transit = [] # Les cases de chaque ligne seront stockées dans \"transit\"\n for colonne in range(6): # Conception des cases d'une ligne\n transit.append(0)\n memoire.append(transit) # Ajout de la ligne à la liste principale\n\n # Vidage du plateau et des voitures\n jeu.delete(\"all\")\n liste_vehicules = []\n ##------- Création du plateau -------##\n jeu.create_rectangle(1, 1, 600, 600)\n for k in range(5):\n n = k+1\n jeu.create_line(n*c, 0, n*c, 600)\n jeu.create_line(0, n*c, 600, n*c)\n menu_fichier.entryconfig(\"Fermer le niveau\", state='disabled') # Comme aucun fichier n'est encore ouvert, on grise l'option pour fermer le niveau.", "def passer_au_suivant(self):\r\n\r\n # ---- Récupérer les données du DICTIONNAIRE ----\r\n index_premier_joueur = DICTIONNAIRE[\"PREMIER_JOUEUR\"]\r\n liste_joueur = DICTIONNAIRE[\"LISTE_JOUEUR\"]\r\n\r\n # ---- Établir une limite de lancer pour le prochain joueur ----\r\n premier_joueur = liste_joueur[index_premier_joueur]\r\n if self.nom_joueur == premier_joueur:\r\n DICTIONNAIRE[\"LIMITE_LANCER\"] = self.num_lancer\r\n # else:\r\n # DICTIONNAIRE[\"LIMITE_LANCER\"] = NB_MAX_LANCERS\r\n\r\n # ---- Déterminer le type de la combinaison ----\r\n if DICTIONNAIRE[\"AS_SONT_JOKER\"] == True:\r\n self.combinaison_actuelle = self.determiner_type_combinaison()\r\n else:\r\n self.combinaison_actuelle = self.determiner_type_combinaison_sans_as()\r\n\r\n # ---- Ajouter les données au DICTIONNAIRE ----\r\n 
DICTIONNAIRE[\"COMBINAISON{}\".format(self.nom_joueur)] = str(self.combinaison_actuelle)\r\n DICTIONNAIRE['VALEUR_COMBINAISON{}'.format(self.nom_joueur)] = self.evaluer_valeur_combinaison(\r\n str(self.combinaison_actuelle))\r\n\r\n # ---- Mettre à jour les données dans le DICTIONNAIRE ----\r\n if DICTIONNAIRE[\"NB_PARTIE_JOUEES\"] > 0:\r\n self.num_lancer = 0\r\n DICTIONNAIRE[\"NUM_LANCER\"] = self.num_lancer\r\n\r\n # ---- Redonner le contrôle à la fenêtre principale ----\r\n self.grab_release()\r\n self.parent.focus_set()\r\n self.destroy()", "def ajout_backup(product, substitute):\r\n\r\n print('\\n Voulez-vous enregistrer cette comparaison comme favori ?')\r\n print('1. Oui')\r\n print('2. Non')\r\n choix = user_choix_input(2)\r\n if choix == 1:\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"INSERT INTO Backup (produit_id, substitut_id) \\\r\n VALUES (%s,%s)\"\"\", (product.id[0], substitute.id[0]))\r\n conn.commit()\r\n print('Sauvegarde effectuée')\r\n elif choix == 2:\r\n print('Non sauvegardé')", "def profeCreaLabo(self):\n\t\tglobal ruta\n\t\tnumlabo= retornaNumLabo(ruta)\n\t\testudiantes= retornaEstudiantes(ruta)\n\t\tconn=sqlite3.connect(ruta)\n\t\tc= conn.cursor()\n\t\tfor estudiante in estudiantes:\n\t\t\tc.execute(\"INSERT INTO {} (LABORATORIO, ASISTENCIA, PREREPORTE, QUIZ, COTIDIANO, REPORTE) VALUES ({}, {}, {}, {}, {}, {})\".format(estudiante, numlabo, 0, 0, 0, 0, 0))\n\t\tnumlabo+=1\n\t\tc.execute(\"UPDATE {} SET NUMLABO = {} WHERE GUIA = {}\".format('info',numlabo , '\\'numlabo\\''))\n\t\tconn.commit()\n\t\tconn.close()\n\t\tself.popup.open()", "def ajoutCouleur(self):\n if self.testeCouleur() == 0:\n return # Renvoie le message de testeCouleur\n couleur = self.nomEntree.get()\n if len(couleur) > 2:\n self.dico[couleur] = self.codeEntree.get()\n self.echantillon.config(text=\"{}\\na été ajoutée au dictionnaire\".\n format(couleur), fg='white')\n else:\n self.echantillon.config(bg='white', text=\"Nom incorrect\")", "def nueva_partida(self, nro_vuelo):\n self.partidas.append(nro_vuelo)", "def __init__(self,nombre,idg,bpel=\"\"):\n # Valores por defecto de proyecto\n\n ## Nombre del proyecto (se emplea en la ruta)\n self.nombre = nombre\n\n ## Instancia del control del proyecto\n self.idg = idg\n\n ## Instancia del control de opciones\n self.opts = idg.opt\n\n ## Ruta del bpel original (si está especificado)\n self.bpel_o = path.abspath( bpel ) if bpel else \"\"\n\n # Variables internas, rutas, etc...\n self._set_vars()\n\n # Si se indica la ruta del bpel, debemos crear el proyecto \n # Si ya está creado, leemos su configuración\n if not bpel :\n self.cargar_proy()\n else:\n self.crear()\n self.guardar()\n self.idg.obtener_lista_proyectos()\n\n # Proyecto instrumentado o no\n self.inst = path.exists(self.bpr)\n\n # Comprueba que la estructura del proyecto está bien\n try:\n self.check()\n except (ProyectoRecuperable) as e:\n log.error(_(\"El proyecto se ha creado con errores: \" + str(e)))\n\n ## @name Listas\n ## @{\n\n ## Lista con los ficheros en el proyecto\n self.fichs = os.listdir( self.dir )\n ## Lista con los ficheros de casos de prueba (.bpts)\n self.fcasos = os.listdir( self.casos_dir )\n ## Lista con los ficheros de las trazas\n self.ftrazas= os.listdir( self.trazas_dir )\n ## Lista con los ficheros de invariantes\n self.finvr = os.listdir( self.invr_dir ) \n ## @}\n\n # Número de casos\n self.hay_casos = len(self.casos) > 0", "def registrar_entrada(placa):\r\n if placa in estacionamento:\r\n print(f\"O veículo com placa {placa} já está 
estacionado.\")\r\n elif len(estacionamento) >= NUM_VAGAS:\r\n print(\"O estacionamento está lotado.\")\r\n else:\r\n entrada = datetime.datetime.now()\r\n estacionamento[placa] = [entrada, None, None]", "def lance_reussite(mode, nb_cartes=32, affiche=False, nb_tas_max=2):\n\n\tchoix_pioche = True\n\n\t# on demande l'origine de la pioche tant que celle-ci n'est pas valide\n\twhile choix_pioche:\n\t\tdepuis_fichier = afficher_menu_qcm(\"Charger la pioche depuis un fichier ?\", \"Non\", \"Oui\")\n\n\t\t# si on charge la pioche depuis un fichier\n\t\tif depuis_fichier:\n\t\t\tnom_fichier = afficher_menu_fichier(\"Entrez le nom du fichier à charger\")\n\t\t\t# si ce fichier contient une pioche on le charge\n\t\t\tif fichier_pioche_valide(nom_fichier):\n\t\t\t\tpioche = init_pioche_fichier(nom_fichier)\n\n\t\t\t\t# si cette pioche est truquée on avertit le joueur\n\t\t\t\tif not verifier_pioche(pioche, nb_cartes):\n\t\t\t\t\tdire(\"Triche détectée ! La pioche est truquée !\")\n\t\t\t\t\tpioche_acceptee = afficher_menu_qcm(\"Voulez-vous continuer avec cette pioche ?\", \"Non\", \"Oui\")\n\n\t\t\t\t\tif pioche_acceptee:\n\t\t\t\t\t\tchoix_pioche = False\n\t\t\t\telse:\n\t\t\t\t\tchoix_pioche = False\n\t\t\telse:\n\t\t\t\tdire(\"Ce fichier ne contient pas de pioche valide !\")\n\t\telse:\n\t\t\t# sinon on génère une pioche aléatoire\n\t\t\tpioche = init_pioche_alea(nb_cartes)\n\t\t\tchoix_pioche = False\n\n\t# le jeu commence véritablement ici\n\tliste_tas = []\n\n\tif mode == \"auto\":\n\t\tliste_tas = reussite_mode_auto(pioche, affiche)\n\t\tdire(\"La partie est terminée !\")\n\t\tdire(\"Il reste {} tas.\".format(len(liste_tas)))\n\telif mode == \"manuel\":\n\t\tliste_tas = reussite_mode_manuel(pioche, nb_tas_max)\n\telse:\n\t\tdeboggue(\"Mode non reconnu !\")\n\n\t# enfin on demande de sauvegarder la pioche\n\tif not depuis_fichier:\n\t\tsauve_pioche = afficher_menu_qcm(\"Enregistrer la pioche ?\", \"Non\", \"Oui\")\n\n\t\tif sauve_pioche:\n\t\t\tnom_fichier = afficher_menu_fichier(\"Entrez le nom du fichier où enregistrer la pioche\", False)\n\t\t\tecrire_fichier_reussite(nom_fichier, pioche)\n\n\treturn liste_tas", "def ajouter_profil_casier(self, profil_casier):\n check_isinstance(profil_casier, ProfilCasier)\n if profil_casier.id in self.profils_casier:\n raise ExceptionCrue10(\"Le profil casier %s est déjà présent\" % profil_casier.id)\n self.profils_casier[profil_casier.id] = profil_casier", "def modosit(self) -> None:\n projekturlap.ProjektModositoUrlap(self._mb.winfo_toplevel(), self._kon)", "def manageAssuetudeActiviteProposeeForInstitution():", "def actualizarEntrada(self):\n\t\tglobal estudiante\n\t\tglobal ruta\n\t\tentradaProfe=\", \".join([str(symbol.data) for symbol in App.get_running_app().zbarcam.symbols])\n\t\tif entradaProfe in retornaEstudiantes(ruta):\n\t\t\testudiante=entradaProfe\n\t\t\tApp.get_running_app().refresh()\n\t\t\tself.parent.current = 'DisplayNotasProfe'\n\t\telse:\n\t\t\tself.popup.open()", "def test_add_with_default_mode(self):\r\n reg1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key, mode_slug=\"DNE\")\r\n\r\n self.assertEqual(reg1.unit_cost, 0)\r\n self.assertEqual(reg1.line_cost, 0)\r\n self.assertEqual(reg1.mode, \"honor\")\r\n self.assertEqual(reg1.user, self.user)\r\n self.assertEqual(reg1.status, \"cart\")\r\n self.assertEqual(self.cart.total_cost, 0)\r\n self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure the netcdf cc data handler operates correctly
def test_data_handling_nc_cc():
    input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'),
                   os.path.join(TEST_DATA_DIR, 'va_test.nc'),
                   os.path.join(TEST_DATA_DIR, 'orog_test.nc'),
                   os.path.join(TEST_DATA_DIR, 'zg_test.nc')]

    with xr.open_mfdataset(input_files) as fh:
        min_lat = np.min(fh.lat.values)
        min_lon = np.min(fh.lon.values)
        target = (min_lat, min_lon)
        plevel = fh.plev[-1]
        ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0))
        va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0))

    handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'],
                                 target=target, shape=(20, 20),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))

    assert handler.data.shape == (20, 20, 20, 2)

    handler = DataHandlerNCforCC(input_files,
                                 features=[f'U_{int(plevel)}pa',
                                           f'V_{int(plevel)}pa'],
                                 target=target, shape=(20, 20),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))

    if handler.invert_lat:
        handler.data = handler.data[::-1]

    assert handler.data.shape == (20, 20, 20, 2)
    assert np.allclose(ua, handler.data[..., 0])
    assert np.allclose(va, handler.data[..., 1])
[ "def test_solar_cc():\n\n features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']\n input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]\n nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')\n\n with xr.open_mfdataset(input_files) as fh:\n min_lat = np.min(fh.lat.values)\n min_lon = np.min(fh.lon.values) - 360\n target = (min_lat, min_lon)\n shape = (len(fh.lat.values), len(fh.lon.values))\n\n with pytest.raises(AssertionError):\n handler = DataHandlerNCforCC(input_files, features=features,\n target=target, shape=shape,\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n handler = DataHandlerNCforCC(input_files, features=features,\n nsrdb_source_fp=nsrdb_source_fp,\n target=target, shape=shape,\n temporal_slice=slice(0, 1),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n cs_ratio = handler.data[..., 0]\n ghi = handler.data[..., 1]\n cs_ghi = handler.data[..., 2]\n cs_ratio_truth = ghi / cs_ghi\n\n assert cs_ratio.max() < 1\n assert cs_ratio.min() > 0\n assert (ghi < cs_ghi).all()\n assert np.allclose(cs_ratio, cs_ratio_truth)\n\n with Resource(nsrdb_source_fp) as res:\n meta = res.meta\n tree = KDTree(meta[['latitude', 'longitude']])\n cs_ghi_true = res['clearsky_ghi']\n\n # check a few sites against NSRDB source file\n for i in range(4):\n for j in range(4):\n test_coord = handler.lat_lon[i, j]\n _, inn = tree.query(test_coord)\n\n assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j])", "def testNetCDF(self, path):\n if os.path.isfile(path):\n try:\n # Also test for netCDF version here?\n self.dataset_handle = ncpy.Dataset(path, \"r\")\n self.dataset_file = path\n self.ncVars = self.getncVars(path)\n if DEBUG: print(path + \" is netCDF\")\n # if 1:\n if DEBUG:\n print(path + \" is netCDF\")\n print(self.dataset_handle.dimensions.keys())\n for key in self.dataset_handle.dimensions.keys():\n print(self.dataset_handle.dimensions[key])\n print(self.dataset_handle.variables.keys())\n for key in self.dataset_handle.variables.keys():\n var = self.dataset_handle.variables[key]\n print(key, var)\n print(var[:])\n return True\n except AttributeError as e: print(e)\n except RuntimeError as e: print(e)\n except:\n print(sys.exc_info()[0])\n return False", "def read_netcdf(self,filename):", "def sanity_check_step(self):\n\n incs = [\"netcdf.h\"]\n libs = [\"libnetcdf.so\", \"libnetcdf.a\"]\n # since v4.2, the non-C libraries have been split off in seperate extensions_step\n # see netCDF-Fortran and netCDF-C++\n if LooseVersion(self.version) < LooseVersion(\"4.2\"):\n incs += [\"netcdf%s\" % x for x in [\"cpp.h\", \".hh\", \".inc\", \".mod\"]] + \\\n [\"ncvalues.h\", \"typesizes.mod\"]\n libs += [\"libnetcdf_c++.so\", \"libnetcdff.so\",\n \"libnetcdf_c++.a\", \"libnetcdff.a\"]\n\n custom_paths = {\n 'files': [\"bin/nc%s\" % x for x in [\"-config\", \"copy\", \"dump\",\n \"gen\", \"gen3\"]] +\n [\"lib/%s\" % x for x in libs] +\n [\"include/%s\" % x for x in incs],\n 'dirs': []\n }\n\n super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)", "def nocd_ncfile_decode(filename):\n ncf = netcdf_file(filename)\n data = {}\n data['date_ref'] = datetime.strptime(string.join(ncf.variables['reference_date_time'][:],''),\"%Y%m%d%H%M%S\")\n data['date_creation'] = datetime.strptime(string.join(ncf.variables['date_update'][:],''),\"%Y%m%d%H%M%S\")\n data['date_update'] = datetime.strptime(string.join(ncf.variables['date_update'][:],''),\"%Y%m%d%H%M%S\")\n data['platform'] = int(string.join(ncf.variables['platform_number'],''))\n data['cycle'] = 
ncf.variables['cycle_number'][0]\n data['data_centre'] = string.join(ncf.variables['data_centre'][:],'').strip() #2\n data['dc_reference'] = string.join(ncf.variables['dc_reference'][:],'').strip() #32\n data['data_mode'] = string.join(ncf.variables['data_mode'][:],'').strip() #1\n data['datetime'] = data['date_ref']+timedelta(days=int(ncf.variables['juld'][:]))\n data['datetime_qc'] = int(string.join(ncf.variables['juld_qc'],''))\n data['inst_reference'] = string.join(ncf.variables['inst_reference'][:],'').strip() #64\n data['latitude'] = ncf.variables['latitude'][0]\n data['longitude'] = ncf.variables['longitude'][0]\n data['position_qc'] = int(string.join(ncf.variables['position_qc'],''))\n for k in ['pressure', 'pressure_adjusted']:\n if k in ncf.variables.keys():\n data[k] = ma.array(ncf.variables[k][:]) #,dataset.PRES._FillValue)\n data[k+'_qc'] = ma.array(ncf.variables[k+'_qc'][:,0], dtype='i')\n for k in ['temperature', 'temperature_adjusted', 'salinity', 'salinity_adjusted']:\n if k in ncf.variables.keys():\n data[k] = ma.array(ncf.variables[k][0,:,0,0])\n data[k+'_qc'] = ma.array(ncf.variables[k+'_qc'][:,0], dtype='i')\n #data['salinity_adjusted_error'] = ma.masked_values(dataset.PSAL_ADJUSTED_ERROR[:],dataset.PSAL_ADJUSTED_ERROR._FillValue)\n return data", "def write_ncfile(exp_name, data): # may add exp_type\n print ('*** SUCCESS writing data into '+exp_name+'.nc!')\n return", "def check_for_ncview_warnings(ds):\n fhnc = tempfile.NamedTemporaryFile(delete=False, suffix=\".nc\")\n encoding = build_valid_encoding(ds=ds)\n ds.to_netcdf(fhnc, encoding=encoding)\n fhnc.close()\n\n nc_filename = fhnc.name\n\n call = f\"ncview {nc_filename}\"\n p = subprocess.Popen(\n call.split(\" \"),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n\n def get_cpu_usage():\n # watch the ncview process for a second and see how much cpu it's using\n return psutil.Process(p.pid).cpu_percent(interval=1.0)\n\n # wait until ncview has finished loading\n while get_cpu_usage() > 0.1:\n continue\n\n # then kill it\n p.send_signal(signal.SIGINT)\n\n # appears ncview writes all its output to stderr...\n _, stderr = p.communicate()\n\n if any([s in stderr for s in PROBLEM_STRINGS]):\n raise Exception(f\"ncview is not happy with the provided dataset: {stderr}\")", "def test_WCSGetCoverageNetCDF3_testdatanc(self):\n AdagucTestTools().cleanTempDir()\n status, data, headers = AdagucTestTools().runADAGUCServer(\"source=testdata.nc&SERVICE=WCS&REQUEST=GetCoverage&COVERAGE=testdata&CRS=EPSG%3A4326&FORMAT=NetCDF3&BBOX=-180,-90,180,90&RESX=1&RESY=1\",\n env=self.env, args=[\"--report\"])\n self.assertEqual(status, 0)\n self.assertEqual(data.getvalue()[0:10],\n b'CDF\\x02\\x00\\x00\\x00\\x00\\x00\\x00')", "def code_sample():\r\n\r\n # open data file\r\n dirMU = \"D:\\Projects\\FIDUCEO\\Data\\Simulated\\m02_n15.nc\"\r\n rootgrp = Dataset(dirMU, 'r')\r\n\r\n # access required data (first 100 match-ups for this example)\r\n u_raw = rootgrp.variables['cal_BB_Ur'][:100, :]\r\n times = rootgrp.variables['CorrIndexArray'][:100]\r\n corrData = rootgrp.variables['corrData'][:100]\r\n\r\n rootgrp.close()\r\n\r\n # run function\r\n C_ICT_err, C_ICT_raw_err = calc_CC_err(u_raw, times, corrData)\r\n\r\n return 0", "def save_ecco_dataset_to_netcdf(ecco_ds,\n output_dir,\n dataset_name = 'by_variable',\n time_method = 'by_record',\n output_array_precision = np.float32,\n output_freq_code=None):\n\n\n # Create a name of the files if not specified\n # ---------------------------------------------\n if 
dataset_name =='by_variable':\n # concat all data variables together into a single string\n dataset_name = '_'.join(list(ecco_ds.data_vars))\n\n\n # force load coordinate values in case they are in dask array\n # -----------------------------------------------------------\n for coord in ecco_ds.coords:\n ecco_ds[coord].load()\n\n\n # Define fill values for NaN\n # ---------------------------------------------\n if output_array_precision == np.float32:\n netcdf_fill_value = nc4.default_fillvals['f4']\n\n elif output_array_precision == np.float64:\n netcdf_fill_value = nc4.default_fillvals['f8']\n\n\n # Create NetCDF encoding directives\n # ---------------------------------------------\n print('\\n... creating variable encodings')\n # ... data variable encoding directives\n dv_encoding = dict()\n for dv in ecco_ds.data_vars:\n dv_encoding[dv] = {'zlib':True, \\\n 'complevel':5,\\\n 'shuffle':True,\\\n '_FillValue':netcdf_fill_value}\n\n # ... coordinate encoding directives\n print('\\n... creating coordinate encodings')\n coord_encoding = dict()\n for coord in ecco_ds.coords:\n # set default no fill value for coordinate\n if output_array_precision == np.float32:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float32'}\n elif output_array_precision == np.float64:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float64'}\n\n # force 64 bit ints to be 32 bit ints\n if (ecco_ds[coord].values.dtype == np.int32) or \\\n (ecco_ds[coord].values.dtype == np.int64) :\n coord_encoding[coord]['dtype'] ='int32'\n\n # fix encoding of time\n if coord == 'time' or coord == 'time_bnds':\n coord_encoding[coord]['dtype'] ='int32'\n\n if 'units' in ecco_ds[coord].attrs:\n # apply units as encoding for time\n coord_encoding[coord]['units'] = ecco_ds[coord].attrs['units']\n # delete from the attributes list\n del ecco_ds[coord].attrs['units']\n\n elif coord == 'time_step':\n coord_encoding[coord]['dtype'] ='int32'\n\n # ... combined data variable and coordinate encoding directives\n encoding = {**dv_encoding, **coord_encoding}\n\n\n # Create directory for output files\n # ---------------------------------------------\n filepath = output_dir / dataset_name\n\n if not filepath.exists():\n filepath.mkdir(parents=True, exist_ok=True)\n\n\n # Determine output freqency code.\n # ---------------------------------------------\n # user can specify directory or it can be found if the dataset\n # has the 'time_coverage_resolution' global attribute\n if output_freq_code == None:\n if 'time_coverage_resolution' in ecco_ds.attrs:\n\n print('dataset time averaging from metadata')\n time_coverage_resolution = ecco_ds.attrs['time_coverage_resolution']\n if time_coverage_resolution == 'P1M':\n output_freq_code='AVG_MON'\n elif time_coverage_resolution == 'P1D':\n output_freq_code='AVG_DAY'\n elif time_coverage_resolution == 'P0S':\n output_freq_code='SNAP'\n else:\n print('output_freq_code not defined and not available in dataset metadata')\n print('... 
using full record time in filename')\n\n\n # Write records to disk as NetCDF\n # ---------------------------------------------\n # one file per time level\n\n if time_method == 'by_record':\n for time_i, rec_time in enumerate(ecco_ds.time):\n\n cur_ds = ecco_ds.isel(time=time_i)\n\n # cast data variables to desired precision (if necessary)\n #for data_var in cur_ds.data_vars:\n # if cur_ds[data_var].values.dtype != output_array_precision:\n # cur_ds[data_var].values = cur_ds[data_var].astype(output_array_precision)\n\n time_date_info =\\\n make_date_str_from_dt64(cur_ds.time.values, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' + time_date_info['short'] +\\\n '_' + time_date_info['ppp_tttt'] + '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()\n\n # one file per year\n elif time_method == 'by_year':\n unique_years = np.unique(ecco_ds.time.dt.year)\n print(unique_years)\n\n for year in unique_years:\n # pull out only records for this year\n cur_ds = ecco_ds.sel(time=slice(str(year), str(year)))\n\n first_time = cur_ds.time.values[0]\n last_time = cur_ds.time.values[-1]\n\n first_time_date_info =\\\n make_date_str_from_dt64(first_time, output_freq_code)\n\n last_time_date_info =\\\n make_date_str_from_dt64(last_time, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' +\\\n first_time_date_info['short'] + '_' +\\\n last_time_date_info['short'] + '_' +\\\n first_time_date_info['ppp_tttt']+ '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... 
saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()", "def _write_nc(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n from netCDF4 import Dataset\n grid_nc = Dataset(FN, 'w', format='NETCDF4')\n grid_nc.createDimension('one', 1)\n grid_nc.createDimension('n_cartesian', 3)\n grid_nc.createDimension('n_points', n_points)\n grid_nc.createVariable('origin', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('counts', 'i8', ('one', 'n_cartesian'))\n grid_nc.createVariable('spacing', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('vals', 'f8', ('one', 'n_points'), zlib=True)\n for key in data.keys():\n grid_nc.variables[key][:] = data[key]\n grid_nc.close()", "def NC_CommonVariables(fn_nc, time_list, np):\r\n import netCDF4\r\n #time\r\n times = fn_nc.createVariable('time', np.float64, ('time',))\r\n #variable attributes\r\n times.type = 'float64'\r\n times.units = 'seconds since 1970-01-01 00:00:00 UTC'\r\n times.standard_name = 'time'\r\n times.long_name = 'Time (seconds since 1970-01-01 00:00:00)'\r\n times.axis = 'T'\r\n times.valid_min = np.float64(netCDF4.date2num(min(time_list),units=times.units))\r\n times.valid_max = np.float64(netCDF4.date2num(max(time_list),units=times.units))\r\n times.calendar = 'standard'\r\n #write data\r\n times[:] = np.float64(netCDF4.date2num(time_list.to_list(),units=times.units))\r\n \r\n #year\r\n years = fn_nc.createVariable('year', np.int32, ('time',))\r\n #variable attributes\r\n years.type = 'int32'\r\n years.units = '1'\r\n years.long_name = 'Year'\r\n years.valid_min = np.int32(min(time_list).year)\r\n years.valid_max = np.int32(max(time_list).year) \r\n #write data\r\n years[:] = np.int32(time_list.year.to_numpy())\r\n\r\n #month\r\n months = fn_nc.createVariable('month', np.int32, ('time',))\r\n #variable attributes\r\n months.type = 'int32'\r\n months.units = '1'\r\n months.long_name = 'Month'\r\n months.valid_min = np.int32(min(time_list).month)\r\n months.valid_max = np.int32(max(time_list).month) \r\n #write data\r\n months[:] = np.int32(time_list.month.to_numpy())\r\n \r\n #day\r\n days = fn_nc.createVariable('day', np.int32, ('time',))\r\n #variable attributes\r\n days.type = 'int32'\r\n days.units = '1'\r\n days.long_name = 'Day'\r\n days.valid_min = np.int32(min(time_list).day)\r\n days.valid_max = np.int32(max(time_list).day)\r\n #write data\r\n days[:] = np.int32(np.int32(time_list.day.to_numpy()))\r\n \r\n #hour\r\n hours = fn_nc.createVariable('hour', np.int32, ('time',))\r\n #variable attributes\r\n hours.type = 'int32'\r\n hours.units = '1'\r\n hours.long_name = 'Hour'\r\n hours.valid_min = np.int32(min(time_list).hour)\r\n hours.valid_max = np.int32(max(time_list).hour) \r\n #write data\r\n hours[:] = np.int32(time_list.hour.to_numpy())\r\n \r\n #minute\r\n minutes = fn_nc.createVariable('minute', np.int32, ('time',))\r\n #variable attributes\r\n minutes.type = 'int32'\r\n minutes.units = '1'\r\n minutes.long_name = 'Minute'\r\n minutes.valid_min = np.int32(min(time_list).minute)\r\n minutes.valid_max = np.int32(max(time_list).minute) \r\n #write data\r\n minutes[:] = np.int32(time_list.minute.to_numpy())\r\n \r\n #second\r\n seconds = fn_nc.createVariable('second', np.float32, ('time',))\r\n #variable attributes\r\n seconds.type = 'float32'\r\n seconds.units = '1'\r\n seconds.long_name = 'Second'\r\n seconds.valid_min = np.float32(min(time_list).second)\r\n seconds.valid_max = np.float32(max(time_list).second) 
\r\n #write data\r\n seconds[:] = np.int32(time_list.second.to_numpy())\r\n \r\n #doy\r\n doys = fn_nc.createVariable('day_of_year', np.float32, ('time',))\r\n all_doys = np.float32(np.asarray([time_list[i].timetuple().tm_yday for i in range(0,len(time_list))]))\r\n #variable attributes\r\n doys.type = 'float32'\r\n doys.units = '1'\r\n doys.long_name = 'Day of Year'\r\n doys.valid_min = np.float32(min(all_doys))\r\n doys.valid_max = np.float32(max(all_doys))\r\n #write data\r\n doys[:] = np.float32(np.asarray([time_list[i].timetuple().tm_yday for i in range(0,len(time_list))]))\r\n \r\n lats = fn_nc.createVariable('latitude', np.float32, ('latitude',))\r\n #variable attributes\r\n lats.type = 'float32'\r\n lats.units = 'degree_north'\r\n lats.long_name = 'Latitude'\r\n lats[:] = [72.572962]\r\n \r\n lons = fn_nc.createVariable('longitude', np.float32, ('longitude',))\r\n #variable attributes\r\n lons.type = 'float32'\r\n lons.units = 'degree_east'\r\n lons.long_name = 'Longitude'\r\n lons[:] = [-38.470361]\r\n \r\n return", "def check_netcdf_file():\n # check the model file and extract necessary information\n # must be in the argument list\n if NETCDF_FILE_NAME is None:\n print('[ERROR] the netCDF model file name is required', flush=True)\n usage_csv()\n sys.exit(1)\n\n # user may provide full path\n elif os.path.isfile(NETCDF_FILE_NAME):\n model_file_name = NETCDF_FILE_NAME\n base_file_name, ext = os.path.splitext(model_file_name)\n\n # user may place it under the data directory\n elif os.path.isfile(os.path.join(DATA_DIR, NETCDF_FILE_NAME)):\n model_file_name = os.path.join(DATA_DIR, NETCDF_FILE_NAME)\n base_file_name, ext = os.path.splitext(model_file_name)\n\n # could not find the file\n else:\n print('[ERROR] could not find the netCDF model file {}'.format(NETCDF_FILE_NAME), flush=True)\n usage_csv()\n sys.exit(1)\n\n return model_file_name, base_file_name", "def test_WCSGetCoverageNetCDF4_testdatanc(self):\n AdagucTestTools().cleanTempDir()\n status, data, headers = AdagucTestTools().runADAGUCServer(\"source=testdata.nc&SERVICE=WCS&REQUEST=GetCoverage&COVERAGE=testdata&CRS=EPSG%3A4326&FORMAT=NetCDF4&BBOX=-180,-90,180,90&RESX=1&RESY=1\",\n env=self.env, args=[\"--report\"])\n self.assertEqual(status, 0)\n self.assertEqual(data.getvalue()[0:6], b'\\x89HDF\\r\\n')", "def read_netcdf(self):\n logging.info('Start reading from NetCDf file...')\n\n with Dataset(self.cdf_file, 'r') as ncfile:\n\n # NetCDF order is [t, species, ky, kx, theta, r]\n if self.theta_idx == None:\n self.field = np.array(ncfile.variables[self.in_field]\n [self.time_range[0]:self.time_range[1],\n self.spec_idx,:,:,:])\n else:\n self.field = np.array(ncfile.variables[self.in_field]\n [self.time_range[0]:self.time_range[1],\n self.spec_idx,:,:,\n self.theta_idx[0]:self.theta_idx[1],\n :])\n self.t = np.array(ncfile.variables['t'][self.time_range[0]:\n self.time_range[1]])\n\n\n self.field = np.squeeze(self.field)\n self.field = np.swapaxes(self.field, 1, 2)\n if len(self.field.shape) < 5:\n self.field = self.field[:,:,:,np.newaxis,:]\n\n self.drho_dpsi = float(ncfile.variables['drhodpsi'][:])\n self.kx = np.array(ncfile.variables['kx'][:])/self.drho_dpsi\n self.ky = np.array(ncfile.variables['ky'][:])/self.drho_dpsi\n self.theta = np.array(ncfile.variables['theta'][:])\n self.gradpar = np.array(ncfile.variables['gradpar'][:])/self.amin\n self.r_prime = np.array(ncfile.variables['Rprime'][:])\n try:\n self.bpol = np.array(ncfile.variables['bpol'][:])*self.bref\n except KeyError:\n self.bpol = 
self.geometry[:,7]*self.bref\n\n logging.info('Finished reading from NetCDf file.')", "def verify_netcdf(extents_dir, out_ncfile):\n netcdf_old=out_ncfile #'/g/data/fk4/wofs/water_f7q/extents/149_-036/LS_WATER_149_-036_1987-05-22T23-08-20.154_2014-03-28T23-47-03.171.nc'\n\n tiles = [make_tileinfo(filename) for filename in glob(os.path.join(extents_dir, '*.tif'))]\n tiles.sort(key=lambda t: t.datetime)\n\n with netCDF4.Dataset(netcdf_old) as nco:\n for i in range(0,len(tiles)):\n print nco['time'][i]\n print tiles[i]\n with rasterio.open(tiles[i].filename) as tile_data:\n print \"Any difference? \" \n print numpy.sum(nco['Data'][:,:,i])\n print numpy.sum(tile_data.read(1))\n\n print type(nco['Data'][:,:,i]), type(tile_data.read(1))\n print nco['Data'][:,:,i].shape, tile_data.read(1).shape\n \n print numpy.sum(nco['Data'][:,:,i] - tile_data.read(1)[:,:])\n #print tile_data.read(1)[0:100,0:100] \n\n #print (nco['Data'][:,:,i] == tile_data.read(1)).all()", "def cl_file(tmp_path):\n nc_path = os.path.join(tmp_path, 'cesm2_waccm_cl.nc')\n dataset = Dataset(nc_path, mode='w')\n dataset.createDimension('lev', size=2)\n dataset.createDimension('bnds', size=2)\n\n # Dimensional variables\n dataset.createVariable('lev', np.float64, dimensions=('lev',))\n dataset.createVariable('lev_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['lev'][:] = [1.0, 2.0]\n dataset.variables['lev'].bounds = 'lev_bnds'\n dataset.variables['lev'].units = '1'\n dataset.variables['lev_bnds'][:] = [[0.5, 1.5], [1.5, 3.0]]\n dataset.variables['lev_bnds'].standard_name = (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n dataset.variables['lev_bnds'].units = '1'\n dataset.variables['lev_bnds'].formula_terms = (\n 'p0: p0 a: a_bnds b: b_bnds ps: ps')\n\n # Coordinates for derivation of pressure coordinate\n dataset.createVariable('a', np.float64, dimensions=('lev',))\n dataset.createVariable('a_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.createVariable('b', np.float64, dimensions=('lev',))\n dataset.createVariable('b_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['a'][:] = [1.0, 2.0]\n dataset.variables['a'].bounds = 'a_bnds'\n dataset.variables['a_bnds'][:] = [[1.5, 0.0], [3.0, 1.5]]\n dataset.variables['b'][:] = [0.0, 1.0]\n dataset.variables['b'].bounds = 'b_bnds'\n dataset.variables['b_bnds'][:] = [[0.5, -1.0], [2.0, 0.5]]\n\n dataset.close()\n return nc_path", "def main():\n\n # Read control file and assign values\n import sys\n control_file = None if len(sys.argv) != 2 else sys.argv[1]\n if control_file is not None:\n control = cfg.read_yaml(control_file)\n logging.info(\"Control File: \" + str(control_file) + \" successfully read\")\n else:\n raise RuntimeError(\"A control file must be provided for camps_metar_to_nc. 
Exiting program.\")\n\n # Read contents of control file\n date_range = control['date_range']\n input_data = control['input_data']\n output = control['output']\n debug_level = control['debug_level']\n log_file = control['log_file']\n err_file = control['err_file']\n station_list = control['station_list']\n station_table = control['station_table']\n qc_flag = control['qc_flag']\n pickle = control['pickle']\n\n num_procs = control['num_processors']\n os.environ['NUM_PROCS'] = str(num_procs)\n num_procs = check_num_procs(num_procs)\n\n if log_file:\n out_log = open(log_file, 'w+')\n sys.stdout = out_log\n sys.stderr = out_log\n\n try:\n logging.getLogger('').handlers = []\n level = logging.getLevelName(debug_level)\n logging.basicConfig(level=level)\n except:\n print(\"Logging setup failed\")\n raise\n\n logging.info(\"Starting main\")\n\n dates = util.generate_date_range(date_range)\n stn_lst = util.read_station_list(station_list)\n stn_lst,stn_tbl = util.read_station_table(station_table,stn_lst)\n\n # This will read the CSV files and put them into stations\n reader = read_obs(input_data,dates,stn_tbl,stn_lst,qc_flag)\n\n # Convert all arrays to numpy arrays\n logging.info(\"Converting station arrays to numpy arrays\")\n stations = reader.station_list\n stations = convert_to_numpy(stations)\n fix_rounding_errors(stations)\n\n # Optionally pickle and save\n if pickle:\n logging.info(\"Pickling\")\n save_object(stations, 'stations.pkl')\n\n # Check if QC is to be performed\n if control.qc_flag:\n stations = qc_main.qc(stations,err_file)\n # Take off the start and end times from the arrays\n remove_time_buffer(stations)\n\n # Sort stations by station name\n stations = OrderedDict(sorted(stations.items()))\n\n # Create Output filename\n filename = output\n\n dimensions = cfg.read_dimensions()\n num_stations = dimensions['nstations']\n time_dim = dimensions['time']\n\n # Format each observation into a 2D array with\n # dimensions of # of stations and # of times\n if pickle:\n logging.info(\"Pickling\")\n save_object(stations, 'postqc.pkl')\n logging.info(\"Construct 2D observation arrays\")\n camps_data = []\n example_station = list(stations.values())[0]\n obs = list(example_station.observations.keys())\n obs.remove('TIME')\n start_time = dates[0]\n end_time = dates[-1]\n logging.info(\"start time: \" + start_time)\n logging.info(\"end time: \" + end_time)\n\n met_to_nc = cfg.read_metar_nc_lookup()\n for metar_name in obs:\n # Set the observation name to the standard CAMPS name\n try:\n observation_name = met_to_nc[metar_name]\n except:\n logging.error(\"Cannot find the netcdf equivalent of \" +\n metar_name +\n \"in METAR lookup table. 
Skipping.\")\n continue\n\n # Loop through the stations and stitch together the current observation\n temp_obs = []\n for station_name, cur_station in stations.items():\n if 'latitude' in observation_name:\n temp_obs.append(list(np.repeat(stn_tbl[station_name]['lat'],len(dates))))\n elif 'longitude' in observation_name:\n temp_obs.append(list(np.repeat(stn_tbl[station_name]['lon'],len(dates))))\n else:\n temp_obs.append(cur_station.get_obs(metar_name))\n obs_data = np.array(temp_obs)\n logging.info(observation_name)\n\n # Construct Camps data object\n camps_obj = Camps_data(observation_name)\n try:\n camps_obj.metadata['vertical_coord'] = camps_obj.metadata.pop('coordinates')\n except:\n pass\n if camps_obj.is_feature_of_interest() and len(obs_data.shape)>1:\n obs_data = obs_data[:,0]\n camps_obj.set_dimensions((num_stations,))\n else:\n camps_obj.set_dimensions((time_dim,num_stations))\n camps_obj.add_data(obs_data)\n camps_obj.add_source('METAR')\n camps_obj.add_process('DecodeBUFR')\n if qc_flag: camps_obj.add_process('METARQC')\n camps_obj.change_data_type()\n\n # Again check for time bounds, pass extra info to add_time if\n # there are time bounds\n if not camps_obj.is_feature_of_interest():\n if camps_obj.has_time_bounds():\n hours = camps_obj.properties['hours']\n camps_obj.metadata['hours'] = hours\n camps_obj.time = add_time(start_time, end_time, time_bounds=hours)\n else:\n camps_obj.time = add_time(start_time, end_time)\n\n # Transpose the array and swap dimension names. Note that this may be a\n # temporary solution.\n if len(camps_obj.data.shape)>1:\n camps_obj.data = np.transpose(camps_obj.data)\n\n camps_data.append(camps_obj)\n\n camps_obj = pack_station_names(list(stations.keys()))\n camps_obj.add_source('METAR')\n camps_data.append(camps_obj)\n\n if qc_flag:\n extra_globals = {\"source\": \"Data from METAR with MDL Quality Control\"}\n else:\n extra_globals = {\"source\": \"Data from METAR (No MDL Quality Control)\"}\n\n # TEMPORARY: Need to perform 2 actions here. We should do this elsewhere, but here for now...\n #\n # 1) Unscale precip obs. Precip obs in MDL hourly table are units of hundreths of inches\n # (i.e. 1.00 inches is 100).\n # 2) Trace amounts in the MDL hourly table are coded as -4. 
Here we need to set these\n # to a \"defined\" trace amount as float of value 0.004.\n for c in camps_data:\n if \"precipitation_amount\" in c.standard_name:\n c.data = np.where(np.logical_and(c.data>=0.0,c.data<9999.),c.data/100.0,c.data)\n c.data = np.where(c.data==-4,np.float32(0.004),c.data)\n # TEMPORARY\n\n # Write into netCDF4 file\n writer.write(camps_data, filename, extra_globals)\n if log_file:\n out_log.close()", "def repair_netcdf(fname):\n\n\t# ========== Set the path and the file name ==========\n\t# fname = \"%s_%s_%s_r1i1p1_%s_1950_2050_%s_regrid.nc\" %(var, model, sen, units, sen)\n\tfout = \"%s_setgrid\" % (fname)\n\n\t\n\t# ========== Create a list of files to cleanup ==========\n\tcleanup = []\n\n\t# ========== Check if the file exists ==========\n\tif not os.path.isfile(fname+\".nc\"):\n\t\t# check if the file exists with a different name\n\t\traise IOError(\"WARNING: The file %s cannot be found\"% fname)\n\n\t\n\t# ========== Read longitude from NC file ==========\n\tfh = Dataset(fname+\".nc\", mode='r')\n\ttry:\n\t\tlon = fh.variables['longitude'][:]\n\texcept:\n\t\ttry:\n\t\t\tlon = fh.variables['lon'][:]\n\t\texcept:\n\t\t\tlon = fh.variables['easting'][:] #easting\n\n\n\n\n\t# ========== Create a new grid ==========\n\t# Save the current grid\n\tsubp.call(\"cdo griddes %s.nc > %sGriddes\" % (fname, fname), shell=True)\n\t# add the griddes to the cleanup \n\tcleanup.append(\"%sGriddes\" % fname)\n\n\t# open the current grid\n\tgfile = open(\"%sGriddes\" % fname, \"r\") \n\t# Split the lines of the grid file\n\tginfo = gfile.read().splitlines()\n\t\n\t#Some models have no lat/lon bounds, skip in this case and copy\n\t#\"regrid\" file as \"setgrid\"\n\tif not (any([n.startswith(\"xbounds\") for n in ginfo]) and \n\t\t any([n.startswith(\"ybounds\") for n in ginfo])):\n\t\tsubp.call(\"cp %s.nc %s.nc\" % (fname, fout), shell=True)\n\t\tcleanup.append(\"%s.nc\" % fname)\n\t\treturn cleanup\t\n\t\n\t# Check and see if the start is known\n\tif (\n\t\tany([n.startswith(\"xfirst\") for n in ginfo])\n\t\t) and (\n\t\tany([n.startswith(\"xinc\") for n in ginfo])\n\t\t):\n\t\taddxdet = False\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\"]\n\telse:\n\t\taddxdet = True\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\", \"xfirst\", \"xinc\"]\n\n\t# Create list to hold the new grid details\n\tnew_grid = []\n\n\tfor ginf in ginfo:\n\t\ttest = []\n\t\tfor be in badel:\n\t\t\tif ginf.startswith(be):\n\t\t\t\ttest.append(False)\n\t\t\telif ginf == \"#\":\n\t\t\t\ttest.append(False)\n\t\t\telse:\n\t\t\t\ttest.append(True)\n\t\t\n\t\tif all(test):\n\t\t\tnew_grid.append(ginf)\n\t# Add the additional x variables\n\tif addxdet:\n\t\t# work out the model from the fname\n\t\tmodel = fname.split(\"/\")[-2]\n\t\tnew_grid.append('xfirst = -180')\n\t\tnew_grid.append('xinc = %s' % str(\n\t\t\tfloat(lon) ))\n\t\n\n\t# Check the y values, if they are missing use the ones in the original grid file\n\tif not (any([n.startswith(\"yfirst\") for n in ginfo])):\n\t\t# print (\"Seting the y bounds\")\n\t\tvals = []\n\t\tfor glov in range(0,len(ginfo)):\n\t\t\tif ginfo[glov].startswith(\"yvals\"):\n\t\t\t\tvals.append(glov)\n\t\t\telif ginfo[glov].startswith(\"ybounds\"):\n\t\t\t\tvals.append(glov)\n\t\tif len (vals) == 2:\n\t\t\tfor yv in ginfo[vals[0]:vals[1]]:\n\t\t\t\tnew_grid.append(yv)\n\n\t\telse:\n\t\t\tprint(\"\\n\")\n\t\t\traise IndexError(\"Bounding is incorrect\")\n\n\t# Save the grid 
out\n\tnewgrid = save_grid(fname, new_grid)\n\tcleanup.append(newgrid)\n\n\t# ========== Set the new grid file ==========\n\t# Save the current grid\n\tsubp.call(\"cdo setgrid,%sGridFix %s.nc %s.nc\" % (fname, fname, fout), shell=True)\n\t\n\tif not os.path.isfile(\"%s.nc\" % fout):\n\t\traise IOError(\"The output file was not created, going interactive\")\n\t\n\t# ========== return the files to be removed ==========\n\tcleanup.append(\"%s.nc\" % fname)\n\treturn cleanup" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test solar data handling from CC data file with clearsky ratio calculated using clearsky ratio from NSRDB h5 file.
def test_solar_cc():
    features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']
    input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]
    nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')

    with xr.open_mfdataset(input_files) as fh:
        min_lat = np.min(fh.lat.values)
        min_lon = np.min(fh.lon.values) - 360
        target = (min_lat, min_lon)
        shape = (len(fh.lat.values), len(fh.lon.values))

    with pytest.raises(AssertionError):
        handler = DataHandlerNCforCC(input_files, features=features,
                                     target=target, shape=shape,
                                     val_split=0.0,
                                     worker_kwargs=dict(max_workers=1))

    handler = DataHandlerNCforCC(input_files, features=features,
                                 nsrdb_source_fp=nsrdb_source_fp,
                                 target=target, shape=shape,
                                 temporal_slice=slice(0, 1),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))

    cs_ratio = handler.data[..., 0]
    ghi = handler.data[..., 1]
    cs_ghi = handler.data[..., 2]
    cs_ratio_truth = ghi / cs_ghi

    assert cs_ratio.max() < 1
    assert cs_ratio.min() > 0
    assert (ghi < cs_ghi).all()
    assert np.allclose(cs_ratio, cs_ratio_truth)

    with Resource(nsrdb_source_fp) as res:
        meta = res.meta
        tree = KDTree(meta[['latitude', 'longitude']])
        cs_ghi_true = res['clearsky_ghi']

    # check a few sites against NSRDB source file
    for i in range(4):
        for j in range(4):
            test_coord = handler.lat_lon[i, j]
            _, inn = tree.query(test_coord)

            assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j])
[ "def test_from_ctd():\n dfile = os.path.join(DATA_DIR,'ctd_BM54.cnv')\n\n # Load in the raw data using np.loadtxt\n raw = np.loadtxt(dfile, comments = '#', skiprows = 175,\n usecols = (0, 1, 3, 8, 9, 10, 12))\n\n # State the units of the input data (read by hand from the file)\n units = ['deg C', 'db', 'mg/m^3', 'm', 'psu', 'kg/m^3', 'mg/l']\n\n # State the equivalent mks units (translated here by hand)\n mks_units = ['K', 'Pa', 'kg/m^3', 'm', 'psu', 'kg/m^3', 'kg/m^3']\n\n # Clean the profile to remove depth reversals\n z_col = 3\n p_col = 1\n profile = get_profile(raw, z_col, 50, p_col, 0., 2.124, 1529.789, 11074,\n 7)\n\n # Convert the profile to standard units\n profile, units = get_units(profile, units, 11074, 7, mks_units)\n\n # Create an empty netCDF4-classic dataset to store the CTD information\n nc_file = os.path.join(OUTPUT_DIR,'test_BM54.nc')\n summary = 'Py.Test test file'\n source = 'R/V Brooks McCall, station BM54'\n sea_name = 'Gulf of Mexico'\n p_lat = 28.0 + 43.945 / 60.0\n p_lon = 360 - (88.0 + 22.607 / 60.0)\n p_time = date2num(datetime(2010, 5, 30, 18, 22, 12),\n units = 'seconds since 1970-01-01 00:00:00 0:00',\n calendar = 'julian')\n nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,\n p_lon, p_time)\n\n # Fill the netCDF4-classic dataset with the data in profile\n symbols = ['temperature', 'pressure', 'wetlab_fluorescence', 'z',\n 'salinity', 'density', 'oxygen']\n comments = ['measured', 'measured', 'measured', 'measured', 'measured',\n 'measured', 'measured']\n long_names = ['Absolute temperature', 'pressure', 'Wetlab fluorescence',\n 'depth below the water surface', 'Practical salinity',\n 'Density', 'Oxygen']\n std_names = ['temperature', 'pressure', 'wetlab fluorescence', 'depth',\n 'salinity', 'density', 'oxygen']\n nc = get_filled_nc_db(nc, profile, symbols, units, comments, z_col,\n long_names, std_names)\n\n # Create a Profile object from this netCDF dataset and test the Profile\n # methods\n bm54 = get_profile_obj(nc, ['oxygen'], ['kg/m^3'])\n\n # Close down the pipes to the netCDF dataset files\n bm54.nc.close()", "def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source", "def test_from_txt():\n cdat_file = 
os.path.join(DATA_DIR,'C.dat')\n tdat_file = os.path.join(DATA_DIR,'T.dat')\n\n # Load in the raw data using np.loadtxt\n C_raw = np.loadtxt(cdat_file, comments = '%')\n T_raw = np.loadtxt(tdat_file, comments = '%')\n\n # Clean the profile to remove depth reversals\n C_data = get_profile(C_raw, 1, 25, None, 0., 1.0256410e+01, 8.0000000e+02,\n 34, 2)\n T_data = get_profile(T_raw, 1, 25, None, 0., 1.0831721e+01, 7.9922631e+02,\n 34, 2)\n\n # Convert the data to standard units\n C_data, C_units = get_units(C_data, ['psu', 'm'], 34, 2, ['psu', 'm'])\n T_data, T_units = get_units(T_data, ['deg C', 'm'], 34, 2, ['K', 'm'])\n\n # Create an empty netCDF4-classic dataset to store the CTD information\n nc_file = os.path.join(OUTPUT_DIR,'test_DS.nc')\n summary = 'Py.Test test file'\n source = 'Profiles from the SINTEF DeepSpill Report'\n sea_name = 'Norwegian Sea'\n p_lat = 64.99066\n p_lon = 4.84725\n p_time = date2num(datetime(2000, 6, 27, 12, 0, 0),\n units = 'seconds since 1970-01-01 00:00:00 0:00',\n calendar = 'julian')\n nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,\n p_lon, p_time)\n\n # Fill the netCDF4-classic dataset with the data in the salinity profile\n symbols = ['salinity', 'z']\n comments = ['measured', 'measured']\n long_names = ['Practical salinity', 'depth below the water surface']\n std_names = ['salinity', 'depth']\n nc = get_filled_nc_db(nc, C_data, symbols, C_units, comments, 1,\n long_names, std_names)\n\n # Because the temperature data will be interpolated to the vertical\n # coordinates in the salinity profile, insert the data and test that\n # insertion worked correctly by hand\n symbols = ['temperature', 'z']\n comments = ['measured', 'measured']\n long_names = ['Absolute temperature', 'depth below the water surface']\n std_names = ['temperature', 'depth']\n nc = ambient.fill_nc_db(nc, T_data, symbols, T_units, comments, 1)\n assert_array_almost_equal(nc.variables['z'][:],\n C_data[:,1], decimal = 6)\n z = nc.variables['z'][:]\n T = nc.variables['temperature'][:]\n f = interp1d(z, T)\n for i in range(T_data.shape[0]):\n assert_approx_equal(T_data[i,0], f(T_data[i,1]), significant = 5)\n assert nc.variables['temperature'].comment == comments[0]\n\n # Calculate and insert the pressure data\n z = nc.variables['z'][:]\n T = nc.variables['temperature'][:]\n S = nc.variables['salinity'][:]\n P = ambient.compute_pressure(z, T, S, 0)\n P_data = np.vstack((z, P)).transpose()\n nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'],\n ['measured', 'computed'], 0)\n\n # Test the Profile object\n ds = get_profile_obj(nc, [], [])\n\n # Close down the pipes to the netCDF dataset files\n ds.close_nc()\n\n return ds", "def code_sample():\r\n\r\n # open data file\r\n dirMU = \"D:\\Projects\\FIDUCEO\\Data\\Simulated\\m02_n15.nc\"\r\n rootgrp = Dataset(dirMU, 'r')\r\n\r\n # access required data (first 100 match-ups for this example)\r\n u_raw = rootgrp.variables['cal_BB_Ur'][:100, :]\r\n times = rootgrp.variables['CorrIndexArray'][:100]\r\n corrData = rootgrp.variables['corrData'][:100]\r\n\r\n rootgrp.close()\r\n\r\n # run function\r\n C_ICT_err, C_ICT_raw_err = calc_CC_err(u_raw, times, corrData)\r\n\r\n return 0", "def read_file_data(file_name):\n dummy = 0\n no_file = 0\n try:\n data = read_generic_hdf5(file_name)\n\n except:\n print(\"File \" + file_name + \" nicht vorhanden.\")\n zh = dummy\n phi = dummy\n rho = dummy\n zdr = dummy\n vel = dummy\n\n no_file = 1\n return dummy, dummy, dummy, dummy, dummy, dummy, no_file, dummy\n\n zh = 
transform(data['scan0/moment_10']['data'],\n data['scan0/moment_10']['attrs']['dyn_range_min'],\n data['scan0/moment_10']['attrs']['dyn_range_max'],\n data['scan0/moment_10']['attrs']['format'])\n\n phi = transform(data['scan0/moment_1']['data'],\n data['scan0/moment_1']['attrs']['dyn_range_min'],\n data['scan0/moment_1']['attrs']['dyn_range_max'],\n data['scan0/moment_1']['attrs']['format'])\n\n rho = transform(data['scan0/moment_2']['data'],\n data['scan0/moment_2']['attrs']['dyn_range_min'],\n data['scan0/moment_2']['attrs']['dyn_range_max'],\n data['scan0/moment_2']['attrs']['format'])\n\n zdr = transform(data['scan0/moment_9']['data'],\n data['scan0/moment_9']['attrs']['dyn_range_min'],\n data['scan0/moment_9']['attrs']['dyn_range_max'],\n data['scan0/moment_9']['attrs']['format'])\n\n vel = transform(data['scan0/moment_5']['data'],\n data['scan0/moment_5']['attrs']['dyn_range_min'],\n data['scan0/moment_5']['attrs']['dyn_range_max'],\n data['scan0/moment_5']['attrs']['format'])\n\n radar_height = data['where']['attrs']['height']\n s_format = '%SZ'\n if location == 'Juelich':\n s_format = '%S.000Z'\n\n print(data['scan0/how']['attrs']['timestamp'].decode())\n fstring = '%Y-%m-%dT%H:%M:{0}'.format(s_format)\n print(\"Time:\", dt.datetime.strptime(data['scan0/how']['attrs']['timestamp'].decode(), fstring))\n\n return zh, phi, rho, zdr, vel, dt.datetime.strptime(data['scan0/how']['attrs']['timestamp'].decode(),\n '%Y-%m-%dT%H:%M:' + s_format), no_file, radar_height", "def readWRF_CMAQfile(gridFile, dataFile, lat, lon, vrs, eqs, wthFlag): \n # Open the WRF GRIDCRO2D file to determine the WRF pixel for lat/lon.\n GRID = xr.open_dataset(gridFile)\n ilat, ilon = find_WRF_pixel(GRID.LAT[0,0,:,:].values,GRID.LON[0,0,:,:].values,lat,lon)\n # Open WRF-CMAQ data file.\n print('Reading: ', dataFile)\n DATA = xr.open_dataset(dataFile)\n # Create a datetime index.\n datestr = str(DATA.SDATE)\n date = datetime(int(datestr[0:4]),1,1) + timedelta(int(datestr[4:])-1)\n time = [date + timedelta(hours=float(t)) for t in DATA.TSTEP]\n #\n # ............................... CONTAMINANT DATA ............................\n #\n # Create a pandas dataframe with contaminant variables.\n ctm = pd.DataFrame({},index=time)\n #ctm = ctm.set_index(pd.DatetimeIndex(ctm.index))\n \n for x in range(len(vrs)):\n vr = vrs.values[x]\n eq = eqs.values[x]\n dat = DATA[vr].values[:,0,ilat,ilon]\n \n #print(eq)\n if(eq[-1] == 'S'):\n air = DATA['AIR_DENS'].values[:,0,ilat,ilon]\n dat = dat/1000000000/air\n #dat.apply(lambda x: x/1000000000/AIR_DENS)\n else:\n pass\n split_eq = eq.split('/')\n mid_split = split_eq[1].split('*')\n base = float(mid_split[0])\n snd = float(mid_split[1])\n thrd = float(split_eq[2])\n dat = dat / base * snd / thrd\n \n \n ctm[vr] = dat\n\n # ........................... WEATHER DATA ............................\n #\n # Read contaminat data from WRF-CMAQ data file.\n if(wthFlag):\n if('AIR_DENS' in DATA):\n T = DATA.SFC_TMP.values[:,0,ilat,ilon] + 273.15 # in K\n P = DATA.AIR_DENS.values[:,0,ilat,ilon]*287.0*T\n wspd = DATA.WSPD10.values[:,0,ilat,ilon]\n wdir = DATA.WDIR10.values[:,0,ilat,ilon]\n # Conversion from relative humidity to mixing ration \n # ....http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf\n A = 6.116441\n m = 7.591386\n Tn = 240.7263\n es = A*10**(m*(T-273.15)/(T-273.15+Tn))\n ws = 0.622 * (es/P)\n w = DATA.RH.values[:,0,ilat,ilon] * ws * 1000. 
# Factor of 1000 converts from kg/kg to g/kg.\n \n # Create a pandas dataframe with meteorological variables.\n wth = pd.DataFrame({'Ta':T, \n 'Pb':P,\n 'Ws':wspd,\n 'Wd':wdir,\n 'Hr':w},\n index=time)\n else:\n wth = pd.DataFrame({})\n \n GRID.close()\n DATA.close()\n return ctm, wth", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def _test():\n # set grid\n grid_logt = [6, 7., 9]\n grid_feh = [-2.2, -1., 0, 1., 10]\n grid_mini = np.arange(0.01, 12, 0.01)\n\n # get isochrones\n vgrid_feh, vgrid_logt, grid_list, isoc_list = get_isochrone_grid(\n grid_feh, grid_logt, model=\"parsec12s\", phot=\"sloan\", n_jobs=1)\n\n # transform into cube data\n cube_data_list, cube_name_list = interpolate_to_cube(\n vgrid_feh, vgrid_logt, grid_mini, isoc_list,\n cube_quantities=[\"M_act\", \"g\", \"r\"])\n\n # cube HDUs\n hl = cubelist_to_hdulist(cube_data_list, cube_name_list)\n hl.info()\n # hl.writeto()\n\n # combine isochrone tables\n comb_isoc = combine_isochrones(isoc_list)\n # comb_isoc.write()\n\n # write isochrone list into separate files\n # write_isoc_list(isoc_list, grid_list, \"/pool/comb_isoc\")\n return hl", "def 
main_bf_MISR(h5f, output_folder, SPATIAL_RESOLUTION=0.5, VZA_MAX=18, CAMERA='AN'):\n\n # =============================================================================\n # 1. Initialization\n # calculate constant parameters\n # initialize output arrays and output hdf5 file\n # check the number of CERES granules \n # =============================================================================\n\n print(\"-------MISR----->\", h5f)\n print(\"-------FID------<>\", h5f.fid)\n print(\"---->\", type(h5f))\n if type(h5f.fid) is str:\n output_nc_name = h5f.fid.split('/')[-1].replace('TERRA_BF_L1B', 'CLIMARBLE')\n else:\n output_nc_name = h5f.fid.name. \\\n decode(\"utf-8\").split('/')[-1]. \\\n replace('TERRA_BF_L1B', 'CLIMARBLE')\n\n output_nc_name = output_nc_name.replace('.h5', '.nc')\n\n # \n NUM_POINTS = 1 / SPATIAL_RESOLUTION\n NUM_LATS = int(180 / SPATIAL_RESOLUTION)\n NUM_LONS = int(360 / SPATIAL_RESOLUTION)\n\n LAT_EDGES = np.arange(-90.0, 90.0001, SPATIAL_RESOLUTION)\n LON_EDGES = np.arange(-180.0, 180.0001, SPATIAL_RESOLUTION)\n\n # \n orbit_radiance_sum = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_radiance_num = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_nc_out = os.path.join(output_folder, output_nc_name)\n\n\n # =============================================================================\n # 2. Main processing\n # Loop through each CERES granule and sort radiances into the corresponding lat/lon bins\n # When encounters an asceding granule, script will move to the next granule\n # =============================================================================\n\n # USE MODIS granules to match first and last time of the descending node\n MISR_blocks = get_descending(h5f, 'MISR.{}'.format(CAMERA))\n if MISR_blocks[0] == 0:\n print(\">> IOError( no available MODIS granule in orbit {} )\".format(bf_file))\n return\n\n # LOAD lat/lon here\n lat = h5f['MISR/Geolocation/GeoLatitude'][:]\n lon = h5f['MISR/Geolocation/GeoLongitude'][:]\n\n # LOAD radiance here\n MISR_bands = ['Blue', 'Green', 'Red', 'NIR']\n rads_all = []\n for iband in MISR_bands:\n rads_all.append(h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, iband)][:])\n\n # SPECIFY data dimension to interpolate SZA/VZA\n rad_shape = (128, 512)\n \n\n # LOOP through MISR blocks (starts from 0)\n for iblk in MISR_blocks:\n\n # INTERPOLATE sza and vza (this part can be replaced by a more accurate function)\n raw_sza = h5f['MISR/Solar_Geometry/SolarZenith'][iblk]\n raw_vza = h5f['MISR/{}/Sensor_Geometry/{}Zenith'.format(CAMERA, ''.join(c.lower() if i==1 else c for i,c in enumerate(CAMERA)))][iblk]\n np.place(raw_sza, raw_sza<0, np.nan)\n np.place(raw_vza, raw_vza<0, np.nan)\n blk_sza = resize(raw_sza, rad_shape)\n blk_vza = resize(raw_vza, rad_shape)\n\n\n # SELECT lat/lon\n idx_geometry = np.where((blk_sza<89.0) & (blk_vza<VZA_MAX))\n select_lat = lat[iblk][idx_geometry]\n select_lon = lon[iblk][idx_geometry]\n\n\n # SELECT spectral radiances here\n # Aggregate 275-m res data to 1.1-km when necessary\n # Separate band by band to allow one (or more) band(s) failure\n for iband, band_name in enumerate(MISR_bands, start=0):\n blk_rad = rads_all[iband][iblk]\n # blk_rad = h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, band_name)][iblk]\n\n if blk_rad.shape == (512, 2048): \n # 275-m res band\n np.place(blk_rad, blk_rad<0, np.nan)\n fnl_blk_rad = np.nanmean(np.reshape(blk_rad, (blk_rad.shape[0]//4, 4, blk_rad.shape[1]//4,4)), axis=(1,3))\n else:\n fnl_blk_rad = blk_rad\n\n\n select_rad = np.nan_to_num(fnl_blk_rad[idx_geometry])\n fnl_idx 
= np.where((select_rad>0)&(select_rad<1000))[0]\n\n fnl_lat = select_lat[fnl_idx] * -1\n fnl_lon = select_lon[fnl_idx]\n fnl_rad = select_rad[fnl_idx]\n\n try:\n rad_sum, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='sum')\n rad_cnt, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='count')\n\n orbit_radiance_sum[:, :, iband] += rad_sum\n orbit_radiance_num[:, :, iband] += rad_cnt\n except ValueError:\n continue\n\n # =============================================================================\n # 3. Save results\n # =============================================================================\n orbit_radiance_num = np.array(orbit_radiance_num, dtype='int16')\n\n coords_lats = np.linspace(90-SPATIAL_RESOLUTION/2, -90+SPATIAL_RESOLUTION/2, NUM_LATS)\n coords_lons = np.linspace(-180+SPATIAL_RESOLUTION/2, 180-SPATIAL_RESOLUTION/2, NUM_LONS)\n\n xr_rad_sum = xr.DataArray(orbit_radiance_sum, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_num = xr.DataArray(orbit_radiance_num, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_sum.encoding['_FillValue'] = 0\n xr_rad_num.encoding['_FillValue'] = 0\n xr_rad_sum.name = 'MISR spec rad sum'\n xr_rad_num.name = 'MISR spec rad num'\n xr_rad_sum.to_netcdf(orbit_nc_out, 'a')\n xr_rad_num.to_netcdf(orbit_nc_out, 'a')\n return orbit_nc_out", "def test_CFCalculation_hdf_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2, m=0, spin_up=-571.68845386399, spin_down=-558.2336974657351, unit='K', convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-34.982539807305045,\n spin_down=-21.850435868549834,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=3.8503494779930776, spin_down=2.168215129491561, unit='K',\n convention='Stevens'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=110.50156137060345,\n spin_down=85.58558990378205,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=6, spin_up=110.50156137060345, spin_down=85.58558990378205, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration()\n\n assert results == expected_results", "def test_cl_fix_file(mock_get_filepath, cl_file, tmp_path):\n mock_get_filepath.return_value = os.path.join(tmp_path,\n 'fixed_cesm2_waccm_cl.nc')\n fix = Cl(None)\n fixed_file = fix.fix_file(cl_file, tmp_path)\n mock_get_filepath.assert_called_once_with(tmp_path, cl_file)\n fixed_dataset = Dataset(fixed_file, mode='r')\n assert fixed_dataset.variables['lev'].standard_name == (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n assert fixed_dataset.variables['lev'].formula_terms == (\n 'p0: p0 a: a b: b ps: ps')\n assert fixed_dataset.variables['lev'].units == '1'\n np.testing.assert_allclose(fixed_dataset.variables['a'][:], [1.0, 2.0])\n np.testing.assert_allclose(fixed_dataset.variables['b'][:], [0.0, 1.0])\n np.testing.assert_allclose(fixed_dataset.variables['a_bnds'][:],\n [[0.0, 1.5], [1.5, 3.0]])\n np.testing.assert_allclose(fixed_dataset.variables['b_bnds'][:],\n [[-1.0, 0.5], [0.5, 2.0]])", "def test_compute_Sv_ek80_CW_complex_BB_complex(ek80_cal_path):\n ek80_raw_path = ek80_cal_path / \"2018115-D20181213-T094600.raw\"\n ed = ep.open_raw(ek80_raw_path, 
sonar_model=\"EK80\")\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"CW\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"BB\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)", "def test_CFCalculation_txt_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n #Make sure new script produces the same result as old one\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=-419.7891726292168,\n spin_down=-414.7152560307904,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-35.92607948104669,\n spin_down=-26.384951772020756,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=6.522900740505054, spin_down=5.488104692050172, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation(reference_radius='cdn')\n cf.readPot('files/cf_calculation/VKS.2.0.dat',\n 'files/cf_calculation/VKS.4.0.dat',\n 'files/cf_calculation/VKS.6.0.dat',\n lm=[(2, 0), (4, 0), (6, 0)])\n cf.readCDN('files/cf_calculation/Nd.dat', header=3)\n cf.cdn['RMT'] = 3.138049652\n results = cf.performIntegration()\n\n assert results == expected_results", "def test_WCSGetCoverageNetCDF4_testdatanc(self):\n AdagucTestTools().cleanTempDir()\n status, data, headers = AdagucTestTools().runADAGUCServer(\"source=testdata.nc&SERVICE=WCS&REQUEST=GetCoverage&COVERAGE=testdata&CRS=EPSG%3A4326&FORMAT=NetCDF4&BBOX=-180,-90,180,90&RESX=1&RESY=1\",\n env=self.env, args=[\"--report\"])\n self.assertEqual(status, 0)\n self.assertEqual(data.getvalue()[0:6], b'\\x89HDF\\r\\n')", "def readcoor(self, filepath, date):\n doy = date.timetuple().tm_yday\n filelist = glob.glob(\n os.path.join(filepath, ''.join(\n ['*', '{:0>3d}.{:0>2d}'.format(doy, date.year % 100), 'coor'])))\n if not filelist:\n print(\"Can't find %s's coor file in %s\" % (str(date), filepath))\n return None\n\n report = defaultdict(list)\n refname = re.compile(r'(\\w+)\\d{3}\\.\\d{2}coor')\n for path in filelist:\n station = refname.findall(path)[0]\n report['name'].append(station)\n try:\n data = pd.read_table(\n path, delim_whitespace=True).apply(\n pd.to_numeric, errors='coerce')\n if 'U' not in data or 'N' not in data or 'E' not in data:\n raise ValueError\n except:\n report['U_rms'].append(0)\n report['N_rms'].append(0)\n report['E_rms'].append(0)\n report['H_rms'].append(0)\n report['U_95'].append(0)\n report['N_95'].append(0)\n report['E_95'].append(0)\n report['H_95'].append(0)\n report['effective_rate'].append(0)\n continue\n num = data.shape[0]\n if num == 0:\n report['U_rms'].append(0)\n report['N_rms'].append(0)\n report['E_rms'].append(0)\n report['H_rms'].append(0)\n report['U_95'].append(0)\n report['N_95'].append(0)\n report['E_95'].append(0)\n report['H_95'].append(0)\n report['effective_rate'].append(0)\n continue\n\n threshold_index = int(num * 0.95)\n u_rms = np.sqrt(data.U.pow(2).sum() / num)\n n_rms = np.sqrt(data.N.pow(2).sum() / num)\n e_rms = np.sqrt(data.E.pow(2).sum() / num)\n report['U_rms'].append(u_rms)\n report['N_rms'].append(n_rms)\n report['E_rms'].append(e_rms)\n data_H = np.sqrt((data.N.pow(2) + data.E.pow(2)))\n report['H_rms'].append(np.sqrt(data_H.pow(2).sum() / num))\n report['U_95'].append(data.U.abs().sort_values().iloc[\n threshold_index])\n report['N_95'].append(data.N.abs().sort_values().iloc[\n threshold_index])\n report['E_95'].append(data.E.abs().sort_values().iloc[\n threshold_index])\n report['H_95'].append(data_H.abs().sort_values().iloc[\n 
threshold_index])\n report['effective_rate'].append(num / 86400.0)\n\n cols = [\n 'name', 'U_rms', 'N_rms', 'E_rms', 'H_rms', 'U_95', 'N_95', 'E_95',\n 'H_95', 'effective_rate'\n ]\n report = pd.DataFrame(report, columns=cols).sort_values('name')\n return report", "def readcoh(filename):\r\n \r\n fid=file(filename,'r')\r\n lines=fid.readlines()\r\n station=lines[0].replace('\\n','')\r\n period=[]\r\n freq=[]\r\n coh1=[]\r\n zcoh1=[]\r\n coh2=[]\r\n zcoh2=[]\r\n coh3=[]\r\n zcoh3=[]\r\n for ii in range(len(lines)-2):\r\n cohstr=lines[ii+2].replace('\\n','')\r\n cohlst=cohstr.split(tsp)\r\n period.append(float(cohlst[0]))\r\n freq.append(float(cohlst[1]))\r\n coh1.append(float(cohlst[2]))\r\n zcoh1.append(float(cohlst[3]))\r\n coh2.append(float(cohlst[4]))\r\n zcoh2.append(float(cohlst[5]))\r\n coh3.append(float(cohlst[6]))\r\n zcoh3.append(float(cohlst[7]))\r\n \r\n return station,np.array(period),np.array(freq),np.array(coh1),\\\r\n np.array(zcoh1),np.array(coh2),np.array(zcoh2),np.array(coh3),\\\r\n np.array(zcoh3)", "def test_get_calibrated_data(self):\n\n fn = self.probe.readCalibratedSensorData\n self.run_datatest(fn)", "def test_CFCalculation_hdf_files_wybourne_convention():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=(-1143.37690772798 + 0j),\n spin_down=(-1116.4673949314702 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=4,\n m=0,\n spin_up=(-279.86031845844036 + 0j),\n spin_down=(-174.80348694839867 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=0,\n spin_up=(61.60559164788924 + 0j),\n spin_down=(34.69144207186498 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=(116.32750335918315 + 4.696327749935313e-06j),\n spin_down=(90.09789430612014 + 3.6373963939901583e-06j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=6,\n spin_up=(116.32750335918315 - 4.696327749935313e-06j),\n spin_down=(90.09789430612014 - 3.6373963939901583e-06j),\n unit='K',\n convention='Wybourne')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration(convert=False)\n\n print(results)\n assert results == expected_results", "def load_chaz_storms(path, mask_distance=None, mask_coordinate=(0.0, 0.0),\n mask_category=None, categorization=\"NHC\"):\n\n\n # Load the mat file and extract pertinent data\n #data = xarray.open_dataset(path)\n data = netCDF4.Dataset(path)\n print(data.dimensions.keys())\n print(data.variables.keys())\n\n # Days from 1-1-1950\n # start_date = datetime.datetime(1950, 1, 1)\n #stormIDs = data.variables['time']['stormID'][:]\n\n #stormIDs = data['time']['stormID']\n storms = []\n\n time_length = data['Mwspd'].shape[0]\n num_tracks = data['Mwspd'].shape[1]\n num_intensities = data['Mwspd'].shape[2]\n\n for i in range(num_tracks):\n\n # Extract initial data ranges\n for n in range(num_intensities):\n\n # Use intensity to find non-nans and extract correct arrays\n max_wind_speed = numpy.array(data.variables['Mwspd'][:, i, n])\n index_set = (numpy.isnan(max_wind_speed) - 1).nonzero()[0]\n #print(\"\")\n #print(\"Max Wind speed\")\n #print(max_wind_speed)\n\n index = len(index_set)\n t = numpy.array(data.variables['time'][0:index, i])\n x = numpy.array(data.variables['longitude'][0:index, i])\n y = numpy.array(data.variables['latitude'][0:index, i])\n\n #print(\"\")\n #print(x)\n #print(\"\")\n #print(y)\n\n # Remove 
zero-length intensities\n if len(index_set) > 0:\n # Create storm object\n storm = clawpack.geoclaw.surge.storm.Storm()\n storm.ID = i * num_intensities + n\n\n # Initialize the date set\n\n storm.t = [datetime.datetime(2000, 1, 1, 0) + \\\n datetime.timedelta(hours=6) * i\n for i in range(len(index_set))]\n storm.time_offset = storm.t[0]\n #storm.t = t[index_set]\n ## Add fields with proper non-nan values\n #storm.t[0] = datetime.datetime(2007, 1, 1, 0)\n #for i in range(1, index_set):\n # storm.t[i] = storm.t[i-1] + datetime.timedelta(hours = 6)\n\n #storm.t = t[index_set]\n #storm.t -= storm.t[0]\n #storm.t *= 24.0 * 60.0**2\n\n ## Check for missing last time point and adjust index set\n #if storm.t[-1] < 0:\n # index_set = index_set[:-1]\n # #print(index_set)\n # storm.t = storm.t[:-1]\n\n storm.eye_location = numpy.empty((len(index_set), 2))\n x[index_set] = x[index_set] - 360.0 * numpy.ones(len(index_set))\n storm.eye_location[:, 0] = x[index_set]\n storm.eye_location[:, 1] = y[index_set]\n\n #storm.eye_location = numpy.empty((2, len(index_set)))\n #storm.eye_location[0, :] = x[index_set]\n #storm.eye_location[1, :] = y[index_set]\n\n # TODO: Convert from knots\n storm.max_wind_speed = max_wind_speed[index_set]\n #print(\"Storm Max Wind Speed in Knots\") \n #print(storm.max_wind_speed)\n #print(\" \") \n # Assumed values\n storm.storm_radius = 500000 * numpy.ones(len(index_set))\n\n\n # Calculate Radius of Max Wind\n C0 = 218.3784 * numpy.ones(len(index_set))\n storm.max_wind_radius = C0 - 1.2014 * storm.max_wind_speed + \\\n (storm.max_wind_speed / 10.9884)**2 - \\\n (storm.max_wind_speed / 35.3052)**3 - \\\n 145.5090 * \\\n numpy.cos(storm.eye_location[:, 1] * 0.0174533)\n \n #storm.max_wind_radius = units.convert(storm.max_wind_radius, 'nmi', 'm')\n #storm.max_wind_speed = units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n \n #units.convert(storm.max_wind_radius, 'nmi', 'm')\n #units.convert(storm.max_wind_speed,\n # 'knots', 'm/s')\n\n # Define maximum radius for all sotrms\n storm.storm_radius = 50e3 * numpy.ones(len(index_set))\n\n # From Kossin, J. P. WAF 2015\n a = -0.0025\n b = -0.36\n c = 1021.36\n storm.central_pressure = ( a * storm.max_wind_speed**2\n + b * storm.max_wind_speed\n + c)\n\n include_storm = True\n if mask_distance is not None:\n distance = numpy.sqrt((storm.eye_location[:, 0] -\n mask_coordinate[0])**2 +\n (storm.eye_location[:, 1] -\n mask_coordinate[1])**2)\n inlcude_storm = numpy.any(distance < mask_distance)\n\n if mask_category is not None:\n category = storm.category(categorization=categorization)\n include_storm = numpy.any(category > mask_category)\n #raise NotImplementedError(\"Category masking not implemented.\")\n\n if include_storm:\n storms.append(storm)\n return storms" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
keys_to_track order is important! Matches will be tested in this order.
def __init__(self, keys_to_track):
    self.keys_to_track = keys_to_track
    self.tracker = {}
    for key_to_track in self.keys_to_track:
        self.tracker[key_to_track] = {}
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1ybJ2itxCxPCPkcA9sOgTO',\n 6699: '1182pxG4uNxr3QqIH8b8k0',\n }\n\n matches = {track.i_id: track.id\n for track in self.tracks\n if track.i_id in targets}\n\n for i_id, s_id in targets.iteritems():\n self.assertEqual(s_id, matches[i_id])", "def _order_match(query_key: ReprKey, match_key: ReprKey):\n if query_key.path <= match_key.path:\n return query_key, match_key\n return match_key, query_key", "def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')", "def matches(self):\n return self.set_of_matches", "def get_possible_keys(self):", "def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None", "def test_keywords(self):\n\n test_cases = (\n makeTestCase('adele 21',\n AlbumResultMatcher(title=Equals('21'), artist=Equals('adele')),\n ArtistResultMatcher(title=Equals('adele'))),\n makeTestCase('kanye power',\n TrackResultMatcher(title=Equals('power', artist=Equals('kanye west'))),\n ArtistResultMatcher(title=Equals('kanye west')),\n AlbumResultMatcher(title=Equals('my beautiful dark twisted fantasy'))),\n makeTestCase('ratat party with children',\n TrackResultMatcher(title=Equals('party with children', artist=Equals('ratatat'))),\n ArtistResultMatcher(title=Equals('ratatat'))),\n makeTestCase('flobot fight with tools handlebars',\n TrackResultMatcher(title=Equals('handlebars')),\n ArtistResultMatcher(title=Equals('flobots')),\n AlbumResultMatcher(title=Equals('fight with tools')))\n )\n\n self._run_tests(tests, {})", "def add(self, obj, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if match_val is None or match_val == '':\r\n pass\r\n else:\r\n self.tracker[key_to_track][match_val] = obj", "def _order_keys(self, keys):\n sorted_keys = list(sorted(keys))\n if self.baseline_label in keys:\n sorted_keys.remove(self.baseline_label)\n sorted_keys.append(self.baseline_label)\n return sorted_keys", "def test_keys(self):\n self.assertEqual(self.cache.keys(), [\"some\", \"foo\"])", "def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by 
Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)", "def testTrackDict1(self):\n\n goodTrackDict = {\n \"number\": \"1\", \"uid\": \"1493619965\",\n \"codec_id\": \"V_MPEG4/ISO/AVC\", \"codec_private_length\": \"44\",\n \"codec_private_data\": \"014d4028ffe1001c80\", \"language\": \"eng\",\n \"pixel_dimensions\": \"1920x1080\", \"display_dimensions\": \"1920x1080\",\n \"default_track\": \"1\", \"forced_track\": \"0\", \"enabled_track\": \"1\",\n \"packetizer\": \"mpeg4_p10_video\", \"default_duration\": \"41708332\",\n \"content_encoding_algorithms\": \"3\"\n }\n\n trackLine = _buildTrackLine(0, 'video', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )", "def test_sample_mapped_keys(self):\r\n\r\n # With num_coverage=1 only the keys will be sampled\r\n actual = sample_mapped_keys(self.test_map, 1)\r\n self.assertEqual(actual, {'1': ['1'], '2': ['2']})\r\n\r\n actual = sample_mapped_keys(self.test_map, 3)\r\n for key in actual.keys():\r\n # check number of sampled keys\r\n self.assertEqual(3, len(actual[key]))\r\n for x in actual[key]:\r\n # check that sampled key is in the full list\r\n correct = list(self.test_map[key])\r\n correct.append(key)\r\n self.assertTrue(x in correct)", "def test_match_ordered(self):\n first = dict(\n a=1,\n b=2,\n )\n\n second = OrderedDict(\n b=2,\n a=1,\n )\n\n check_keys_match_recursive(first, second, [])", "def test_search_housekeeping(self):\n pass", "def test_dict_completion(dict_, match, extras):\n regex = re.compile(match)\n got = set(FIFF[key] for key in FIFF if regex.search(key) is not None)\n for e in extras:\n got.add(e)\n want = set(dict_)\n assert got == want, match", "def match_keypoints(self):\n\n # Create a BFMMatcher object to find all the matching keypoints\n bf = BFMatcher_create(NORM_HAMMING)\n\n # Find matching features\n matches = bf.knnMatch(self.descriptors[0], self.descriptors[1], k=2)\n\n # Keep only strong matches\n for m, n in matches:\n if m.distance < 0.6 * n.distance:\n self.good_matches.append(m)\n\n # Draw only matching strong keypoints for images\n if self.details:\n matched_keypoints1 = [self.keypoints[0][m.queryIdx] for m in self.good_matches]\n matched_keypoints2 = [self.keypoints[1][m.trainIdx] for m in self.good_matches]\n\n imshow(\"Image 1: Matched Strong Keypoints\",\n drawKeypoints(self.images[0], matched_keypoints1, None, (255, 0, 255)))\n imshow(\"Image 2: Matched Strong Keypoints\",\n drawKeypoints(self.images[1], matched_keypoints2, None, (255, 0, 255)))\n\n waitKey(0)", "def _match_tracks(artist, title, mb_tracks):\n # pylint: disable=R0914\n 
dbg(\"artists is %s\", artist)\n dbg(\"title is %s\", title)\n title_artist_str = c.g + title + c.w, c.g + artist + c.w\n xprint(\"\\nSearching for %s by %s\\n\\n\" % title_artist_str)\n\n def dtime(x):\n \"\"\" Format time to M:S. \"\"\"\n return time.strftime('%M:%S', time.gmtime(int(x)))\n\n # do matching\n for track in mb_tracks:\n ttitle = track['title']\n length = track['length']\n xprint(\"Search : %s%s - %s%s - %s\" % (c.y, artist, ttitle, c.w,\n dtime(length)))\n q = \"%s %s\" % (artist, ttitle)\n w = q = ttitle if artist == \"Various Artists\" else q\n query = generate_search_qs(w, 0, result_count=50)\n dbg(query)\n have_results = _search(q, query, splash=False, pre_load=False)\n\n if not have_results:\n xprint(c.r + \"Nothing matched :(\\n\" + c.w)\n continue\n\n results = g.model.songs\n s, score = _best_song_match(results, artist + \" \" + ttitle, length)\n cc = c.g if score > 85 else c.y\n cc = c.r if score < 75 else cc\n xprint(\"Matched: %s%s%s - %s \\n[%sMatch confidence: \"\n \"%s%s]\\n\" % (c.y, s.title, c.w, fmt_time(s.length),\n cc, score, c.w))\n yield s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add obj as a match for match_dict values. Checks to make sure match_dict keys are valid.
def add(self, obj, match_dict):
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track

    for key_to_track in self.keys_to_track:
        if match_dict.has_key(key_to_track):
            match_val = match_dict[key_to_track]
            if match_val is None or match_val == '':
                pass
            else:
                self.tracker[key_to_track][match_val] = obj
[ "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def match_from_dict(match_dict):\n kwargs = {}\n for of_match, field in match_dict.items():\n of_match = OLD_MATCH_FIELDS.get(of_match, of_match)\n test_config_condition(\n of_match not in MATCH_FIELDS, \"Unknown match field: %s\" % of_match\n )\n try:\n encoded_field = MATCH_FIELDS[of_match](field)\n except TypeError as type_error:\n raise InvalidConfigError(\n \"%s cannot be type %s\" % (of_match, type(field))\n ) from type_error\n kwargs[of_match] = encoded_field\n\n return parser.OFPMatch(**kwargs)", "def add_match(self, event):\n event = copy.deepcopy(event)\n # Convert datetime's back to timestamps\n ts = self.rules.get(\"timestamp_field\")\n if ts in event:\n event[ts] = dt_to_ts(event[ts])\n\n self.matches.append(event)", "def add_matching(self, matching: list):\n self.matching = matching", "def add_match(self,t,match):\n self.__matches[t].append(match)", "def apply_match_match(match,orig_match,apply_match):\n orig_match = dict((x,apply_match(match,y)) for x,y in orig_match.iteritems())\n orig_match.update((x,y) for x,y in match.iteritems() if x not in orig_match)\n return orig_match", "def add_match(self, match):\n self.matches.append(match)", "def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None", "def parse_match_stage(dict, field_wrapper):\n print(f'aggr_utils.parse_match_stage: dict = {dict}, wrapper = {field_wrapper}')\n match = {}\n for key in dict:\n if isinstance(dict[key], list):\n if field_wrapper:\n match['$or'] = [ {key: field_wrapper(field)} for field in dict[key]]\n else:\n match[key] = {'$in': dict[key]}\n elif isinstance(dict[key], str):\n if key == \"_id\":\n match[key] = ObjectId(dict[key])\n elif field_wrapper:\n match[key] = field_wrapper(dict[key])\n else:\n match[key] = dict[key]\n return match", "def add_match(self, match_payload):\n if self.order.status != \"open\":\n self._logger.info(\"Ignoring match payload, order %s not open anymore\", self.order.order_id)\n return\n\n other_order_id = OrderId(match_payload.trader_id, match_payload.order_number)\n if other_order_id not in self.matches:\n self.matches[other_order_id] = []\n\n # We do not want to add the match twice\n exists = False\n for match_payload in self.matches[other_order_id]:\n match_order_id = OrderId(match_payload.trader_id, match_payload.order_number)\n if match_order_id == other_order_id:\n exists = True\n break\n\n if not exists:\n self.matches[other_order_id].append(match_payload)\n\n if not self.queue.contains_order(other_order_id) and not (self.outstanding_request and self.outstanding_request[2] == other_order_id):\n self._logger.debug(\"Adding match payload with own order id %s and other id %s to queue\",\n self.order.order_id, other_order_id)\n self.queue.insert(0, match_payload.assets.price, other_order_id)\n\n if not self.schedule_task:\n # Schedule a timer\n self._logger.info(\"Scheduling batch match of order %s\" % str(self.order.order_id))\n self.schedule_task = reactor.callLater(self.community.settings.match_window, self.start_process_matches)\n elif 
self.schedule_task_done and not self.outstanding_request:\n # If we are currently not processing anything and the schedule task is done, process the matches\n self.process_match()", "def process_match(self, match):\n if not self.match_processed(match):\n self.elo.update(match)\n self.processed_matches.add(match['key'])", "def buildmatchfromdict(args):\n return SimDataMatch(**args)", "def accept_dict(match, include_rejected=False, include_denied=False):\n with salt.key.get_key(__opts__) as skey:\n return skey.accept(\n match_dict=match,\n include_rejected=include_rejected,\n include_denied=include_denied,\n )", "def test_dict_completion(dict_, match, extras):\n regex = re.compile(match)\n got = set(FIFF[key] for key in FIFF if regex.search(key) is not None)\n for e in extras:\n got.add(e)\n want = set(dict_)\n assert got == want, match", "def add_match(self, f, exclusions=None, **match_kwargs):\n assert not self._checked, 'can\\'t add after matchlist has been checked'\n\n if not match_kwargs: # Do nothing if no match_kwargs.\n return f\n\n self._verify_match_kwargs(match_kwargs, exclusions)\n self.matchers.append((match_kwargs, exclusions, f))\n return f", "def match(self, obj):\n raise NotImplementedError", "def add_ignored_match(self, secret: dict) -> None:\n\n matches_ignore = [\n match[\"match\"] if isinstance(match, dict) else match\n for match in self.matches_ignore\n ]\n if secret[\"match\"] not in matches_ignore:\n self.matches_ignore.append(secret)\n else:\n for match in self.matches_ignore:\n if (\n isinstance(match, dict)\n and match[\"match\"] == secret[\"match\"]\n and match[\"name\"] == \"\"\n ):\n match.update({\"name\": secret[\"name\"]})", "def __add__(aMatchList, bMatchList):\n for id in bMatchList._matches.keys():\n aMatchList.addMatch(id, bMatchList._matches[id])\n return aMatchList", "def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):\n matcher = re.compile(pattern)\n for line in iterable:\n match = matcher.match(line)\n if not match:\n if must_match:\n raise ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must match is set to true\" % (line, pattern))\n else:\n continue\n key = match.group(1).strip()\n try:\n value = match.group(2).strip()\n value = json.loads(value) if len(value) > 0 else None\n if add_only_keys is None or key in add_only_keys:\n dictionary[key] = value\n logging.debug(\"Key-value item (%s=%s) has been parsed and added to dictionary\", key, str(value))\n except ValueError as err:\n if not ignore_errors:\n raise ConfigurationError(\"Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s'). Error is %s\" % (value, key, line, str(err)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a match using match_dict. Returns None if there is no match. Checks to make sure match_dict keys are valid.
def match(self, match_dict):
    for match_key in match_dict.keys():
        assert match_key in self.keys_to_track

    for key_to_track in self.keys_to_track:
        if match_dict.has_key(key_to_track):
            match_val = match_dict[key_to_track]
            if self.tracker[key_to_track].has_key(match_val):
                return self.tracker[key_to_track][match_val]
    return None
[ "def _match_key(d, key, require=False):\n if not isinstance(d,dict):\n raise RuntimeError('Input object must be a dict, got %s' % d)\n keys = list( d.keys() )\n keyslow = [k.lower() for k in keys]\n keylow = key.lower()\n if keyslow.count(keylow) != 0:\n ind = keyslow.index(keylow)\n return d[keys[ind]]\n else:\n if not require:\n return None\n else:\n raise RuntimeError(\"Could not find required key: '%s'\" % key)", "def find_key(rec_dict, target, depth=0):\r\n try:\r\n if isinstance(rec_dict, dict):\r\n for key, value in rec_dict.items():\r\n if key == target:\r\n return rec_dict[key]\r\n for key, value in rec_dict.items():\r\n r = find_key(value, target, depth + 1)\r\n if r is not None:\r\n return r\r\n except:\r\n traceback.print_exc()", "def match_from_dict(match_dict):\n kwargs = {}\n for of_match, field in match_dict.items():\n of_match = OLD_MATCH_FIELDS.get(of_match, of_match)\n test_config_condition(\n of_match not in MATCH_FIELDS, \"Unknown match field: %s\" % of_match\n )\n try:\n encoded_field = MATCH_FIELDS[of_match](field)\n except TypeError as type_error:\n raise InvalidConfigError(\n \"%s cannot be type %s\" % (of_match, type(field))\n ) from type_error\n kwargs[of_match] = encoded_field\n\n return parser.OFPMatch(**kwargs)", "def _FindField(output_dict, key):\n\n if not isinstance(output_dict, dict):\n return None\n if key in output_dict and len(output_dict[key]) > 0:\n return output_dict[key]\n\n for child in output_dict.values():\n value = _FindField(child, key)\n if value is not None:\n return value\n\n return None", "def dict_match(d, key, default=None):\n\n if key in d and \"[\" not in key:\n return d[key]\n else:\n for pattern, value in iteritems(d):\n if fnmatchcase(key, pattern):\n return value\n return default", "async def get_match_from_id(match_id: int) -> Match or None:\n if match_id is None:\n return None\n\n if match_id in match_library:\n return match_library[match_id]\n\n raw_data = await matchdb.get_raw_match_data(match_id)\n if raw_data is not None:\n return await make_match_from_raw_db_data(raw_data)\n else:\n return None", "def get_match_result(self, variable):\n try:\n return self._match_result_dict[variable]\n except KeyError:\n return None", "def _find(d: dict, k: str, T: int=75)->str:\n\n if k in d:\n return(k)\n if (k == ''):\n return(None)\n ret = extractOne(k, d.keys(), score_cutoff=T)\n if ret:\n return(ret[0])\n return(None)", "def find_key_value(self, key):\n lst = self.hash_table[self.hash_gen(key)]\n if lst != None:\n return lst.find_value(key)\n return None", "def find_in_dict(obj, key):\n if key in obj:\n return obj[key]\n for k, v in obj.items():\n if isinstance(v,dict):\n item = find_in_dict(v, key)\n if item is not None:\n return item", "def find_in_listdict(listdict, search_key, val, return_key):\n search_key_exists = False\n\n for _ in listdict:\n if search_key in _:\n search_key_exists = True\n if _[search_key] == val:\n if return_key in _:\n return _[return_key]\n else:\n raise ValueError(\"return_key doesn't exist\")\n else:\n if search_key_exists:\n return None\n else:\n raise ValueError(\"search_key doesn't exist\")", "def find(sheet: config.Spreadsheet, matcher: Callable[[dict], bool]) -> Optional[Row]:\n match = find_rows(sheet, matcher, 1)\n if not match:\n return None\n return match[0]", "def _find_key(dict_in, key_regex):\n keys = list(dict_in.keys())\n found_key = None\n for k in keys:\n if re.search(key_regex, k) is not None:\n if found_key is None:\n found_key = k\n else:\n raise ValueError('Multiple keys matching 
\"{}\" found'.format(key_regex))\n\n if found_key is None:\n raise ValueError('No key matching \"{}\" found'.format(key_regex))\n return found_key", "def findKey(dict, value, ifNotFound = None):\n for i in dict.items():\n if i[1] == value:\n return i[0]\n return ifNotFound", "def find_value(dic, key):\n return dic[key]", "def _search_list_of_dictionaries(key, value, list_of_dictionaries):\n\n for element in list_of_dictionaries:\n if element.get(key) == value:\n return element\n return None", "def findMatcher(self, ch):\n for m in self.matchers:\n if m.match(ch):\n return m\n return None", "def match(self, arg):\n regex = self._build_regex()\n# print regex, arg\n m = re.match(regex, arg)\n if m is None:\n return None\n return m.groupdict()", "def find_by(dict_or_list, key, value, *args):\n search_params = [(key, value)]\n if args:\n search_params += [(args[i], args[i+1]) for i in range(0, len(args), 2)]\n if isinstance(dict_or_list, dict):\n dict_or_list = dict_or_list.values()\n for item in dict_or_list:\n for key, value in search_params:\n _item = item\n keys = key.split(\".\")\n for key in keys[:-1]:\n if not hasmember(_item, key):\n break\n _item = getmember(_item, key)\n key = keys[-1]\n if isinstance(value, tuple):\n if not hasmember(_item, key) or getmember(_item, key) not in value:\n break\n else:\n if not hasmember(_item, key) or getmember(_item, key) != value:\n break\n else:\n return item\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function to populate key_matcher from self.records.
def _add_matches(self):
    for record in self.records:
        match_dict={key_to_track: record.get(key_to_track)
                    for key_to_track in self.key_matcher.keys()}
        self.key_matcher.add(obj=record,
                             match_dict=match_dict)
[ "def init_record_fields(self, run_record_key, record_fields):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n # make a dataset for the sparse fields allowed. this requires\n # a 'special' datatype for variable length strings. This is\n # supported by HDF5 but not numpy.\n vlen_str_dt = h5py.special_dtype(vlen=str)\n\n # create the dataset with the strings of the fields which are records\n record_group_fields_ds = record_fields_grp.create_dataset(run_record_key,\n (len(record_fields),),\n dtype=vlen_str_dt,\n maxshape=(None,))\n\n # set the flags\n for i, record_field in enumerate(record_fields):\n record_group_fields_ds[i] = record_field", "def __init__(self, raw_sequence, str_keys):\n self.raw_sequence = raw_sequence\n self.str_counts_map = defaultdict(int)\n # Don't know how many STRs we'll get.\n for k in str_keys:\n self.str_counts_map[k]", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def init_by_keys(cls, **query):\n raise NotImplementedError()", "def initMatch(self):\n pass", "def buildFromRecords(self, records):\n probes = {}\n for record in records:\n fields = {}\n for field in record.split(self.FIELD_DELIMITER):\n index = field.find(self.KEY_VALUE_DELIMITER)\n if index == -1 or len(field) < (index+1):\n raise InvariantViloation('detected invalid probe record in app info file - {}'.format(record))\n fields.update({field[:index]:field[index+1:]})\n if fields:\n try:\n fields[self.FIELD_FILE] = self.trimWorkspace(fields[self.FIELD_FILE], self.workspace)\n probes.update({\n fields[self.FIELD_RECORDER_RETURN_SITE] : AnchoredProbe(\n fields[self.FIELD_NAME], fields[self.FIELD_FILE], fields[self.FIELD_LINE],\n fields[self.FIELD_ATTRIBUTES], fields[self.FIELD_STATUS] == self.PROBE_STATUS_ENABLED,\n fields[self.FIELD_NAME]\n )\n })\n except KeyError as error:\n raise InvariantViloation('detected record missing field {} - \\n{}\\n{}'.format(error, record, fields))\n return probes", "def _test_keys_helper(\n self,\n keyvals: List[str],\n expected_keys: Dict[str, Dict[str, kvstore_types.Value]],\n ) -> None:\n\n keyvals = [kv for kv in keyvals if kv != \"\"]\n num_expected_keyvals = 0\n for _, keyvals_of_area in expected_keys.items():\n num_expected_keyvals += len(keyvals_of_area.keys())\n\n self.assertEqual(\n len(keyvals),\n num_expected_keyvals,\n f\"Expected number of KeyVals: {num_expected_keyvals}, got {len(keyvals)}\",\n )\n\n for keyval in keyvals:\n keyval = [token for token in keyval.split(\" \") if token != \"\"]\n\n actual_key = keyval[0]\n actual_originator = keyval[1]\n actual_ver = keyval[2]\n actual_hash = keyval[3]\n # The size is formatted as [num of KB] KB\"\n # We have to account for the space when tokenizing\n actual_size = f\"{keyval[4]} {keyval[5]}\"\n actual_area = keyval[6]\n actual_ttl = keyval[7]\n actual_ttl_version = keyval[9]\n\n self.assertTrue(\n actual_area in expected_keys, f\"Unexpected area, {actual_area}\"\n )\n self.assertTrue(\n actual_key in expected_keys[actual_area],\n f\"Unexpected key, {actual_key}\",\n )\n\n expected_keyval = expected_keys[actual_area][actual_key]\n\n self.assertEqual(\n actual_ver,\n str(expected_keyval.version),\n f\"Expected Version: {expected_keyval.version}, got: {actual_ver}\",\n )\n\n self.assertEqual(\n actual_originator,\n expected_keyval.originatorId,\n f\"Expected Originator ID: {expected_keyval.originatorId}, got: {actual_originator}\",\n )\n\n expected_ttl = 
str(datetime.timedelta(milliseconds=expected_keyval.ttl))\n self.assertEqual(\n actual_ttl,\n expected_ttl,\n f\"Expected TTL: {expected_ttl}, got: {actual_ttl}\",\n )\n\n self.assertEqual(\n actual_ttl_version,\n str(expected_keyval.ttlVersion),\n f\"Expected TTL Version: {expected_keyval.ttlVersion}, got: {actual_ttl_version}\",\n )\n\n if expected_keyval.hash is not None:\n hash_sign = \"+\" if expected_keyval.hash > 0 else \"\"\n expected_hash = f\"{hash_sign}{expected_keyval.hash:x}\"\n\n self.assertEqual(\n actual_hash,\n expected_hash,\n f\"Expected Hash: {expected_hash} got: {actual_hash}\",\n )\n\n value_size = len(\n expected_keyval.value if expected_keyval.value is not None else b\"\"\n )\n expected_size = sprint_bytes(\n 32 + len(actual_key) + len(expected_keyval.originatorId) + value_size\n )\n self.assertEqual(\n actual_size,\n expected_size,\n f\"Expected Size: {expected_size} got: {actual_size}\",\n )", "def train_test_split(record_dictionary, ratio=.5):\n\n num_training_records = int(len(record_dictionary) * ratio)\n\n keys = list(record_dictionary.keys())\n\n training_records = np.random.choice(\n keys, num_training_records, replace=False)\n testing_records = [key for key in keys if key not in training_records]\n\n training_dictionary = {\n record: record_dictionary[record]\n for record in training_records\n }\n testing_dictionary = {\n record: record_dictionary[record]\n for record in testing_records\n }\n\n return training_dictionary, testing_dictionary", "def fill_container(self, key_val):", "def harmonize_keys(self):\n self._data.key_regex_replacements = _key_regex_replacements\n self._data.key_replacements = _key_replacements", "def _make_key(self, record_dict: Dict[str, Any]) -> int:\n return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))", "def make_digested_record(self):\n self.digested_record = {}\n for dest_key, src_key in self.field_mappings:\n value = self.ref.get(src_key)\n if value:\n self.digested_record[dest_key] = value\n\n self.normalized_authors = None\n if \"author\" in self.digested_record:\n self.digested_record[\"author\"] = self.ETAL_PAT.sub('', self.digested_record[\"author\"])\n self.normalized_authors = normalize_author_list(self.digested_record[\"author\"], initials='.' 
in self.digested_record[\"author\"])\n self.normalized_first_author = re.sub(r\"\\.( ?[A-Z]\\.)*\", \"\", re.sub(r\"-[A-Z]\\.\", \"\", self.normalized_authors)).split(\";\")[0].strip()\n if len(self.normalized_first_author) <= 3:\n self.digested_record.pop(\"author\")\n if \"year\" in self.digested_record and len(self.digested_record[\"year\"]) > 4:\n # the extra character(s) are at the end, just to be smart about it let's go with RE\n self.digested_record[\"year\"] = self.YEAR_PATTERN.findall(self.digested_record[\"year\"])[0]\n\n # sometimes parser identifies title where it is actually journal\n # if we have volume and page, no pub, but title, it is actually pub so switch\n if not self.digested_record.get(\"pub\", None) and self.digested_record.get(\"title\", None) and \\\n self.digested_record.get(\"volume\", None) and self.digested_record.get(\"page\", None):\n self.digested_record[\"pub\"] = self.digested_record.pop(\"title\")\n\n if self.digested_record.get(\"page\", None):\n if \"-\" in self.digested_record.get(\"page\"):\n # we are querying on page stat, for now through out the page end\n self.digested_record[\"page\"] = self.digested_record[\"page\"].split(\"-\")[0]\n qualifier, self.digested_record[\"page\"] = self.tokenize_page(self.digested_record[\"page\"])\n if qualifier is not None:\n self.digested_record[\"qualifier\"] = qualifier\n\n if \"volume\" in self.digested_record and \"pub\" in self.digested_record:\n # if volume has a alpha character at the beginning, remove it and attach it to the journal\n # ie. A. Arvanitaki, S. Dimopoulos, S. Dubovsky, N. Kaloper, and J. March-Russell, \"String Axiverse,\" \"Phys. Rev.\", vol. D81, p. 123530, 2010.\n # which is in fact Journal `Phys. Rev. D.` Volume `81`\n match = self.JOURNAL_LETTER_ATTACHED_VOLUME.match(self.digested_record[\"volume\"])\n if match:\n self.digested_record[\"pub\"] = '%s %s'%(self.digested_record[\"pub\"], self.digested_record[\"volume\"][0])\n self.digested_record[\"volume\"] = self.digested_record[\"volume\"][1:]\n\n if \"title\" in self.digested_record:\n # remove too much information\n self.digested_record[\"title\"] = self.TITLE_MAIN.split(self.digested_record[\"title\"])[0]\n\n pub = self.digested_record.get(\"pub\", None) or self.digested_record.get(\"title\", None)\n if pub:\n try:\n if len(pub) <= 2:\n if self.digested_record.get(\"pub\", None):\n self.digested_record.pop(\"pub\")\n elif self.digested_record.get(\"title\", None):\n self.digested_record.pop(\"title\")\n else:\n bibstem = get_best_bibstem_for(pub)\n # if bibstem is one of the multi-section journal,\n # sometimes user do not include the section char\n # so included a wildcard in bibstem\n if any([bib==bibstem for bib in self.BIBSTEM_WITH_SECTIONS]):\n self.digested_record[\"bibstem\"] = '%s*'%bibstem\n else:\n self.digested_record[\"bibstem\"] = bibstem\n except KeyError:\n # when bibstem can not be infered from pub, get_best_bibstem_for raises this exception\n self.digested_record[\"bibstem\"] = ''\n\n if \"arxiv\" in self.digested_record:\n # authors specify arxiv id different ways,\n # sometimes for the new format they include class name, which is wrong\n # sometimes for the old format they include class name, but out of order\n # get the correct arxiv_id\n self.digested_record[\"arxiv\"] = self.digested_record[\"arxiv\"].split(\":\")[-1]\n for aie in self.ARXIV_ID_EXTRACTOR:\n match = aie.match(self.digested_record[\"arxiv\"])\n if match:\n group_names = list(match.groupdict().keys())\n if 'new_pattern' in group_names:\n 
self.digested_record[\"arxiv\"] = match.group('new_pattern')\n elif 'old_pattern' in group_names:\n self.digested_record[\"arxiv\"] = \"%s/%s\"%(match.group('class_name'), match.group('old_pattern'))\n break\n\n if \"ascl\" in self.digested_record:\n # remove the ascl prefix if included\n self.digested_record[\"ascl\"] = self.digested_record[\"ascl\"].split(\":\")[-1]", "def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')", "def key_lookup_batch(self, batchiter):\n pass", "def create_matcher(self):\n self.matcher = None\n if \"matcher\" in self.config:\n self.matcher = matcher.Matcher(self.config[\"matcher\"])\n else:\n self.matcher = matcher.TrueMatcher()\n \n self.use_fields_for_id = []\n if \"matcherfield\" in self.config:\n self.use_fields_for_id = self.config[\"matcherfield\"].split(\",\")\n \n if \"clear\" in self.config:\n self.clear_matcher = matcher.Matcher(self.config[\"clear\"])\n self.autoclear = self.auto_acknowledge\n else:\n self.clear_matcher = matcher.FalseMatcher()\n self.autoclear = False", "def __init__(self, scorer, match_results, matcher):\n self.match_results = match_results\n self.matcher = matcher\n self.collection1 = scorer.collection1\n self.collection2 = scorer.collection2\n gold = scorer._gold\n actual = set(match_results.pairs)\n self._gold = gold\n self._actual = actual\n self._tp = actual & gold\n self._fp = actual - gold\n self._fn = gold - actual", "def find_records(self, check, keys=None):\n matches = self._match(check)\n if keys:\n return [self._extract_subdict(rec, keys) for rec in matches]\n else:\n return matches", "def _make_key(self, key):\n if callable(key):\n return key\n return lambda row, index: row[key]", "def _build_dicts(self):\n self._id2spkmask = dict([(id, self.unit_IDs == id) \\\n for id in self.get_unique_unit_IDs()])\n \n \n # Annoying bug where trials that don't exist in self.spike_trials\n # are called in pick_trials, and cause key error.\n # Why is this happening anyway??\n # TODO: check this bugfix\n \n # Old\n #self._tr2spkmask = dict([(tr, self.spike_trials == tr) \\\n # for tr in np.unique(self.spike_trials)])\n \n # New\n self._tr2spkmask = defaultdict(\\\n lambda: np.zeros(self.spike_trials.shape, dtype=np.bool))\n for tr in np.unique(self.spike_trials):\n self._tr2spkmask[tr] = (self.spike_trials == tr)\n \n\n #~ # Some silly hacks for missing trials, fix me!\n #~ if 2 not in self._tr2spkmask.keys():\n #~ self._tr2spkmask[2] = (self.spike_trials == 2)\n #~ if 3 not in self._tr2spkmask.keys():\n #~ self._tr2spkmask[3] = (self.spike_trials == 3)\n #~ for tr in np.arange(408, 501):\n #~ if tr not in self._tr2spkmask.keys():\n #~ self._tr2spkmask[tr] = (self.spike_trials == tr) " ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the origin_imgs are flipped correctly.
def _check_flip(origin_imgs, result_imgs):
    h, w, c = origin_imgs.shape
    for i in range(h):
        for j in range(w):
            for k in range(c):
                if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:
                    return False
    return True
[ "def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(n):\n if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501\n return False\n # yapf: enable\n return True", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def is_flipped_about_xy(self):\n return self.silhouette.is_flipped_about_xy", "def is_flipped(self, transformation):\n flipped = transformations.spatial.common.flipped_dimensions(transformation, self.output_size)\n is_flipped = functools.reduce(lambda a, b: a ^ b, flipped, 0)\n return is_flipped", "def test_image_inverted(self):\n star = Starshot.from_demo_image()\n top_left_corner_val_before = star.image.array[0,0]\n star._check_image_inversion()\n top_left_corner_val_after = star.image.array[0,0]\n self.assertNotEqual(top_left_corner_val_before, top_left_corner_val_after)", "def _check_same_fov(img1, img2):\n img1 = check_niimg(img1)\n img2 = check_niimg(img2)\n return (img1.shape[:3] == img2.shape[:3]\n and np.allclose(img1.get_affine(), img2.get_affine()))", "def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))", "def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def drone_has_flipped(self, current_orientation):\n has_flipped = True\n\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n\n rospy.logwarn(\"#### HAS 
FLIPPED? ########\")\n rospy.logwarn(\"RPY current_orientation\"+str(current_orientation))\n rospy.logwarn(\"max_roll\"+str(self.max_roll) +\n \",min_roll=\"+str(-1*self.max_roll))\n rospy.logwarn(\"max_pitch\"+str(self.max_pitch) +\n \",min_pitch=\"+str(-1*self.max_pitch))\n rospy.logwarn(\"############\")\n\n if current_orientation.x > -1*self.max_roll and current_orientation.x <= self.max_roll:\n if current_orientation.y > -1*self.max_pitch and current_orientation.y <= self.max_pitch:\n has_flipped = False\n\n return has_flipped", "def mark_image_flipped(self, balid, side):\n imgpaths_sorted = sorted(self.bal2imgs[balid], key=lambda imP: self.img2page[imP])\n imgpath = imgpaths_sorted[side]\n print \"Setting img2flip for image '{0}' FROM '{1}' TO '{2}'\".format(imgpath,\n self.img2flip[imgpath],\n not self.img2flip[imgpath])\n self.img2flip[imgpath] = not self.img2flip[imgpath]\n pickle.dump(self.img2flip, open(os.path.join(self.proj.projdir_path, self.proj.image_to_flip), 'wb'))\n if balid == self.cur_ballotid and side == self.cur_side:\n self.display_image(side, balid)", "def is_mirror(I, R):\n return numpy.array_equal(I, R)", "def test_02_image_fix_orientation(self):\n\n # Colors that can be distinguished among themselves even with jpeg loss.\n blue = (0, 0, 255)\n yellow = (255, 255, 0)\n green = (0, 255, 0)\n pink = (255, 0, 255)\n # Image large enough so jpeg loss is not a huge factor in the corners.\n size = 50\n expected = (blue, yellow, green, pink)\n\n # They are all supposed to be same image: (blue, yellow, green, pink) in\n # that order, but each encoded with a different orientation.\n self._orientation_test(1, (blue, yellow, green, pink), size, expected) # top/left\n self._orientation_test(2, (yellow, blue, pink, green), size, expected) # top/right\n self._orientation_test(3, (pink, green, yellow, blue), size, expected) # bottom/right\n self._orientation_test(4, (green, pink, blue, yellow), size, expected) # bottom/left\n self._orientation_test(5, (blue, green, yellow, pink), size, expected) # left/top\n self._orientation_test(6, (yellow, pink, blue, green), size, expected) # right/top\n self._orientation_test(7, (pink, yellow, green, blue), size, expected) # right/bottom\n self._orientation_test(8, (green, blue, pink, yellow), size, expected) # left/bottom", "def images_the_same(image1, image2):\n\n \"\"\"\n im1 = cv.imread(image1)\n im2 = cv.imread(image2)\n\n if im1.shape != im2.shape:\n return False\n\n difference = cv.subtract(im1, im2)\n b, g, r = cv.split(difference)\n\n if cv.countNonZero(b) == 0 and cv.countNonZero(g) == 0 and cv.countNonZero(r) == 0:\n return True\n return False\n \"\"\"\n im1 = cv.imread(image1)\n im2 = cv.imread(image2)\n\n if im1.shape != im2.shape:\n return False\n\n difference = cv.absdiff(im1, im2)\n d = (difference == 0).all()\n return d", "def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n 
print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def _check_same_fov(*args, **kwargs):\n raise_error = kwargs.pop(\"raise_error\", False)\n for i, arg in enumerate(args):\n kwargs[f\"img_#{i}\"] = arg\n errors = []\n for (a_name, a_img), (b_name, b_img) in itertools.combinations(\n kwargs.items(), 2\n ):\n if not a_img.shape[:3] == b_img.shape[:3]:\n errors.append((a_name, b_name, \"shape\"))\n if not np.allclose(a_img.affine, b_img.affine):\n errors.append((a_name, b_name, \"affine\"))\n if len(errors) > 0 and raise_error:\n raise ValueError(\n \"Following field of view errors were detected:\\n\"\n + \"\\n\".join(\n [\n f\"- {e[0]} and {e[1]} do not have the same {e[2]}\"\n for e in errors\n ]\n )\n )\n return len(errors) == 0", "def readout_flipped(self, iamp):\n flipped = ct.c_int()\n self.lib.IsReadoutFlippedByAmplifier(ct.c_int(iamp),\n ct.pointer(flipped))\n return bool(flipped.value)", "def invertible(self):\n a = self._data\n return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]", "def _check_img_inversion(self):\n for image in [self.image_open, self.image_dmlc]:\n image.check_inversion()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a single cycle of the sample collection. It should read the monitored file and extract all metrics.
def run_single_cycle(self, collector=None):
    self._timestamp = int(time.time())

    # There are certain error conditions, such as the system not supporting
    # a particular proc file type, that we will never recover from. So,
    # just always early exit.
    if self._failed:
        return {}

    filename = self._file_pattern % self._pid

    if not collector:
        collector = {}
    if self._file is None:
        try:
            self._file = open(filename, "r")
        except IOError as e:
            # We take a simple approach. If we don't find the file or
            # don't have permissions for it, then just don't collect this
            # stat from now on. If the user changes the configuration file
            # we will try again to read the file then.
            self._failed = True
            if e.errno == errno.EACCES:
                self._logger.error(
                    "The agent does not have permission to read %s. "
                    "Maybe you should run it as root.",
                    filename,
                )
            elif e.errno == errno.ENOENT:
                self._logger.error(
                    (
                        "The agent cannot read %s. Your system may not support that proc file "
                        'type or the process with pid "%s" doesn\'t exist'
                    ),
                    filename,
                    self._pid,
                )
            # Ignore 'process not found' errors (likely caused because the process exited
            # but re-raise the exception for all other errors
            elif e.errno != errno.ESRCH:
                raise e

    if self._file is not None:
        try:
            self._file.seek(0)
            return self.gather_sample(self._file, collector=collector)
        except IOError as e:
            # log the error if the errno isn't 'process not found'. Process not found likely means the
            # process exited, so we ignore that because it's within the realm of expected behaviour
            if e.errno != errno.ESRCH:
                self._logger.error(
                    "Error gathering sample for file: '%s'\n\t%s"
                    % (filename, six.text_type(e))
                )

            # close the file. This will cause the file to be reopened next call to run_single_cycle
            self.close()
    return collector
[ "def run(self):\r\n self.collect_data()", "def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)", "def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line", "def run(self):\n # noinspection PyBroadException\n try:\n while not self._is_stopped():\n # noinspection PyBroadException\n try:\n self.gather_sample()\n except Exception:\n self._logger.exception('Failed to gather sample due to the following exception')\n self.increment_counter(errors=1)\n\n self._sleep_but_awaken_if_stopped(self._sample_interval_secs)\n self._logger.info('Monitor has finished')\n except Exception:\n # TODO: Maybe remove this catch here and let the higher layer catch it. However, we do not\n # right now join on the monitor threads, so no one would catch it. 
We should change that.\n self._logger.exception('Monitor died from due to exception:', error_code='failedMonitor')", "def __call__(self, subserver, stats):\n iteration = 0\n while True:\n iteration += 1\n subserver.logger.info(\n '%s sample daemon log line %s' % (self.name, iteration))\n stats.set('last_run', time())\n stats.set('iterations', iteration)\n sleep(self.interval)", "def run(self):\n for filename in self.files:\n events = self._load_events_from_disk(filename)\n channel_manager = self.channel_manager_class(reporter=self.reporter)\n\n for event in events:\n if (\n '*' in channel_manager.INTERESTING_EVENTS or\n event['Event'] in channel_manager.INTERESTING_EVENTS\n ):\n channel_manager.on_event(event)\n\n self.channel_managers.append(channel_manager)\n self.reporter.close()", "def run(self):\n for target in self.config['targets']:\n mm = MetricManager(target['name'])\n\n hostname = socket.gethostname()\n myhb = UniqueMetric.create_heartbeat(hostname, self.config['templates'], self.config['hostgroups'])\n\n print 'Writing heartbeat to %s' % (target['name'])\n mm.write_metrics(myhb)", "def collectSamples(self):\n# if self.sample_iter > self.params[\"N_samples\"]:\n# log.error(\"ERROR: Attempted to collect more than presribed number of samples!\")\n# return\n \n self.__collect(\"postburnin\")", "def run_experiment(self):\n self._print_experiment_start()\n self.get_data()\n for epoch in xrange(self.epochs):\n self.run_epoch(epoch)", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def collector_runner(self, config):\n \n # Get the class for the collector because this is what is used to \n # load it. \n fqcn = config.get(\"class\")\n MyCollector = self.get_collector_class(fqcn)\n \n #we pass the config dict to the collector so as to \n #make it available for use in the template\n collector = MyCollector(config=config)\n \n x = queue.Queue()\n \n while 1:\n #Collector is running. Gather data from this thread\n \n json_str = collector.json\n self.logger.debug(json_str)\n \n # We turn the data into a dict via json.loads for two reasons\n # 1. to ensure that the json string is valid\n # 2. 
posting the data requires it to be in dict format\n metric_data = json.loads(json_str)\n \n \n #We automatically add the timestamp\n metric_data[\"DeviceTimeStamp\"] = int(datetime.utcnow().timestamp())\n \n self.metric_queue.put(metric_data)\n \n pprint(metric_data)\n #get the update interval or use a default of 600 seconds\n update_interval = config.get(\"update\", 600)\n sleep(int(update_interval))", "def run():\n try:\n with open(CONFIG_PATH, 'r') as f:\n last_ts = json.loads(f.read())['last_ts']\n except FileNotFoundError:\n sys.exit('Config file not found, please provide one')\n\n while 1:\n last_ts, data = get_current_feed(last_ts)\n print('{} new entries retrieved from USGS'.format(len(data)))\n with open(CONFIG_PATH, 'w') as f:\n f.write(json.dumps({'last_ts': last_ts}))\n adjust_structure(data)\n data.sort(key=lambda x: x['time'])\n count_foreshocks(stub, data)\n # Add new data to CosmosDB\n for item in data:\n stub.create(item['ids'], item)\n print('Sleep for {} seconds'.format(QUERY_INTERVAL))\n time.sleep(QUERY_INTERVAL)", "def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH,\n '--logtostderr',\n '--input_file=sample.x',\n '--options_file=options.pbtxt',\n ]\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.pbtxt', smp.options.to_pbtxt())\n args_filename = 'args.txt'\n _write_to_file(\n run_dir, args_filename, sample.args_batch_to_text(smp.args_batch)\n )\n args.append('--args_file=args.txt')\n ir_channel_names_filename = None\n if smp.ir_channel_names is not None:\n ir_channel_names_filename = 'ir_channel_names.txt'\n _write_to_file(run_dir, ir_channel_names_filename,\n sample.ir_channel_names_to_text(smp.ir_channel_names))\n args.append('--ir_channel_names_file=ir_channel_names.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files(\n 'sample.x', 'options.pbtxt', args_filename, ir_channel_names_filename\n )\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)", "def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.json', smp.options.to_json())\n if smp.args_batch:\n _write_to_file(run_dir, 'args.txt',\n sample.args_batch_to_text(smp.args_batch))\n\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH, '--logtostderr', '--input_file=sample.x',\n '--options_file=options.json'\n ]\n if smp.args_batch:\n args.append('--args_file=args.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n 
executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files('sample.x', 'options.json', 'args.txt')\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)", "def run(self):\n\n logme(\"INFO: Processing sample %d\" % self._sample.id)\n (status, msg) = commands.getstatusoutput(\"cd %s && visDQMIndex streampb --sample %d %s\" % \\\n (WORKDIR, self._sample.id, INDEX))\n if status == 0:\n if self.moveSampleInPlace():\n if self.writeSampleInfo():\n self._sample.done = True\n logme(\"INFO: Updating cache for sample %d to done\", self._sample.id)\n self._sm.updateCache()\n else:\n logme(\"ERROR: Failing to write info file for sample %d [%s]\",\\\n self._sample.id, self._sample.streamFile)\n if os.path.exists('%s/%s' %\\\n (EXPORTDIR, self._sample.streamFile)):\n os.remove('%s/%s' %\\\n (EXPORTDIR, self._sample.streamFile))\n else:\n logme(\"ERROR: Failing to write .(dat|pb) file for sample %d [%s]\",\\\n self._sample.id, self._sample.streamFile)\n else:\n logme(\"ERROR: Failing to stream sample %d [%s]\\n%s\", \\\n self._sample.id, self._sample.streamFile, msg)", "def run(self):\n\n # How to retrieve your input data.\n input_1_data = self.in_data['input_1']\n\n # How to retrieve your params value.\n param_1 = self.param['param_1']\n\n # How to process data.\n # Just write any number of methods you want and use them here.\n sample_out_data = self.sample_method(input_1_data, param_1)\n\n # Go to the definition of this method to see how to log.\n self.demo_log()\n\n # This is how to set output data.\n self.out_data['output_1'] = sample_out_data", "def startMeasuring(self):\n\n if (not self._shouldMeasure):\n \n self._shouldMeasure = True\n self._lastUpdate = time.time()\n\n t = threading.Thread(target=self._measureCycle)\n t.start()", "def run(self):\n while True and self.should_run:\n mtime = os.stat(self.filename).st_mtime\n if mtime > self.lastmtime:\n logging.info('[*] Target file was modified and it has to be read again...')\n self.lastmtime = mtime\n self.readTargets()\n time.sleep(1.0)", "def run(self):\n\n self.started = datetime.now()\n\n logger.info('Load Data Script (index citation)')\n\n self.controller.load_mapping()\n\n logger.info('Get all ids from articlemeta')\n\n if self.args.full:\n logger.info('You have selected full processing... this will take a while')\n\n if self.args.rebuild_index:\n logger.info('This will remove EVERYTHING from your search index')\n self.controller.index_reset()\n\n self._bulk()\n\n else:\n logger.info('You have selected incremental processing... It will include, update and remove records according to ArticleMeta history change API.')\n\n self._bulk_incremental()\n\n self.finished = datetime.now()\n\n logger.info(\"Total processing time: %s sec.\" % self._duration())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of centiseconds (1/100ths of a second) for the given number of jiffies (a weird timing unit used by the kernel).
def __calculate_time_cs(self, jiffies):
    return int((jiffies * 100.0) / self._jiffies_per_sec)
[ "def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3", "def ticks_us():\n\ttry:\n\t\t# pylint: disable=no-member\n\t\treturn time.ticks_us()\n\texcept:\n\t\treturn time.time_ns()//1000", "def ticks_us() -> int:\n return int()", "def calc_time_diff_50MHz(start, stop):\n return (stop-start)*2e-8", "def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),\n _load_time=time.time()):\n try:\n f=open(_proc_pid_stat,'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except:\n return int(100*(time.time()-_load_time))", "def ticks_ms() -> int:\n return int()", "def __float__(self):\n return self.sec + self.nsec / 10.0e9", "def clocktime_to_millisecond(value):\n if _gst_available():\n return value // Gst.MSECOND", "def get_period_ns(self):\n self.microblaze.write_blocking_command(MEASURE_PERIOD)\n count = self.microblaze.read_mailbox(0)\n return count * self.clk_period_ns", "def elapsed_micros(start: int, /) -> int:", "def interval_seconds():\n return int(interval_to_milliseconds(interval())/1000)", "def seconds_in_nano(seconds):\n return seconds * (10 ** 9)", "def get_total_cpu_clock_cycles():\n try:\n with open(LINUX_STAT_LOCATION, 'r') as f:\n cpu_entries = f.readline().split(' ')\n except IOError:\n return None\n\n cpu_cycles = 0\n for entry in cpu_entries:\n try:\n cpu_cycles += int(entry)\n except ValueError:\n pass\n return cpu_cycles", "def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r", "def elapsed_micros(start):\n pass", "def _get_cpu_interval(self):\n self._polling_execute_frequency = int(self._plugin_conf[u'main'][u'polling_frequency'])\n\n if 5 <= self._polling_execute_frequency < 60:\n return cpmCPUTotalMonIntervalValue # replaces cpmCPUTotal5SecRev\n elif 60 <= self._polling_execute_frequency < 300:\n return cpmCPUTotal1minRev\n elif 300 <= self._polling_execute_frequency:\n return cpmCPUTotal5minRev\n else:\n return cpmCPUTotal1minRev", "def ticks_per_second(self):\n return self._ticks_per_second" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of milliseconds for the given number of jiffies (a weird timing unit used by the kernel).
def calculate_time_ms(self, jiffies):
    return int((jiffies * 1000.0) / self._jiffies_per_sec)
[ "def __calculate_time_cs(self, jiffies):\n\n return int((jiffies * 100.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def ticks_us():\n\ttry:\n\t\t# pylint: disable=no-member\n\t\treturn time.ticks_us()\n\texcept:\n\t\treturn time.time_ns()//1000", "def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),\n _load_time=time.time()):\n try:\n f=open(_proc_pid_stat,'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except:\n return int(100*(time.time()-_load_time))", "def millis() -> int:", "def ticks_us() -> int:\n return int()", "def millis():\n return int(round(time.time() * 1000))", "def ticks_ms() -> int:\n return int()", "def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3", "def timestamp_ms():\n dt = datetime.now()\n return int((mktime(dt.timetuple()) + dt.microsecond / 1e6) * 1000)", "def get_time_ms():\n return int(round(time.time() * 1000))", "def clocktime_to_millisecond(value):\n if _gst_available():\n return value // Gst.MSECOND", "def get_millis(seconds):\n return seconds * 10 ** 3", "def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)", "def millisecond():\n return int(round(time.time() * 1000))", "def elapsed_micros(start: int, /) -> int:", "def millisecond_to_clocktime(value):\n if _gst_available():\n return value * Gst.MSECOND", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def epoch_time_ms_now():\n ms = int(time.time() * 1000)\n return ms" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gathers the metrics from the stat file.
def gather_sample(self, stat_file, collector=None):
    if not collector:
        collector = {}
    # The file format is just a single line of all the fields.
    line = stat_file.readlines()[0]
    # Chop off first part which is the pid and executable file. The
    # executable file is terminated with a paren so just search for that.
    line = line[(line.find(") ") + 2) :]
    fields = line.split()
    # Then the fields we want are just at fixed field positions in the
    # string. Just grab them.

    # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers
    # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to
    # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number
    # 19, but in our case it's 16 aka 19 - 3)
    process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(
        int(fields[19])
    )

    collector.update(
        {
            Metric("app.cpu", "user"): self.__calculate_time_cs(int(fields[11])),
            Metric("app.cpu", "system"): self.__calculate_time_cs(int(fields[12])),
            Metric("app.uptime", None): process_uptime,
            Metric("app.nice", None): float(fields[16]),
            Metric("app.threads", None): int(fields[17]),
            Metric("app.mem.majflt", None): int(fields[9]),
            Metric("app.io.wait", None): int(fields[39])
            if len(fields) >= 39
            else 0,
        }
    )
    return collector
[ "def _read_stat(self):\n stat_file = '/proc/{:d}/stat'.format(self.pid)\n with open(stat_file, 'r') as handle:\n self._stat = handle.read()", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector", "def collect(self):\n\n if str_to_bool(self.config['use_sudo']):\n if not os.access(self.config['sudo_cmd'], os.X_OK):\n self.log.error(\"Cannot find or exec %s\"\n % self.config['sudo_cmd'])\n return None\n\n command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE).communicate()[0][:-1]\n lines = p.split(\"\\n\")\n\n else:\n if not os.access(self.MOUNTSTATS, os.R_OK):\n self.log.error(\"Cannot read path %s\" % self.MOUNTSTATS)\n return None\n\n f = open(self.MOUNTSTATS)\n lines = f.readlines()\n f.close()\n\n path = None\n for line in lines:\n tokens = line.split()\n if len(tokens) == 0:\n continue\n\n if tokens[0] == 'device':\n path = tokens[4]\n\n skip = False\n if self.exclude_reg:\n skip = self.exclude_reg.match(path)\n if self.include_reg:\n skip = not self.include_reg.match(path)\n\n if skip:\n self.log.debug(\"Ignoring %s\", path)\n else:\n self.log.debug(\"Keeping %s\", path)\n\n path = path.replace('.', '_')\n path = path.replace('/', '_')\n elif skip:\n # If we are in a skip state, don't pay any attention to\n # anything that isn't the next device line\n continue\n elif tokens[0] == 'events:':\n for i in range(0, len(self.EVENTS_MAP)):\n metric_name = \"%s.events.%s\" % (path, self.EVENTS_MAP[i])\n metric_value = long(tokens[i + 1])\n self.publish_counter(metric_name, metric_value)\n elif tokens[0] == 'bytes:':\n for i in range(0, len(self.BYTES_MAP)):\n metric_name = \"%s.bytes.%s\" % (path, self.BYTES_MAP[i])\n metric_value = long(tokens[i + 1])\n self.publish_counter(metric_name, metric_value)\n elif tokens[0] == 'xprt:':\n proto = tokens[1]\n if not self.XPRT_MAP[proto]:\n self.log.error(\"Unknown protocol %s\", proto)\n continue\n\n for i in range(0, len(self.XPRT_MAP[proto])):\n metric_name = \"%s.xprt.%s.%s\" % (path, proto,\n self.XPRT_MAP[proto][i])\n metric_value = long(tokens[i + 2])\n self.publish_counter(metric_name, metric_value)\n elif tokens[0][:-1] in self.RPCS_MAP:\n rpc = tokens[0][:-1]\n ops = long(tokens[1])\n rtt = long(tokens[7])\n exe = long(tokens[8])\n\n metric_fmt = \"%s.rpc.%s.%s\"\n ops_name = metric_fmt % (path, rpc.lower(), 'ops')\n rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')\n exe_name = metric_fmt % (path, rpc.lower(), 'exe')\n\n self.publish_counter(ops_name, ops)\n self.publish_counter(rtt_name, rtt)\n self.publish_counter(exe_name, exe)", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n 
unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def file_stats(filename):\n words = read_file(filename)\n print_stats(words)", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def file_stat(self, file_path):", "def get_all_metrics(path):\n files = [os.path.join(path, f) for f in os.listdir(path)]\n files = [f for f in files if os.path.isfile(f)]\n return {\n 'count': {\n 'data': get_files_count(files, ''),\n 'tmp': get_files_count(files, '.tmp'),\n 'md5': get_files_count(files, '.md5'),\n },\n 'size': get_files_size(files),\n }", "def _lane_stats(cur_name, work_dir):\n parser = PicardMetricsParser()\n metrics_files = glob.glob(os.path.join(work_dir, \"%s*metrics\" % cur_name))\n metrics = parser.extract_metrics(metrics_files)\n return metrics", "def read_metrics(self):\n raise NotImplementedError()", "def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def stat(file_path):\n\n completed_process = subprocess.run([\"gstat\", \"--format\", \"%s %x %w %n\", file_path], stdout=subprocess.PIPE)\n\n # print(completed_process.stdout)\n stats = completed_process.stdout\n return stats", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def _collect_metrics(self, parcel_id):\n results = {'parcel_id': parcel_id}\n pa = 
self.parcel_accessor(parcel_id)\n path = pa.find_data()\n if path:\n results['success'] = 'Y'\n else:\n results['success'] = 'N'\n path = _find_parcel_data_path(self.incomplete_path, parcel_id)\n\n if path:\n results['files'] = str(_fsutil.total_file_count(path))\n results['bytes'] = str(_fsutil.total_size_bytes(path))\n\n cfg = self.read_config()\n for name in cfg.get('metrics', {}):\n cmd = cfg['metrics'][name]['cmd']\n env = {**os.environ,\n 'PARCEL_PATH': str(path),\n 'DATASET_PATH': str(self.path)}\n try:\n out = subprocess.check_output(cmd, shell=True, env=env)\n results[name] = str(out, 'utf-8').strip()\n except Exception as e:\n results[name] = 'ERROR'\n print(f'metric {name} failed for {path}', file=sys.stderr)\n print(e, file=sys.stderr)\n\n return results", "def print_file_stats(self):\n\n # current epoch time, file number, filename, filesize, trans secs, status\n print(f\"TRANS_STATS_FILE: {time.time()} {self.batchvals['numfiles']} {self.filevals['filename']} {self.filevals['numbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def fill_file_metrics(f):\n # Trees\n trees = map(lambda i: f.Get(i.GetName()),filter(lambda x: x.GetClassName() == \"TTree\", f.GetListOfKeys()))\n # Histograms\n histos= filter(lambda obj: obj.InheritsFrom(\"TH1\"),\\\n map(lambda i: f.Get(i.GetName()),filter(lambda x: x.GetClassName().find(\"TH\") == 0, f.GetListOfKeys())))\n\n # build the trees with its metrics\n tree_metrics_dict = dict(map(lambda t: (t.GetName(),tree_metrics(t)),trees))\n histo_metrics_dict = dict(map(lambda h: (h.GetName(),histo_metrics(h)),histos))\n\n return tree_metrics_dict,histo_metrics_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gathers the metrics from the sockstat file.
def gather_sample(self, stat_file, collector=None):
    if not collector:
        collector = {}
    for line in stat_file:
        # We just look for the different "inuse" lines and output their
        # socket type along with the count.
        m = re.search(r"(\w+): inuse (\d+)", line)
        if m is not None:
            collector.update(
                {
                    Metric("app.net.sockets_in_use", m.group(1).lower()): int(
                        m.group(2)
                    )
                }
            )
    return collector
[ "def _read_stat(self):\n stat_file = '/proc/{:d}/stat'.format(self.pid)\n with open(stat_file, 'r') as handle:\n self._stat = handle.read()", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def collect(self):\n\n if str_to_bool(self.config['use_sudo']):\n if not os.access(self.config['sudo_cmd'], os.X_OK):\n self.log.error(\"Cannot find or exec %s\"\n % self.config['sudo_cmd'])\n return None\n\n command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE).communicate()[0][:-1]\n lines = p.split(\"\\n\")\n\n else:\n if not os.access(self.MOUNTSTATS, os.R_OK):\n self.log.error(\"Cannot read path %s\" % self.MOUNTSTATS)\n return None\n\n f = open(self.MOUNTSTATS)\n lines = f.readlines()\n f.close()\n\n path = None\n for line in lines:\n tokens = line.split()\n if len(tokens) == 0:\n continue\n\n if tokens[0] == 'device':\n path = tokens[4]\n\n skip = False\n if self.exclude_reg:\n skip = self.exclude_reg.match(path)\n if self.include_reg:\n skip = not self.include_reg.match(path)\n\n if skip:\n self.log.debug(\"Ignoring %s\", path)\n else:\n self.log.debug(\"Keeping %s\", path)\n\n path = path.replace('.', '_')\n path = path.replace('/', '_')\n elif skip:\n # If we are in a skip state, don't pay any attention to\n # anything that isn't the next device line\n continue\n elif tokens[0] == 'events:':\n for i in range(0, len(self.EVENTS_MAP)):\n metric_name = \"%s.events.%s\" % (path, self.EVENTS_MAP[i])\n metric_value = long(tokens[i + 1])\n self.publish_counter(metric_name, metric_value)\n elif tokens[0] == 'bytes:':\n for i in range(0, len(self.BYTES_MAP)):\n metric_name = \"%s.bytes.%s\" % (path, self.BYTES_MAP[i])\n metric_value = long(tokens[i + 1])\n self.publish_counter(metric_name, metric_value)\n elif tokens[0] == 'xprt:':\n proto = tokens[1]\n if not self.XPRT_MAP[proto]:\n self.log.error(\"Unknown protocol %s\", proto)\n continue\n\n for i in range(0, len(self.XPRT_MAP[proto])):\n metric_name = \"%s.xprt.%s.%s\" % (path, proto,\n self.XPRT_MAP[proto][i])\n metric_value = long(tokens[i + 2])\n self.publish_counter(metric_name, metric_value)\n elif tokens[0][:-1] in self.RPCS_MAP:\n rpc = tokens[0][:-1]\n ops = 
long(tokens[1])\n rtt = long(tokens[7])\n exe = long(tokens[8])\n\n metric_fmt = \"%s.rpc.%s.%s\"\n ops_name = metric_fmt % (path, rpc.lower(), 'ops')\n rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')\n exe_name = metric_fmt % (path, rpc.lower(), 'exe')\n\n self.publish_counter(ops_name, ops)\n self.publish_counter(rtt_name, rtt)\n self.publish_counter(exe_name, exe)", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def echofilter_to_stats(file_handle):\n #!FIXME: this doesn't work because the connection is only closed once.\n\n stats = DapStats()\n open_requests = {}\n request = {}\n\n\n for line in file_handle:\n mo = re.match('--- ((.*)->(.*)) (opened|closed) (\\d+) --', line)\n # Can ignore open events\n if mo:\n connection_details, source, dest, event, timestamp = mo.groups()\n timestamp = float(timestamp)\n if event == 'closed':\n start_timestamp = open_requests[connection_details]\n \n host, port = dest.split(':')\n stats.add_request(host, start_timestamp/1000, timestamp/1000, \n request[connection_details])\n del request[connection_details] \n\n mo = re.match('------ ((.*)->(.*)) (\\d+) ------', line)\n if mo:\n connection_details, source, dest, timestamp = mo.groups()\n timestamp = float(timestamp)\n\n if connection_details in open_requests:\n start_timestamp = open_requests[connection_details]\n \n host, port = dest.split(':')\n stats.add_request(host, start_timestamp/1000, timestamp/1000, \n request[connection_details])\n del request[connection_details]\n\n open_requests[connection_details] = timestamp\n\n continue\n\n mo = re.match('GET ', line)\n if mo:\n request[connection_details] = DapRequest.from_get(source, line)\n continue\n\n return stats", "def read_lnet_stats(f):\n ret = {'send_count': 0, 'recv_count': 0, 'send_length':0, 'recv_length': 0}\n\n pfile = os.path.normpath(f) + \"/stats\"\n with open(pfile, \"r\") as f:\n for line in f:\n chopped = line.split()\n if chopped[3]:\n ret[\"send_count\"] = int(chopped[3])\n if chopped[4]:\n ret[\"recv_count\"] = int(chopped[4])\n if chopped[7]:\n ret[\"send_length\"] = int(chopped[7])\n\t\tif chopped[8]:\n\t\t ret[\"recv_length\"] = int(chopped[8])\t\n \n\n if ret['send_count'] == 0 and ret['recv_count'] == 0 and ret['send_length'] == 0 and ret['recv_length'] == 0 :\n return None\n\n return ret", "def fetch_stats(self):\n self.frontends = []\n self.backends = []\n self.listeners = []\n\n csv = [ l for l in self._poll().strip(' #').split('\\n') if l ]\n if self.failed:\n return\n\n #read fields header to create keys\n fields = [ f for f in csv.pop(0).split(',') if f ]\n \n #add frontends and backends first\n for line in csv:\n service = HAProxyService(fields, line.split(','), self.name)\n\n if service.svname == 'FRONTEND':\n self.frontends.append(service)\n elif service.svname == 'BACKEND':\n service.listeners = []\n self.backends.append(service)\n else:\n self.listeners.append(service)\n \n #now add listener names to corresponding backends\n for listener in 
self.listeners:\n for backend in self.backends:\n if backend.iid == listener.iid:\n backend.listeners.append(listener.__dict__)\n\n self.stats = { 'frontends': [ s.__dict__ for s in self.frontends ],\n 'backends': [ s.__dict__ for s in self.backends ] }\n\n self.last_update = datetime.utcnow()", "def get_udp_stats(self):\n print(\"### get udp stats ###\")\n udp_stat = dict()\n output = getattr(self.warp17_obj, 'shell')(command=\"show udp statistics\", pattern=\"warp17>\").response()\n out = output.split(\"\\n\")\n out = [i.rstrip().strip() for i in out]\n for line in out:\n if len(line) > 0:\n\n if re.search(r\"Port\\s+(\\d+)\\s+UDP\\s+statistics:\", line) is not None:\n match = re.search(r\"Port\\s+(\\d+)\\s+UDP\\s+statistics:\", line)\n port = match.group(1)\n udp_stat[port] = dict()\n\n if re.search(r\"Received\\s*Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Received\\s*Packets\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['rcvd_pkts'] = match.group(1)\n\n if re.search(r\"Received\\s*Bytes\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Received\\s*Bytes\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['rcvd_bytes'] = match.group(1)\n\n if re.search(r\"Sent\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Packets\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['sent_pkts'] = match.group(1)\n\n if re.search(r\"Sent\\s+Bytes\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Bytes\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['sent_bytes'] = match.group(1)\n\n if re.search(r\"Sent\\s+Data\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Data\\s+Packets\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['sent_data_pkts'] = match.group(1)\n\n if re.search(r\"Sent\\s+Data\\s+Bytes\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Data\\s+Bytes\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['sent_data_bytes'] = match.group(1)\n\n if re.search(r\"Malloced\\s+UCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Malloced\\s+UCBs\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['free_ucbs'] = match.group(1)\n\n if re.search(r\"Freed\\s+UCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Freed\\s+UCBs\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['free_ucbs'] = match.group(1)\n\n if re.search(r\"Not\\s+found\\s+UCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Not\\s+found\\s+UCBs\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['not_found_ucbs'] = match.group(1)\n\n if re.search(r\"UCB\\s+alloc\\s+errors\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"UCB\\s+alloc\\s+errors\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['ucb_alloc_err'] = match.group(1)\n\n if re.search(r\"Invalid\\s+checksum\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Invalid\\s+checksum\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['invl_csum'] = match.group(1)\n\n if re.search(r\"Small\\s+mbuf\\s+fragment\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Small\\s+mbuf\\s+fragment\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['small_mbuf_frag'] = match.group(1)\n\n if re.search(r\"Failed\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Failed\\s+Packets\\s*:\\s*(\\d+)\", line)\n udp_stat[port]['failed_pkts'] = match.group(1)\n\n print(json.dumps(udp_stat, indent=4))\n return udp_stat", "def get_tcp_stats(self):\n print(\"### get tcp stats ###\")\n tcp_stat = dict()\n #if 'port' in kwargs:\n # self.port = kwargs.get('port')\n\n output = getattr(self.warp17_obj, 
'shell')(command=\"show tcp statistics\", pattern=\"warp17>\").response()\n out = output.split(\"\\n\")\n out = [i.rstrip().strip() for i in out]\n for line in out:\n if len(line) > 0:\n if re.search(r\"Port\\s+(\\d+)\\s+TCP\\s+statistics:\", line) is not None:\n match = re.search(r\"^Port\\s+(\\d+)\\s+TCP\\s+statistics:$\", line)\n port = match.group(1)\n tcp_stat[port] = dict()\n\n if re.search(r\"Received\\s*Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Received\\s*Packets\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['rcvd_pkts'] = match.group(1)\n\n if re.search(r\"Received\\s*Bytes\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Received\\s*Bytes\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['rcvd_bytes'] = match.group(1)\n\n if re.search(r\"Sent\\s+Ctrl\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Ctrl\\s+Packets\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['sent_ctrl_pkts'] = match.group(1)\n\n if re.search(r\"Sent\\s+Ctrl\\s+Bytes\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Ctrl\\s+Bytes\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['sent_ctrl_bytes'] = match.group(1)\n\n if re.search(r\"Sent\\s+Data\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Data\\s+Packets\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['sent_data_pkts'] = match.group(1)\n\n if re.search(r\"Sent\\s+Data\\s+Bytes\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+Data\\s+Bytes\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['sent_data_bytes'] = match.group(1)\n\n if re.search(r\"Malloced\\s+TCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Malloced\\s+TCBs\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['mal_tcbs'] = match.group(1)\n\n if re.search(r\"Freed\\s+TCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Freed\\s+TCBs\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['free_tcbs'] = match.group(1)\n\n if re.search(r\"Not\\s+found\\s+TCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Not\\s+found\\s+TCBs\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['not_found_tcbs'] = match.group(1)\n\n if re.search(r\"TCB\\s+alloc\\s+errors\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"TCB\\s+alloc\\s+errors\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['tcb_alloc_err'] = match.group(1)\n\n if re.search(r\"Invalid\\s+checksum\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Invalid\\s+checksum\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['invl_csum'] = match.group(1)\n\n if re.search(r\"Small\\s+mbuf\\s+fragment\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Small\\s+mbuf\\s+fragment\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['small_mbuf_frag'] = match.group(1)\n\n if re.search(r\"TCP hdr\\s+\\w+\\s+small\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"TCP hdr\\s+\\w+\\s+small\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['tcp_hdr_too_small'] = match.group(1)\n\n if re.search(r\"Ctrl\\s+Failed\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Ctrl\\s+Failed\\s+Packets\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['ctrl_failed_pkts'] = match.group(1)\n\n if re.search(r\"DATA\\s+Failed\\s+Packets\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"DATA\\s+Failed\\s+Packets\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['data_failed_pkts'] = match.group(1)\n\n if re.search(r\"DATA\\s+Clone\\s+Failed\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"DATA\\s+Clone\\s+Failed\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['data_clone_failed'] = 
match.group(1)\n\n if re.search(r\"Reserved\\s+bit\\s+set\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Reserved\\s+bit\\s+set\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['resv_bit_set'] = match.group(1)\n\n if re.search(r\"Freed\\s+TCBs\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"Freed\\s+TCBs\\s*:\\s*(\\d+)\", line)\n tcp_stat[port]['freed_tcbs'] = match.group(1)\n\n print(json.dumps(tcp_stat, indent=4))\n return tcp_stat", "def _get_openvpn_stats(path=\"/var/run/openvpn/server-0.sock\"):\n try:\n logging.debug(\"Getting metrics from %s\", path)\n with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:\n sock.connect(path)\n sock.send(b\"load-stats\\n\")\n sock.setblocking(0)\n\n ready = select.select([sock], [], [], 5.0)\n if ready[0]:\n data = sock.recv(4096)\n if not data:\n logging.debug(\"No result?\")\n return 0\n data = data.decode('utf-8')\n logging.debug(\"Received %s\", data)\n data_match = re.search(r'nclients=(\\d+)', data)\n logging.debug(\"pattern match result %s\", data_match)\n if data_match:\n logging.debug(\"%s connections\", data_match.group(1))\n return int(data_match.group(1))\n except Exception as exc:\n logging.debug(\"Error gathering openvpn stats: %s\", exc)\n\n return 0", "def get_tsm_stats(self):\n print(\"### get tsm stats ###\")\n tsm_stats = dict()\n output = getattr(self.warp17_obj, 'shell')(command=\"show tsm statistics\", pattern=\"warp17>\").response()\n out = output.split(\"\\n\")\n out = [i.rstrip().strip() for i in out]\n for line in out:\n\n if len(line) > 0:\n if re.search(r\"^Port\\s+(\\d+)\\s+TSM\\s+statistics:$\", line) is not None:\n match = re.search(r\"^Port\\s+(\\d+)\\s+TSM\\s+statistics:$\", line)\n port = match.group(1)\n tsm_stats[port] = dict()\n\n if re.search(r\"^INIT\\s+:\\s+(\\d+)$\", line) is not None:\n match = re.search(r\"^INIT\\s+:\\s+(\\d+)$\", line)\n tsm_stats[port]['init'] = match.group(1)\n\n if re.search(r\"^LISTEN\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^LISTEN\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['listen'] = match.group(1)\n\n if re.search(r\"^SYN_SENT\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^SYN_SENT\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['syn_sent'] = match.group(1)\n\n if re.search(r\"^SYN_RECV\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^SYN_RECV\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['syn_rcvd'] = match.group(1)\n\n if re.search(r\"^ESTAB\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^ESTAB\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['estab'] = match.group(1)\n\n if re.search(r\"^FIN_WAIT_1\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^FIN_WAIT_1\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['fin_wait1'] = match.group(1)\n\n if re.search(r\"^FIN_WAIT_2\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^FIN_WAIT_2\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['fin_wait2'] = match.group(1)\n\n if re.search(r\"^LAST_ACK\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^LAST_ACK\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['last_ack'] = match.group(1)\n\n if re.search(r\"^CLOSING\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^CLOSING\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['closing'] = match.group(1)\n\n if re.search(r\"^TIME_WAIT\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^TIME_WAIT\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['time_wait'] = match.group(1)\n\n if re.search(r\"^CLOSE_WAIT\\s*:\\s*(\\d+)$\", line) is not None:\n match = 
re.search(r\"^CLOSE_WAIT\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['close_wait'] = match.group(1)\n\n if re.search(r\"^CLOSED\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^CLOSED\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['closed'] = match.group(1)\n\n if re.search(r\"^SYN\\s+retrans\\s+TO\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^SYN\\s+retrans\\s+TO\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['syn_retrans_to'] = match.group(1)\n\n if re.search(r\"^SYN\\/ACK\\s+retrans\\s+TO\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^SYN\\/ACK\\s+retrans\\s+TO\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['synack_retrans_to'] = match.group(1)\n\n if re.search(r\"^Retrans\\s+TO\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^Retrans\\s+TO\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['retrans_to'] = match.group(1)\n\n if re.search(r\"^Retrans\\s+bytes\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^Retrans\\s+bytes\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['retrans_bytes'] = match.group(1)\n\n if re.search(r\"^Missing\\s+seq\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^Missing\\s+seq\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['miss_seq'] = match.group(1)\n\n if re.search(r\"^SND\\s+win\\s+full\\s*:\\s*(\\d+)$\", line) is not None:\n match = re.search(r\"^SND\\s+win\\s+full\\s*:\\s*(\\d+)$\", line)\n tsm_stats[port]['snd_win_full'] = match.group(1)\n\n print(json.dumps(tsm_stats, indent=4))\n return tsm_stats", "def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')", "def nginx_status_metrics(self):\n\n try:\n nginx_status_conn = urllib2.urlopen(self.url)\n nginx_status_data = nginx_status_conn.read()\n self.nginx_status_available = True\n except urllib2.URLError:\n print 'status err URLError: check the URL and that Nginx running.'\n sys.exit(1)\n except Exception:\n print 'status err failed to obtain nginx status metrics.'\n sys.exit(1)\n\n if self.nginx_status_available:\n # Use regexes to parse /nginx_stats.\n match1 = re.search(r'Active connections:\\s+(\\d+)', nginx_status_data)\n match2 = re.search(r'\\s*(\\d+)\\s+(\\d+)\\s+(\\d+)', nginx_status_data)\n match3 = re.search(r'Reading:\\s*(\\d+)\\s*Writing:\\s*(\\d+)\\s*'\n 'Waiting:\\s*(\\d+)', nginx_status_data)\n print 'metric active_connections int64', int(match1.group(1))\n print 'metric accepted_connections int64', int(match2.group(1))\n print 'metric handled_connections int64', int(match2.group(2))\n print 'metric number_of_requests int64', int(match2.group(3))\n print 'metric connections_reading int64', int(match3.group(1))\n print 'metric connections_writing int64', int(match3.group(2))\n print 'metric connections_waiting int64', int(match3.group(3))\n print 'status ok succeeded in obtaining nginx status metrics.'\n else:\n print 'status err failed to obtain nginx status metrics.'\n sys.exit(1)", "def readNetStat(self):\n try:\n output = os.popen('netstat -an 2>/dev/null')\n sockets = {'sockets_tcp': 0, 'sockets_udp': 0, 'sockets_unix': 0, 'sockets_icm': 0}\n tcpDetails = {'sockets_tcp_ESTABLISHED': 0, 'sockets_tcp_SYN_SENT': 0,\n 'sockets_tcp_SYN_RECV': 0, 'sockets_tcp_FIN_WAIT1': 0, 'sockets_tcp_FIN_WAIT2': 0,\n 'sockets_tcp_TIME_WAIT': 0, 'sockets_tcp_CLOSED': 0, 'sockets_tcp_CLOSE_WAIT': 0,\n 'sockets_tcp_LAST_ACK': 0, 'sockets_tcp_LISTEN': 0, 'sockets_tcp_CLOSING': 0,\n 'sockets_tcp_UNKNOWN': 0}\n line = output.readline()\n while line != 
'':\n arg = line.split()\n proto = arg[0]\n if proto.find('tcp') == 0:\n sockets['sockets_tcp'] += 1\n state = arg[len(arg)-1]\n key = 'sockets_tcp_'+state\n if key in tcpDetails:\n tcpDetails[key] += 1\n if proto.find('udp') == 0:\n sockets['sockets_udp'] += 1\n if proto.find('unix') == 0:\n sockets['sockets_unix'] += 1\n if proto.find('icm') == 0:\n sockets['sockets_icm'] += 1\n\n line = output.readline()\n output.close()\n\n for key in list(sockets.keys()):\n self.data[key] = sockets[key]\n for key in list(tcpDetails.keys()):\n self.data[key] = tcpDetails[key]\n except IOError as ex:\n del ex\n self.logger.log(Logger.ERROR, \"ProcInfo: cannot get output from netstat command\")\n return", "def sstat(self):\n coh = self.cohorts[0]\n nsample = count_lines(wtccc2_sample_file(coh, opts.platform)) - 2 \n nfac = count_lines(opts.factor_file)\n if nsample != nfac:\n raise Exception('Number of individuals in sample file (%d) does not match number if factor file (%d)' % (\n (nsample, nfac)))\n for chrom in opts.chroms:\n system('gunzip -c %s | sstat -n %d -p -f %s > %s-%02d.sstat' % (\n gen_gz_file(coh, chrom, opts.platform), nsample, opts.factor_file, coh, chrom),\n verbose=True)", "def internal_stats(self):\n lastcount = 0\n lasthit = 0\n while True:\n eventlet.sleep(self.int_stats_interval)\n self.send_event(\"statsdlog.lines:%s|c\" %\n (self.counter - lastcount))\n lastcount = self.counter\n self.send_event(\"statsdlog.hits:%s|c\" % (self.hits - lasthit))\n lasthit = self.hits", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def get_http_stats(self):\n print(\"### get http stats ###\")\n http_stats = dict()\n output = getattr(self.warp17_obj, 'shell')(command=\"show http statistics\", pattern=\"warp17>\").response()\n out = output.split(\"\\n\")\n out = [i.rstrip().strip() for i in out]\n for line in out:\n if len(line) > 0:\n if re.search(r\"Port\\s+(\\d+)\\s+HTTP\\s+statistics:\", line) is not None:\n match = re.search(r\"Port\\s+(\\d+)\\s+HTTP\\s+statistics:\", line)\n port = match.group(1)\n http_stats[port] = dict()\n print(port)\n if re.search(r\"HTTP\\s+Req\\s+Build\\s+Err\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"HTTP\\s+Req\\s+Build\\s+Err\\s*:\\s*(\\d+)\", line)\n http_stats[port]['req_err'] = match.group(1)\n\n if re.search(r\"HTTP\\s+Resp\\s+Build\\s+Err\\s*:\\s*(\\d+)\", line) is not None:\n match = re.search(r\"HTTP\\s+Resp\\s+Build\\s+Err\\s*:\\s*(\\d+)\", line)\n http_stats[port]['resp_err'] = match.group(1)\n\n print(json.dumps(http_stats, indent=4))\n return http_stats", "def parseSockPerfOutput(filename, result, percentile, finalResult, params):\n with open(filename, 'r') as file:\n for line in file:\n if line.startswith('sockperf:'): # All output from sockperf starts with \"sockperf:\"\n params['TestTool'] = 'SockPerf'\n if line.startswith('sockperf: ---> '): \n if line.startswith('sockperf: ---> percentile'):\n percentiles = line.split(' ')\n percentile[percentiles[3]] = percentiles[-1].strip() \n else:\n if line.startswith('sockperf: ---> <MAX>'):\n percentile['Max_Latency_usec'] = line.split(' ')[-1].strip()\n else:\n percentile['Min_Latency_usec'] = line.split(' ')[-1].strip()\n if 'observations' in line:\n result['Observations'] = line.split(' ')[2]\n else:\n if line.startswith('sockperf: [Valid Duration] '): ## RunTime, SentMessages and ReceivedMessages\n removeEquals = line.split('=')\n removeSemiColon = []\n for item in removeEquals:\n removeSemiColon += 
item.split(';')\n result['RunTime'] = removeSemiColon[1].strip()[:-4]\n result['SentMessages'] = removeSemiColon[3].strip()\n result['ReceivedMessages'] = removeSemiColon[5].strip()\n \n if 'avg-lat' in line:\n ## avg-lat results slightly change the output is there results are >99 usecs\n ## This is due to sockperf format reserving 7 positions for the output\n ## as follows: avg-lat=%7.3lf\n ## Source: https://github.com/Mellanox/sockperf/blob/31a0b54b26e4619b79d1296ad608e03075e9e255/src/client.cpp\n if len(line.split('= ')) > 1: ## If avg-lat =<99 usecs there'll be space after '= '\n result['Avg_Latency_usec'] = line.split('= ')[1].split(' (')[0]\n else: ## If avg-lat are >99 usecs there'll be no space after '='\n result['Avg_Latency_usec'] = line.split('=')[5].split(' ')[0]\n # Common fields\n result['std_Deviation'] = line.split('std-dev=')[1].split(')')[0]\n result['isFullRTT'] = False\n\n # avg-rtt is used instead of avg-lat when selecting full rtt results\n if 'avg-rtt' in line:\n ## avg-rtt results slightly change the output is there results are >99 usecs\n ## This is due to sockperf format reserving 7 positions for the output\n ## as follows: avg-lat=%7.3lf\n ## Source: https://github.com/Mellanox/sockperf/blob/31a0b54b26e4619b79d1296ad608e03075e9e255/src/client.cpp\n if len(line.split('= ')) > 1: ## If avg-rtt =<99 usecs there'll be space after '= '\n result['Avg_Latency_usec'] = line.split('= ')[1].split(' (')[0]\n else: ## If avg-rtt are >99 usecs there'll be no space after '='\n result['Avg_Latency_usec'] = line.split('=')[5].split(' ')[0]\n # Common fields\n result['std_Deviation'] = line.split('std-dev=')[1].split(')')[0]\n result['isFullRTT'] = True\n\n else:\n if (debug): print('Unrecognized input: {0}\\n'.format(line))\n\n if (debug): print('SockPerf results: {0}\\n'.format(finalResult))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects the metrics from the gathers
def collect(self):
    collector = {}
    for gather in self.gathers:
        try:
            stats = gather.run_single_cycle(collector=collector)
            if stats:
                collector.update(stats)
        except Exception as ex:
            self._logger.exception(
                "Exception while collecting metrics for PID: %s of type: %s. Details: %s",
                self.pid,
                type(gather),
                repr(ex),
            )
    return collector
[ "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def collect(self):\n try:\n job_stats = self.get_job_stats()\n except Exception:\n self.log.exception(\"error occured fetching job hash\")\n return\n for state, user_counts in job_stats.iteritems():\n for user, count in user_counts.iteritems():\n metric_name = '{state}.{user}'.format(\n state=self.convert2metric(state),\n user=self.convert2metric(user)\n )\n self.publish(metric_name, count)", "def collect_and_publish_metrics(self):\n try:\n metrics = self.collect_metrics()\n self.publish_metrics(metrics)\n\n except Exception as e:\n self.log.exception('failed-to-collect-kpis', e=e)", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def _CreateStatsCollector(self):", "def collect(self):\n log.info('collecting metrics')\n\n temperature = self.sense.get_temperature()\n humidity = self.sense.get_humidity()\n pressure = self.sense.get_pressure()\n temperature_from_humidity = self.sense.get_temperature_from_humidity()\n temperature_from_pressure = self.sense.get_temperature_from_pressure()\n\n metric = Metric('rpi_sensehat', 'sensehat metric values', 'gauge')\n metric.add_sample('rpi_sensehat_temperature', value=temperature, labels={'name': 'SenseHat Temperature'})\n metric.add_sample('rpi_sensehat_temperature_from_humidity', value=temperature_from_humidity, labels={'name': 'SenseHat Temperature from humidity sensor'})\n metric.add_sample('rpi_sensehat_temperature_from_pressure', value=temperature_from_pressure, labels={'name': 'SenseHat Temperature from pressure sensor'})\n metric.add_sample('rpi_sensehat_humidity', value=humidity, labels={'name': 'SenseHat Humidity'})\n metric.add_sample('rpi_sensehat_pressure', value=pressure, labels={'name': 'SenseHat Pressure'})\n if self.orientation:\n roll = self.sense.orientation['roll']\n yaw = self.sense.orientation['yaw']\n pitch = self.sense.orientation['pitch']\n metric.add_sample('rpi_sensehat_roll', value=roll, labels={'name': 'SenseHat Roll'})\n metric.add_sample('rpi_sensehat_yaw', value=yaw, labels={'name': 'SenseHat Yaw'})\n metric.add_sample('rpi_sensehat_pitch', value=pitch, labels={'name': 'SenseHat Pitch'})\n\n yield metric", "def send_metrics(self):\n self.last_run = time.time()\n self.collect_stats()\n for metric in self.collapse_stats():\n 
self.send_to_speakeasy(metric)\n self.metrics = {}", "def collect(self) -> None:\n if self._callback is not None:\n self._callback()\n\n while self._metrics_to_export:\n for export_record in self._metrics_to_export.popleft():\n prometheus_metric = self._translate_to_prometheus(\n export_record\n )\n if prometheus_metric is not None:\n yield prometheus_metric", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def collect():\n date_format = \"%Y-%m-%dT%H:%M:%S\"\n root_dir = self.config['rsnap_log_home']\n metrics = {}\n for log in sorted(os.listdir(root_dir)):\n for line in reversed(open(os.path.join(root_dir, log))\n .readlines()):\n if '.pid' in line and 'rm' in line:\n end_date = line.split()[0].strip(\"[\").strip(\"]\")\n endd = datetime.strptime(end_date, date_format)\n if endd:\n metric_value = endd\n metrics[os.path.splitext(log)[0]] = metric_value\n for metric_name, metric_value in metrics.iteritems():\n self.publish(metric_name, metric_value)", "def load_metrics(self):\n limiters.get_limiter('api').get_token()\n self.metrics = \\\n [m for m in self.driver.list_metrics(self.entity_id, self.id)]", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def handle_metrics(request: Request) -> Response:\n registry = REGISTRY\n if (\n \"prometheus_multiproc_dir\" in os.environ\n or \"PROMETHEUS_MULTIPROC_DIR\" in os.environ\n ):\n registry = CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n\n headers = {\"Content-Type\": CONTENT_TYPE_LATEST}\n return Response(generate_latest(registry), status_code=200, headers=headers)", "def 
handle_metrics(request: Request) -> Response:\n registry = REGISTRY\n if (\n 'prometheus_multiproc_dir' in os.environ\n or 'PROMETHEUS_MULTIPROC_DIR' in os.environ\n ):\n registry = CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n\n headers = {'Content-Type': CONTENT_TYPE_LATEST}\n return Response(generate_latest(registry), status_code=200, headers=headers)", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def compute_metrics(self, results: list) -> dict:", "def _sync_metrics(self):\n monitored_metrics = {}\n available_metrics = {}\n for counter in self._perf_manager.perfCounter:\n metric_full_name = self._format_metric_full_name(counter)\n available_metrics[metric_full_name] = counter\n for mor in self._required_metrics.keys():\n mor_metrics = {}\n for metric in self._required_metrics[mor]:\n if metric in available_metrics.keys():\n counter = available_metrics[metric]\n if counter.key not in mor_metrics.keys():\n mor_metrics[counter.key] = self._get_metric_info(counter, metric)\n monitored_metrics[mor] = mor_metrics\n with self.update_lock:\n self._monitored_metrics = monitored_metrics\n self._has_metrics.set()", "def run(self):\r\n self.collect_data()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the process of the agent.
def current_process(self):
    return self._current_process
[ "def getProcess(self):\n\t\treturn self.getOpArgument(0)", "def getProcess(self):\n\t\treturn self.getOpArgument(1)", "def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subprocess.PIPE )", "def process_pid(self):\n return self._process_pid", "def get_my_process():\n return get_process_object(os.getpid())", "def getProcessManager(self): \n \n return self.procmgr", "def get_initial_process(self):\n\n if not \"behavior\" in self.results or not \"processes\" in self.results[\"behavior\"] or not len(self.results[\"behavior\"][\"processes\"]):\n return None\n\n return self.results[\"behavior\"][\"processes\"][0]", "def pid(self):\n return self._process.pid", "def _get_process(self, pid):\n if pid == 0:\n return None\n elif pid in self.process_cache:\n return self.process_cache[pid]\n else:\n return psutil.Process(pid)", "def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()", "def process_id(self):\n return self._process_id", "def wait_process(self):\n return self._wait_process", "def __get_id_process(self):\n self.__idProcess += 1\n return self.__idProcess", "def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def get_pid(self):\n if self.is_stopped():\n return None\n return self._process.pid", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def process(self) -> Union['psutil.Process', None]:\n psutil = attempt_import('psutil')\n pid = self.pid\n if pid is None:\n return None\n if not '_process' in self.__dict__ or self._process.pid != int(pid):\n try:\n self._process = psutil.Process(int(pid))\n except Exception as e:\n if self.pid_path.exists():\n self.pid_path.unlink()\n return None\n return self._process", "def getCurrentSceneProcessId(self):\r\n fn = self.function_table.getCurrentSceneProcessId\r\n result = fn()\r\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a process id, return all child processes (recursively)
def get_child_processes(self, ppid):
    all_children = []
    children_to_explore = set()
    for _pid in self.parent_to_children_map[ppid]:
        all_children.append(_pid)
        children_to_explore.add(_pid)
    # get the children 'recursively'
    while children_to_explore:  # the invariant
        child_to_explore = children_to_explore.pop()
        if not self.parent_to_children_map.get(child_to_explore):
            continue
        unvisited = self.parent_to_children_map[child_to_explore]
        for node in unvisited:
            if node not in all_children:
                children_to_explore.add(node)
                all_children.append(node)
    return list(set(all_children))
[ "def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids", "def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]", "def child_pids(pid):\n pid = str(pid)\n tasks = LocalPath('/proc').join(pid, 'task').listdir()\n return set(\n int(child_pid)\n for task in tasks\n for child_pid in task.join('children').read().split()\n )", "def get_children(self, recursive=False):\r\n if not self.is_running():\r\n name = self._process_name\r\n raise NoSuchProcess(self.pid, name)\r\n\r\n ret = []\r\n if not recursive:\r\n for p in process_iter():\r\n try:\r\n if p.ppid == self.pid:\r\n # if child happens to be older than its parent\r\n # (self) it means child's PID has been reused\r\n if self.create_time <= p.create_time:\r\n ret.append(p)\r\n except NoSuchProcess:\r\n pass\r\n else:\r\n # construct a dict where 'values' are all the processes\r\n # having 'key' as their parent\r\n table = defaultdict(list)\r\n for p in process_iter():\r\n try:\r\n table[p.ppid].append(p)\r\n except NoSuchProcess:\r\n pass\r\n # At this point we have a mapping table where table[self.pid]\r\n # are the current process's children.\r\n # Below, we look for all descendants recursively, similarly\r\n # to a recursive function call.\r\n checkpids = [self.pid]\r\n for pid in checkpids:\r\n for child in table[pid]:\r\n try:\r\n # if child happens to be older than its parent\r\n # (self) it means child's PID has been reused\r\n intime = self.create_time <= child.create_time\r\n except NoSuchProcess:\r\n pass\r\n else:\r\n if intime:\r\n ret.append(child)\r\n if child.pid not in checkpids:\r\n checkpids.append(child.pid)\r\n return ret", "def find_child_pids(pid):\n\n try:\n raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='])\n except RuntimeError as e:\n # Unexpected errors are the responsibility of the caller\n with excutils.save_and_reraise_exception() as ctxt:\n # Exception has already been logged by execute\n no_children_found = 'Exit code: 1' in str(e)\n if no_children_found:\n ctxt.reraise = False\n return []\n return [x.strip() for x in raw_pids.split('\\n') if x.strip()]", "def getChildrenProcesses(self, file_id):\n DBlogging.dblogger.debug(\"Entered getChildrenProcesses(): file_id: 
{0}\".format(file_id))\n product_id = self.getEntry('File', file_id).product_id\n\n # get all the process ids that have this product as an input\n return self.getProcessFromInputProduct(product_id)", "def children_of(self, pid, all=False):\r\n self._raise_unless_has_pid(pid)\r\n if all:\r\n all_children = set()\r\n self._calculate_children(pid, all_children)\r\n return all_children\r\n else:\r\n return copy(self._pid_to_children[pid])", "def pid_tree(pid):\n children = child_pids(pid)\n return set(\n pid\n for child in children\n for pid in pid_tree(child)\n ) | children", "def kill_process_children(pid):\n root_process_path = \"/proc/{pid}/task/{pid}/children\".format(pid=pid)\n if not os.path.isfile(root_process_path):\n return\n with open(root_process_path) as children_list_file:\n children_list_pid = children_list_file.read().split()\n\n for child_pid in children_list_pid:\n children_proc_path = \"/proc/%s/task/%s/children\" % (\n child_pid,\n child_pid,\n )\n if not os.path.isfile(children_proc_path):\n continue\n with open(children_proc_path) as children_list_file_2:\n children_list_pid_2 = children_list_file_2.read().split()\n for _pid in children_list_pid_2:\n try:\n os.kill(int(_pid), signal.SIGTERM)\n except ProcessLookupError:\n continue\n try:\n os.kill(int(child_pid), signal.SIGTERM)\n except ProcessLookupError:\n continue", "def get_ptree(process):\n if not isinstance(process, psutil.Process):\n process = psutil.Process(process)\n\n result = []\n children = process.get_children()\n if children:\n for child in children:\n if child.get_children():\n result.extend(get_ptree(child))\n result.append(child)\n else:\n result.append(child)\n return result", "def get_epistle_children(id, recursive = True):\n all_ep = []\n ep = dbsession.query(Epistle).filter(Epistle.parent == id).all()\n for e in ep:\n all_ep.append(e)\n get_epistle_children(e.id)\n return all_ep", "def GetAllPids(self, browser_process):\n\n pids = [browser_process.pid]\n try:\n children = browser_process.children(recursive=True)\n except psutil.NoSuchProcess:\n return []\n\n for child in children:\n pids.append(child.pid)\n\n return pids", "def _get_child_ids_recursively(self, directory_id):\n directory = Directory.objects.get(id=directory_id)\n children_ids = []\n for child in directory.children.filter(is_history=False).filter(is_managed=False):\n children_ids.append(child.id)\n if child.is_directory:\n children_ids.extend(self._get_child_ids_recursively(child.id))\n return children_ids", "def get_children_processes(session, parent_process, ptypes):\n\n query=\"select distinct pro.* from process pro \\\n inner join processiotracker pio on pio.processid=pro.processid \\\n inner join outputmapping om on om.trackerid=pio.trackerid \\\n inner join artifact_ancestor_map aam on om.outputartifactid=aam.artifactid\\\n inner join processiotracker pio2 on pio2.inputartifactid=aam.ancestorartifactid \\\n inner join process pro2 on pro2.processid=pio2.processid \\\n where pro2.processid={parent} and pro.typeid in ({typelist});\".format(parent=parent_process, typelist=\",\".join([str(x) for x in ptypes]))\n\n return session.query(Process).from_statement(text(query)).all()", "def getChildPIDs(self):\n\t\treturn self.pids", "def children(self):\n return [Process(self._cb, initial_data=child) for child in self.nodes[\"children\"]]", "def child_processes(self):\n\n # get a list of postmaster's children\n children = self.os_ops.get_process_children(self.pid)\n\n return [ProcessProxy(p) for p in children]", "def 
children_working(ppid):\n out = subprocess.getoutput(\"ps --ppid %i -o pid,state,vsize,rss,sz,start,cputime,etime\" % ppid)\n ps_lines = out.splitlines()\n ps_lines.pop(0)\n \n if len(ps_lines) > 0:\n for line in ps_lines:\n ps_str = line.split()\n pid = int(ps_str[0])\n if pid in mp_stat[\"cpid\"].keys():\n mp_stat[\"cpid\"][pid].add_ps_line(line)\n #print (\"child_stat.appended for kid: %i\" % pid )\n return True #ps returns something -> children still exist \n else:\n print (\" mpMonTools.children_working: no children exist for parent: %i\" % ppid)\n return False #ps returns nothing -> children either weren't born or died. \n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all running process ids
def get_running_processes(self):
    all_processes = []
    for _process in self.processes:
        all_processes.append(_process["pid"])
    return all_processes
[ "def get_all_running_processes():\n thispid = os.getpid()\n rpids = set()\n for pid in psutil.pids():\n try:\n if psutil.Process(pid).status() == 'running' or psutil.Process(pid).status() == 'disk-sleep':\n rpids.add(pid)\n except psutil.NoSuchProcess:\n continue\n return rpids", "def list_active_processes():\n return psutil.process_iter()", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]", "def get_pids():\n pids = []\n for name in os.listdir('/proc'):\n try:\n pids.append(int(name))\n except ValueError:\n pass\n return pids", "def _get_device_pids() -> list:\n name_list = config.get('device/emulator_progress_names')\n pids = set()\n for p in psutil.process_iter():\n # print(p.name())\n if p.name() in name_list:\n pids.add(p.pid)\n return list(pids)", "def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out", "def get_pids(app):\n if not app:\n return []\n pid = app['NSApplicationProcessIdentifier']\n pids = [pid]\n try:\n pids += map(int, subprocess.check_output(['pgrep', '-P %s' % pid]).split())\n except subprocess.CalledProcessError:\n pass\n return pids", "def running_processes(self):\n\n # Is it possible to have a machine that has no active connections?\n if ACTIVE_INTERNET_CONNECTIONS not in self.data:\n return set()\n return set(\n pg.split('/', 1)[1].strip()\n for pg in self.data[ACTIVE_INTERNET_CONNECTIONS]['PID/Program name']\n if '/' in pg\n )", "def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))", "def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]", "def power_bi_pid(cls):\n\n powerbi_pid = os.popen( # Looks for pid.\n 'tasklist | findstr \"PBIDesktop.exe\"'\n ).readlines()\n if not powerbi_pid:\n raise Exception(\"No PowerBI local instances.\")\n pid_list = [\n re.sub(\"\\\\s+\", \" \", f).split(\" \")[1] for f in powerbi_pid\n ] # Extract pid from string.\n return pid_list", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def GetAllPids(self, browser_process):\n\n pids = [browser_process.pid]\n try:\n children = browser_process.children(recursive=True)\n except psutil.NoSuchProcess:\n return []\n\n for child in children:\n pids.append(child.pid)\n\n return pids", "def running_processes(self, box=None, **kwargs):\n output = self.start(listpids=True, box=box, wait=True, **kwargs)\n return (int(pid) for pid in output.split())", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def pid_processes(self):\n 
return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def list_processes(self):\n proc = subprocess.Popen([\"ps\", \"x\", \"-o\", \"pid,command\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n pid = None\n try:\n for line in proc.stdout:\n if self.__basename in line:\n flds = line.split()\n\n # convert process ID to its integer value\n try:\n pid = int(flds[0])\n except ValueError:\n logging.error(\"Bad integer PID \\\"%s\\\" in \\\"%s\\\"\",\n flds[0], line.rstrip())\n continue\n\n if len(flds) == 2:\n args = None\n else:\n args = flds[2:]\n\n yield (pid, flds[1], args)\n\n proc.stdout.close()\n finally:\n proc.wait()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Like the get_matches_commandline method, given a string, match the processes on the name, but also return the matched processes' children
def get_matches_commandline_with_children(self, match_pattern):
    matched_pids = self.get_matches_commandline(match_pattern)
    for matched_pid in matched_pids:
        matched_pids.extend(self.get_child_processes(matched_pid))
    return list(set(matched_pids))
[ "def globsearch_procs(s: str) -> List[Process]:\n pat = re.compile(fnmatch.translate(s))\n\n procs_ = procs()\n procs_out = list(filter(lambda p: re.search(pat, cmdline(p)) is not None, procs_))\n notify(msg=f\"Glob search returned {len(procs_out)} matching processes\")\n return procs_out", "def findProcesses(s):", "def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None", "def pids_by_process_name(name: str) -> Optional[List[int]]:\n try:\n return list(map(int, subprocess.check_output([\"pidof\", name]).split()))\n except subprocess.CalledProcessError:\n return None", "def test_find_processes_handles_children(self, test_system_status):\n mocked_procs = test_system_status.system_status._processes[\n test_system_status.mocked_proc_name]\n assert mocked_procs[test_system_status.parent_process] \\\n == mocked_procs[test_system_status.child_process]", "def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids", "def find(name, exact=False):\n processes = run(\"ps aux | grep {0}\".format(name))\n res = []\n for line in processes.split(\"\\n\"):\n if not line.strip():\n continue\n line = RE_SPACES.split(line, 10)\n # We skip lines that are not like we expect them (sometimes error\n # message creep up the output)\n if len(line) < 11:\n continue\n user, pid, cpu, mem, vsz, rss, tty, stat, start, time, command = line\n if (exact and command == name) \\\n or (not exact and command.startswith(name)):\n res.append(pid)\n return res", "def test_find_processes_matches_cmdline(self, test_system_status):\n num_procs_found = len(test_system_status.system_status.find_processes_by_name(\n test_system_status.mocked_proc_name\n ))\n assert num_procs_found == test_system_status.num_mocked_procs", "def _findProcessIdByName(string):\n\n listOfProcessObjects = []\n\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name', 'create_time'])\n # Check if process name contains the given name string.\n if string.lower() in pinfo['name'].lower() :\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess):\n pass\n\n return listOfProcessObjects;", "def find_child_pids(pid):\n\n try:\n raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='])\n except RuntimeError as e:\n # Unexpected errors are the responsibility of the caller\n with excutils.save_and_reraise_exception() as ctxt:\n # Exception has already been logged by execute\n no_children_found = 'Exit code: 1' in str(e)\n if no_children_found:\n ctxt.reraise = False\n return []\n return [x.strip() for x in raw_pids.split('\\n') if x.strip()]", "def get_similar_processes():\n myprocess = get_my_process()\n result = []\n for item in psutil.process_iter():\n try:\n if item.cmdline() == myprocess.cmdline():\n result.append(item)\n except psutil.NoSuchProcess:\n pass\n return result", "def names_match_process_or_parents(proc, names):\n\n if proc is None:\n return False\n elif any(name == proc.name().lower() for name in names):\n return True\n elif 
proc.parent() is not None and proc.pid == proc.parent().pid:\n return False\n else:\n return names_match_process_or_parents(proc.parent(), names)", "def findprocessesofacontract( contractid ):\n functionflow(\"findprocessesofacontract\")\n processes = []\n sanitized_contractid=sanitizing(contractid,\"0-9\")\n debug(\"sanitized contractid \" +sanitized_contractid)\n pgrep_output = subprocess.check_output(['pgrep', '-d,' ,'-c '+str(sanitized_contractid)])\n pgrep_output_text = pgrep_output.decode('utf-8')\n pgrep_output_text = pgrep_output_text.rstrip('\\n')\n processes = pgrep_output_text.split(\",\")\n return processes", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def child_pids(pid):\n pid = str(pid)\n tasks = LocalPath('/proc').join(pid, 'task').listdir()\n return set(\n int(child_pid)\n for task in tasks\n for child_pid in task.join('children').read().split()\n )", "def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a process, record the metrics in a historical metrics collector. Collects the historical result of each metric per process in __metrics_history.
def record_metrics(self, pid, metrics):
    for _metric, _metric_value in metrics.items():
        if not self.__metrics_history[pid].get(_metric):
            self.__metrics_history[pid][_metric] = []
        self.__metrics_history[pid][_metric].append(_metric_value)
        # only keep the last 2 running history for any metric
        self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][-2:]
[ "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def _get_cpu_util_process_history(self):\n return self.__cpu_util_process_history", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def plot_history(his, metrics):\n for metric in metrics:\n plt.plot(his.history[metric], label=metric)\n plt.legend()", "def addMonitoring(process):\n import FWCore.ParameterSet.Config as cms\n \n process.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\",\n jobReportOutputOnly = cms.untracked.bool(True)\n )\n process.Timing = cms.Service(\"Timing\",\n summaryOnly = cms.untracked.bool(True)\n )\n \n return process", "def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0", "def get_histories(self, process_instance_id):\n result = 
self.client.service.getHistories(self.user, self.password, self.company_id, self.user_id,\n process_instance_id)\n return result", "def _process_history(self, game_id, winner):\n for hist in self.histories[game_id]:\n v = 1 if winner == hist[1] else -1\n self.exp_queue.put((hist[0], hist[2], v))", "def update(self, history):\n for metric_name, metric_value in history.items():\n if metric_name not in self.history.keys():\n self.history[metric_name] = [metric_value]\n else:\n self.history[metric_name].append(metric_value)", "def on_step_end(self, step, logs):\n self.metrics[logs['episode']].append(logs['metrics'])", "def step_changedProcesses(self):\n self.cmd.result.output = \"\\n\".join(\n (\n \" PID RSS TIME COMMAND\",\n \"124 1 00:00:00 someJob a b c\",\n \"456 1 00:00:00 someOtherJob 1 2 3\",\n )\n )\n\n results = ParsedResults()\n ps().processResults(self.cmd, results)\n\n self.assertEqual(len(results.values), 3)\n\n cpu, cpuValue = _getDatapoint(results.values, \"cpu\")\n self.assertIsNotNone(cpu)\n self.assertEqual(cpuValue, 0)\n\n mem, memValue = _getDatapoint(results.values, \"mem\")\n self.assertIsNotNone(mem)\n self.assertEqual(memValue, 2048)\n\n count, countValue = _getDatapoint(results.values, \"count\")\n self.assertIsNotNone(count)\n self.assertEqual(countValue, 2)\n\n self.assertEqual(len(results.events), 1)\n\n event = results.events[0]\n\n self.assertEqual(event.get(\"severity\"), 3)\n\n message = event.get(\"message\", \"\")\n\n begin = message.find(\"[\") + 1\n end = message.find(\"]\", begin)\n discardedPids = [p.strip() for p in message[begin:end].split(\",\")]\n\n begin = message.find(\"[\", end + 1) + 1\n end = message.find(\"]\", begin)\n newPids = [p.strip() for p in message[begin:end].split(\",\")]\n\n self.assertSetEqual(set(discardedPids), set([\"345\", \"123\", \"234\"]))\n self.assertSetEqual(set(newPids), set([\"124\", \"456\"]))", "def handle_metrics(request: Request) -> Response:\n registry = REGISTRY\n if (\n \"prometheus_multiproc_dir\" in os.environ\n or \"PROMETHEUS_MULTIPROC_DIR\" in os.environ\n ):\n registry = CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n\n headers = {\"Content-Type\": CONTENT_TYPE_LATEST}\n return Response(generate_latest(registry), status_code=200, headers=headers)", "def handle_metrics(request: Request) -> Response:\n registry = REGISTRY\n if (\n 'prometheus_multiproc_dir' in os.environ\n or 'PROMETHEUS_MULTIPROC_DIR' in os.environ\n ):\n registry = CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n\n headers = {'Content-Type': CONTENT_TYPE_LATEST}\n return Response(generate_latest(registry), status_code=200, headers=headers)", "def register_process_statistics():\n if resource is None:\n log.warning(\n 'Unable to import resource module, memory diags not available'\n )\n return\n\n rusage_fields = [\n ('Execution time in user mode (seconds)', 'ru_utime'),\n ('Execution time in kernel mode (seconds)', 'ru_stime'),\n ('Maximum Resident Set Size (KB)', 'ru_maxrss'),\n ('Soft page faults', 'ru_minflt'),\n ('Hard page faults', 'ru_majflt'),\n ('Input events', 'ru_inblock'),\n ('Output events', 'ru_oublock'),\n ('Voluntary context switches', 'ru_nvcsw'),\n ('Involuntary context switches', 'ru_nivcsw'),\n ]\n\n def dump(log):\n process = resource.getrusage(resource.RUSAGE_SELF)\n for name, field in rusage_fields:\n data = getattr(process, field, 'None')\n log.info('%s: %s', name, data)\n\n register_diags('Process Statistics', dump)", "def emit_processes(self, event):\n cache = dict()\n\n for data 
in self.supervisor.rpc.getAllProcessInfo():\n pid = data.pop('pid')\n cache[pid] = self._get_process(pid)\n self.log.debug(\"Emitting signal for process {0}({1})\".format(\n data['name'], pid))\n supermann.signals.process.send(self, process=cache[pid], data=data)\n\n self.process_cache = cache", "def _process_stats(pid, service, monit_name, private_ip):\n # Get information about processes hierarchy (the process and its children)\n process = psutil.Process(pid)\n children_info = [child.as_dict(PROCESS_ATTRS)\n for child in process.children()]\n process_info = process.as_dict(PROCESS_ATTRS)\n\n # CPU usage\n raw_cpu = process_info['cpu_times']\n cpu = ProcessCPU(user=raw_cpu.user, system=raw_cpu.system,\n percent=process_info['cpu_percent'])\n children_cpu = ProcessCPU(user=raw_cpu.children_user,\n system=raw_cpu.children_system,\n percent=sum(child['cpu_percent']\n for child in children_info))\n\n # Memory usage\n raw_mem = process_info['memory_full_info']\n memory = ProcessMemory(resident=raw_mem.rss, virtual=raw_mem.vms,\n unique=raw_mem.uss)\n children_raw_mem = [child['memory_full_info'] for child in children_info]\n children_memory = ProcessMemory(\n resident=sum(m.rss for m in children_raw_mem),\n virtual=sum(m.vms for m in children_raw_mem),\n unique=sum(m.uss for m in children_raw_mem)\n )\n\n # Summarized values of DiskIO usage\n raw_disk = process_info['io_counters']\n disk_io = ProcessDiskIO(read_count=raw_disk.read_count,\n write_count=raw_disk.write_count,\n read_bytes=raw_disk.read_bytes,\n write_bytes=raw_disk.write_bytes)\n children_raw_disk = [child['io_counters'] for child in children_info]\n children_disk_io = ProcessDiskIO(\n read_count=sum(d.read_count for d in children_raw_disk),\n write_count=sum(d.write_count for d in children_raw_disk),\n read_bytes=sum(d.read_bytes for d in children_raw_disk),\n write_bytes=sum(d.write_bytes for d in children_raw_disk)\n )\n\n # Summarized values of Network usage\n network = ProcessNetwork(connections_num=len(process_info['connections']))\n children_network = ProcessNetwork(\n connections_num=sum(len(child['connections']) for child in children_info)\n )\n\n # Summarized values about Threading\n threads_num = len(process_info['threads'])\n children_threads_num = sum(len(child['threads']) for child in children_info)\n\n children_sum = ProcessChildrenSum(\n cpu=children_cpu, memory=children_memory, disk_io=children_disk_io,\n network=children_network, threads_num=children_threads_num\n )\n\n return ProcessStats(\n pid=pid, monit_name=monit_name, unified_service_name=service.name,\n application_id=service.get_application_id_by_monit_name(monit_name),\n port=service.get_port_by_monit_name(monit_name), private_ip=private_ip,\n cmdline=process_info['cmdline'], cpu=cpu, memory=memory, disk_io=disk_io,\n network=network, threads_num=threads_num, children_stats_sum=children_sum,\n children_num=len(children_info)\n )", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. 
Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def Query(self, program_name):\n if not self._running_metrics:\n raise NotRunning(\"No metrics are running.\")\n\n # Get the bare executable name.\n tail = os.path.split(program_name)[1]\n root = os.path.splitext(tail)[0]\n\n _LOGGER.info(\"Querying performance counters for '%s': %s.\",\n root, self._running_metrics)\n lines = self._Run(_PTT, [\"dump\", \"-pl\", root])\n\n values = {}\n metrics = None\n\n for line in lines:\n line = line.strip()\n\n # Keep an eye out for the line containing the metric names. If\n # the header pattern is matched, then we are guaranteed to have at\n # least 5 items after the split.\n if not metrics:\n if re.match(self._DUMP_HEADER, line):\n columns = re.split(\"\\s+\", line)\n metrics = columns[4:]\n\n if set(metrics) != set(self._running_metrics):\n raise UnexpectedOutput(\"Reported metrics do not match running \"\n \"metrics: %s.\" % metrics)\n\n for metric in metrics:\n values[metric] = {}\n\n continue\n\n # Is this a PID without data? Then store zero values for the metrics.\n match = re.match(self._NO_DATA, line)\n if match:\n pid = int(match.group(1))\n for metric in metrics:\n values[metric][pid] = 0\n\n continue\n\n # Is this a PID/TID/Disp/Intr/Metrics line? Then tally the\n # running sum for the PID. We manually summarize because\n # summary lines are only produced if there is more than one\n # thread for a PID.\n if re.match(self._DUMP_DATA, line):\n data = re.split(\"\\s+\", line)\n if len(data) == len(metrics) + 4:\n pid = int(data[0])\n for i, metric in enumerate(metrics):\n count = int(data[4+i])\n values[metric][pid] = values[metric].get(pid, 0) + count\n\n if not metrics:\n raise UnexpectedOutput(\"No results seen for metrics: %s.\" %\n self._running_metrics)\n\n return values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
At the beginning of each process metric calculation, the absolute (non-cumulative) metrics need to be overwritten with the combined result of the process(es). Only the cumulative metrics need the previous value to calculate a delta. We should set the absolute metrics to 0 at the beginning of this "epoch".
def _reset_absolute_metrics(self):
    for pid, process_metrics in self.__metrics_history.items():
        for _metric, _metric_values in process_metrics.items():
            if not _metric.is_cumulative:
                self.__aggregated_metrics[_metric] = 0
[ "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def _update(self):\n aps = []\n recall, precs = self._recall_prec()\n for ll, rec, prec in zip(range(len(precs)), recall, precs):\n ap = self._average_precision(rec, prec)\n aps.append(ap)\n if self.num is not None and ll < (self.num - 1):\n self.sum_metric[ll] = ap\n self.num_inst[ll] = 1\n if self.num is None:\n self.num_inst = 1\n self.sum_metric = np.nanmean(aps)\n else:\n self.num_inst[-1] = 1\n self.sum_metric[-1] = np.nanmean(aps)", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def getCummulativeValues(self):\n self.cumulativePhaseHeightInRing1 = np.cumsum(self.phaseHeightInRing1)\n self.cumulativePhaseHeightInRing2 = np.cumsum(self.phaseHeightInRing2)\n self.cumulativeLeftCriticalPointsRing1 = np.cumsum(self.leftCriticalPointsRing1)\n self.cumulativeRightCriticalPointsRing1 = np.cumsum(self.rightCriticalPointsRing1)\n self.cumulativeLeftCriticalPointsRing2 = np.cumsum(self.leftCriticalPointsRing2)\n self.cumulativeRightCriticalPointsRing2 = np.cumsum(self.rightCriticalPointsRing2)\n\n if(self.init1 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing1):\n self.cumulativeLeftCriticalPointsRing1[index] = value + self.init1\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing1):\n self.cumulativeRightCriticalPointsRing1[index] = value + self.init1\n\n if(self.init2 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing2):\n self.cumulativeLeftCriticalPointsRing2[index] = value + self.init2\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing2):\n self.cumulativeRightCriticalPointsRing2[index] = value + self.init2\n\n self.cumulativePhaseHeightInRing1 = np.insert(self.cumulativePhaseHeightInRing1, 0, 0.0)\n self.cumulativePhaseHeightInRing2 = np.insert(self.cumulativePhaseHeightInRing2, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing1 = np.insert(self.cumulativeLeftCriticalPointsRing1, 0, 0.0)\n self.cumulativeRightCriticalPointsRing1 = np.insert(self.cumulativeRightCriticalPointsRing1, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing2 = np.insert(self.cumulativeLeftCriticalPointsRing2, 0, 0.0)\n self.cumulativeRightCriticalPointsRing2 = np.insert(self.cumulativeRightCriticalPointsRing2, 0, 0.0)", "def update_statistics(self):\n avg_disc_loss = self.mid_epoch_stats[\"avg_disc_loss\"]\n avg_gen_loss = self.mid_epoch_stats[\"avg_gen_loss\"]\n avg_combined_loss = self.mid_epoch_stats[\"avg_combined_loss\"]\n\n 
true_positives = self.mid_epoch_stats[\"true_positives\"]\n false_positives = self.mid_epoch_stats[\"false_positives\"]\n true_negatives = self.mid_epoch_stats[\"true_negatives\"]\n false_negatives = self.mid_epoch_stats[\"false_negatives\"]\n\n self.generator_losses.append(avg_gen_loss[0] / avg_gen_loss[1])\n self.discriminator_losses.append(avg_disc_loss[0] / avg_disc_loss[1])\n self.combined_losses.append(avg_combined_loss[0] / avg_combined_loss[1])\n self.discriminator_accuracy.append((true_positives + true_negatives) /\n (true_positives + false_positives + true_negatives + false_negatives))\n\n self.discriminator_precision.append(0 if (true_positives + false_positives) == 0 else true_positives / (true_positives + false_positives))\n self.discriminator_recall.append(0 if (true_positives + false_negatives) == 0 else true_positives / (true_positives + false_negatives))\n\n # reset epoch-level stats\n self.mid_epoch_stats = {\n \"true_positives\": 0,\n \"false_positives\": 0,\n \"true_negatives\": 0,\n \"false_negatives\": 0,\n \"avg_disc_loss\": [0, 0],\n \"avg_gen_loss\": [0, 0],\n \"avg_combined_loss\": [0, 0]\n }", "def additional_reset_steps(self):\n # compute current objectives\n curr_optimization_metric = self.get_current_optimization_metrics()\n\n self.curr_optimization_metric = deepcopy(curr_optimization_metric)\n self.init_optimization_metric = deepcopy(curr_optimization_metric)\n self.prev_optimization_metric = deepcopy(curr_optimization_metric)", "def increment_metrics(self, results):\n self.timesteps_total += sum([result['timesteps_total'] for result in results])\n self.episodes_total += len(results)\n self.generation += 1", "def calculate_epoch_metrics(self, val_metrics=False):\n metric_names = self.tracked_metrics()\n\n for metric in metric_names:\n if val_metrics:\n mean_val = np.array(self.metrics_history[f\"val_{metric}\"][\"batch_vals\"]).mean()\n self.metrics_history[f\"val_{metric}\"][\"epoch_vals\"].append(mean_val)\n else:\n mean_val = np.array(self.metrics_history[metric][\"batch_vals\"]).mean()\n self.metrics_history[metric][\"epoch_vals\"].append(mean_val)", "def calculate_final_stats(self):\n self.run_stats['k/d'] = self.run_stats['kills'] * 1.0 / max(1, self.run_stats['deaths'])", "def reset_running_stats(self):\n super().reset_running_stats()\n self.inv_learning_rate.zero_()\n self.num_batches.zero_()", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n #would be nice to have dt^{n+1} alone\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#figure this out\n #mwf debug\n logEvent(\"HaukeSangalliTrackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n #mwf should be\n q[('mt',ci)] -= self.subgridTmp[ci]\n #don't think this matters right now because called after calculateSubgridError\n self.subgridTmp_ip[ci][:] = self.subgridError_ip_last[ci]\n self.subgridTmp_ip[ci] *= dtInv\n self.subgridTmp_ip[ci] *= self.subgridErrorMassCoef_ip_last[ci]#figure this out\n self.cip[('mt',ci)] -= self.subgridTmp_ip[ci]", "def accumulate_metrics__(metrics, cum_metrics, batch_metrics, validation_dataset=False):\n if metrics is not None:\n for metric in metrics:\n if validation_dataset:\n cum_metrics['val_%s' % metric] += batch_metrics['val_%s' % 
metric]\n else:\n cum_metrics[metric] += batch_metrics[metric]\n\n # check for loss separately\n if 'loss' not in metrics:\n if validation_dataset:\n cum_metrics['val_loss'] += batch_metrics['val_loss']\n else:\n cum_metrics['loss'] += batch_metrics['loss']\n return cum_metrics", "def update_running_moments(self, reward_batch):\n new_count = len(reward_batch)\n new_sum = torch.sum(reward_batch)\n new_mean = new_sum / new_count\n\n curr_mean = self.reward_sum / self.reward_count\n new_m2 = torch.sum((reward_batch - new_mean) ** 2) + (\n (self.reward_count * new_count)\n / (self.reward_count + new_count)\n * (new_mean - curr_mean) ** 2\n )\n\n self.reward_count += new_count\n self.reward_sum += new_sum\n self.reward_m2 += new_m2", "def compute_metrics(self, previous):\n delta_t = self.time_difference(previous)\n delta_x = self.distance(previous)\n vel = 0\n delta_v = 0\n acc = 0\n if delta_t != 0:\n vel = delta_x/delta_t\n delta_v = vel - previous.vel\n acc = delta_v/delta_t\n\n self.dt = delta_t\n self.dx = delta_x\n self.acc = acc\n self.vel = vel\n return self", "def _m_step(self):\n log_mu_ij = ((self.log_mu[0,:] * np.ones((self.data.num_workers, self.data.num_instance))).transpose())\n log_one_minus_mu_ij = ((self.log_mu[1,:] * np.ones((self.data.num_workers, self.data.num_instance))).transpose())\n alpha_log_denomi = sp.special.logsumexp(log_mu_ij, axis=0, b=(self.data.y != 0))\n alpha_log_nume_pos = sp.special.logsumexp(log_mu_ij, axis=0, b=(self.data.y == 1))\n alpha_log_nume_neg = sp.special.logsumexp(log_mu_ij, axis=0, b=(self.data.y == -1))\n beta_log_denomi = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(self.data.y != 0))\n beta_log_nume_pos = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(self.data.y == 1))\n beta_log_nume_neg = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(self.data.y == -1))\n self.log_p = sp.special.logsumexp(self.log_mu, axis=1)\n self.log_p = self.log_p - sp.special.logsumexp(self.log_p)\n self.log_alpha = np.array([alpha_log_nume_pos - alpha_log_denomi, alpha_log_nume_neg - alpha_log_denomi])\n self.log_beta = np.array([beta_log_nume_neg - beta_log_denomi, beta_log_nume_pos - beta_log_denomi])", "def processed_cum_overall(self):\n self.processed_cum_overall = (\n self.cumulative_stats_for_team_each_year\n [['Season','TeamID','win_rate','total_score','total_opponent_score','fgp','fg3p','ftp', 'total_rebounds','total_off_rebounds','total_def_rebounds',\n 'total_off_rebounds_percent','total_def_rebounds_percent','total_rebound_possession_percent','total_rebound_possessiongain_percent','total_blocks',\n 'total_assists','total_steals','total_turnover','total_personalfoul','total_block_opp_FGA_percent','total_assist_per_fgm','total_assist_turnover_ratio',\n 'expectation_per_game','avg_lose_score_by','avg_win_score_by']]\n )", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n 
self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(1,self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"NS_ASGS trackSubScales accumulating delta u^n ci=%s .abs.max= %s dm.max=%s \" % (ci,max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n\n q[('mt',ci)] -= self.subgridTmp[ci]", "def set_momentum_zero(self):\n #self.pruner.optimizer\n for wrapper in self.pruner.modules_wrapper:\n if wrapper.type=='BatchNorm2d':\n filters=wrapper.module.weight.size(0)\n elif wrapper.type=='ReLU':\n filters=wrapper.weight_mask.size(0)\n for idx in range(filters):\n if wrapper.type=='BatchNorm2d':\n if wrapper.weight_mask.data[idx]==0:continue\n if 'momentum_buffer' in self.pruner.optimizer.state[wrapper.weight_mask].keys():\n self.pruner.optimizer.state[wrapper.weight_mask]['momentum_buffer'][idx] *= 0.0\n _logger.debug('momentum set to be zero!!!')\n _logger.debug(idx)\n _logger.debug(self.pruner.optimizer.state[wrapper.weight_mask]['momentum_buffer'][idx])\n elif wrapper.type=='ReLU':\n if wrapper.module_added.weight_mask.data[idx]==0:\n continue\n if 'momentum_buffer' in self.pruner.optimizer.state[wrapper.module_added.weight_mask].keys():\n self.pruner.optimizer.state[wrapper.module_added.weight_mask]['momentum_buffer'][idx] *= 0.0\n _logger.debug('momentum set to be zero!!!')\n _logger.debug(idx)\n _logger.debug(self.pruner.optimizer.state[wrapper.module_added.weight_mask]['momentum_buffer'][idx])\n _logger.debug('momentum buffer set zero done for wrapper: ')\n _logger.debug(wrapper.name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the aggregated metric values based on the currently running processes and the historical metric record
def _calculate_aggregated_metrics(self):

    # using the historical values, calculate the aggregate
    # there are two kinds of metrics:
    # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)
    # b) absolute metrics - the last absolute value is used

    running_pids_set = set(self.__pids)

    for pid, process_metrics in self.__metrics_history.items():
        for _metric, _metric_values in process_metrics.items():
            if not self.__aggregated_metrics.get(_metric):
                self.__aggregated_metrics[_metric] = 0
            if _metric.is_cumulative:
                if pid in running_pids_set:
                    if len(_metric_values) > 1:
                        # only report the cumulative metrics for more than one sample
                        self.__aggregated_metrics[_metric] += (
                            _metric_values[-1] - _metric_values[-2]
                        )
            else:
                if pid in running_pids_set:
                    # absolute metric - accumulate the last reported value
                    self.__aggregated_metrics[_metric] += _metric_values[-1]
[ "def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0", "def _update(self):\n aps = []\n recall, precs = self._recall_prec()\n for ll, rec, prec in zip(range(len(precs)), recall, precs):\n ap = self._average_precision(rec, prec)\n aps.append(ap)\n if self.num is not None and ll < (self.num - 1):\n self.sum_metric[ll] = ap\n self.num_inst[ll] = 1\n if self.num is None:\n self.num_inst = 1\n self.sum_metric = np.nanmean(aps)\n else:\n self.num_inst[-1] = 1\n self.sum_metric[-1] = np.nanmean(aps)", "def compute_global_indicators(msm):", "def aggregate(global_params, running_aggregate, aggregation_result):\n running_ref = running_aggregate.get_ref('values')\n agg_ref = aggregation_result.get_ref('values')\n for i in range(global_params.dims):\n running_ref[i] += agg_ref[i]\n return running_aggregate", "def _compute_current_performance(self):\n\n jobs = self.jobs[-DEFAULT_NUMBER_LAST_RUNS:]\n return self._compute_performance(jobs)", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def calculate(self, data, *args, **kwargs):\n \n # Sets up priority queue, where data is prioritized by date\n queue = []\n \n # Sets up data dictionaries that will be used to contain calculated data\n severity_data = OrderedDict()\n status_data = OrderedDict()\n current_state = { }\n \n # List of fields used\n fields = [PROJECT, TRANS, STATUS, PRIORITY]\n \n # Populates priority queue with appropriate data\n for key, param_data in data.iteritems():\n # Grabs param_data fields\n priority = param_data.get(PRIORITY, None)\n hist = param_data.get(HIST, None)\n proj = param_data.get(PROJECT, self.project)\n \n # Adds the historical statuses of the current JIRA item to the queue\n if (hist):\n for i, date in enumerate(hist[TRANS]):\n heapq.heappush(queue, (date, proj, key, hist[NEW][i], priority))\n \n # Iterates through dates to populate status and severity data dictionaries\n if (queue):\n earliest = queue[0][0]\n for date in get_historical_dates(earliest, self.extraction_day, False):\n # Pops items off queue until queue is empty or date limit is 
reached\n while(queue and queue[0][0].date() <= date):\n curr, proj, key, status, priority = heapq.heappop(queue)\n \n # Maps the key's current parameters, overwriting previous mapping\n current_state[key] = { }\n for field, value in zip(fields, [proj, curr, status, priority]):\n current_state[key][field] = value\n \n # Sets severity and status metric data at the given date\n severity_data[date] = self._get_severity_data(current_state)\n status_data[date] = self._get_status_data(current_state)\n \n # Gets age data separately from status and severity\n age_map = self._get_average_age_data(data)\n \n return severity_data, status_data, age_map", "def populate_metric_values(self):\n self.new_counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]] = get_counter_metrics(\n self.counter_metric_specs, \n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n \n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_counter_metrics(self.new_counter_metrics[detailed_version.id])\n\n self.aggregated_counter_metrics = self.get_aggregated_counter_metrics()\n\n self.new_ratio_metrics: Dict[iter8id, Dict[iter8id, RatioDataPoint]] = get_ratio_metrics(\n self.ratio_metric_specs, \n self.counter_metric_specs, \n self.aggregated_counter_metrics,\n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n\n # This is in the shape of a Dict[str, RatioMaxMin], where the keys are ratio metric ids\n # and values are their max mins. \n\n self.ratio_max_mins = self.get_ratio_max_mins()\n\n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_ratio_metrics(\n self.new_ratio_metrics[detailed_version.id]\n )", "def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))", "def compute_metrics(self, results: list) -> dict:", "def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def get_current(self):\n monit_status = subprocess.check_output('monit status', shell=True)\n processes_stats = []\n for match in MONIT_PROCESS_PATTERN.finditer(monit_status):\n monit_name = match.group('name')\n pid = int(match.group('pid'))\n service = find_service_by_monit_name(monit_name)\n private_ip = appscale_info.get_private_ip()\n try:\n stats = _process_stats(pid, service, monit_name, private_ip)\n processes_stats.append(stats)\n except psutil.Error as err:\n logging.warn(u\"Unable to get process stats for {monit_name} ({err})\"\n .format(monit_name=monit_name, err=err))\n stats = ProcessesStatsSnapshot(\n utc_timestamp=time.mktime(datetime.utcnow().timetuple()),\n processes_stats=processes_stats\n )\n if time.time() - self.last_debug > 
LOCAL_STATS_DEBUG_INTERVAL:\n ProcessesStatsSource.last_debug = time.time()\n logging.debug(stats)\n return stats", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def _update_metrics(self):\n raise NotImplementedError", "def Query(self, program_name):\n if not self._running_metrics:\n raise NotRunning(\"No metrics are running.\")\n\n # Get the bare executable name.\n tail = os.path.split(program_name)[1]\n root = os.path.splitext(tail)[0]\n\n _LOGGER.info(\"Querying performance counters for '%s': %s.\",\n root, self._running_metrics)\n lines = self._Run(_PTT, [\"dump\", \"-pl\", root])\n\n values = {}\n metrics = None\n\n for line in lines:\n line = line.strip()\n\n # Keep an eye out for the line containing the metric names. If\n # the header pattern is matched, then we are guaranteed to have at\n # least 5 items after the split.\n if not metrics:\n if re.match(self._DUMP_HEADER, line):\n columns = re.split(\"\\s+\", line)\n metrics = columns[4:]\n\n if set(metrics) != set(self._running_metrics):\n raise UnexpectedOutput(\"Reported metrics do not match running \"\n \"metrics: %s.\" % metrics)\n\n for metric in metrics:\n values[metric] = {}\n\n continue\n\n # Is this a PID without data? Then store zero values for the metrics.\n match = re.match(self._NO_DATA, line)\n if match:\n pid = int(match.group(1))\n for metric in metrics:\n values[metric][pid] = 0\n\n continue\n\n # Is this a PID/TID/Disp/Intr/Metrics line? Then tally the\n # running sum for the PID. We manually summarize because\n # summary lines are only produced if there is more than one\n # thread for a PID.\n if re.match(self._DUMP_DATA, line):\n data = re.split(\"\\s+\", line)\n if len(data) == len(metrics) + 4:\n pid = int(data[0])\n for i, metric in enumerate(metrics):\n count = int(data[4+i])\n values[metric][pid] = values[metric].get(pid, 0) + count\n\n if not metrics:\n raise UnexpectedOutput(\"No results seen for metrics: %s.\" %\n self._running_metrics)\n\n return values", "def compute_metrics(self, timeframe: int, metric_data: List[MetricEntry]) -> None:\n raise NotImplementedError(\"Metric must implement compute_metrics functionality\")", "def compute_statistics(self):", "def _get_cpu_util_process_history(self):\n return self.__cpu_util_process_history" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect the per-process tracker for the monitored process(es).
def gather_sample(self):

    for _pid in self._select_processes():
        if not self.__trackers.get(_pid):
            self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)

    self._reset_absolute_metrics()

    for _tracker in self.__trackers.values():
        _metrics = _tracker.collect()
        self.record_metrics(_tracker.pid, _metrics)

    self._calculate_aggregated_metrics()
    self._remove_dead_processes()

    self.print_metrics()
[ "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def monitor():\n set()\n\n # Loop through all processes checking if they are making children\n while True:\n nc_count = Counter()\n nc_to_proc = {}\n\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=\n [\"pid\", \"name\", \"cmdline\", \"create_time\", \"username\"]\n )\n nc_tup = (pinfo[\"name\"], tuple(pinfo[\"cmdline\"]))\n\n nc_count[nc_tup] += 1\n\n if nc_tup not in nc_to_proc.keys():\n nc_to_proc[nc_tup] = set([proc])\n else:\n nc_to_proc[nc_tup].add(proc)\n\n except psutil.NoSuchProcess:\n pass\n\n check_if_bomb(nc_count, nc_to_proc)", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def GetPublishedProcesses():\r\n pass", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def processes(self):\n procs = sorted(self.process_data.values(), key=lambda ptd: ptd.process_start_time_sec)\n return procs", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def _get_cpu_util_process_history(self):\n return self.__cpu_util_process_history", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def _process_stats(pid, service, monit_name, private_ip):\n # Get information about processes hierarchy (the process and its children)\n process = psutil.Process(pid)\n children_info = [child.as_dict(PROCESS_ATTRS)\n for child in process.children()]\n process_info = process.as_dict(PROCESS_ATTRS)\n\n # CPU usage\n raw_cpu = process_info['cpu_times']\n cpu = ProcessCPU(user=raw_cpu.user, system=raw_cpu.system,\n percent=process_info['cpu_percent'])\n children_cpu = ProcessCPU(user=raw_cpu.children_user,\n system=raw_cpu.children_system,\n percent=sum(child['cpu_percent']\n for child in children_info))\n\n # Memory usage\n raw_mem = process_info['memory_full_info']\n memory = ProcessMemory(resident=raw_mem.rss, virtual=raw_mem.vms,\n unique=raw_mem.uss)\n children_raw_mem = [child['memory_full_info'] for child in children_info]\n children_memory = ProcessMemory(\n resident=sum(m.rss for m in children_raw_mem),\n virtual=sum(m.vms for m in children_raw_mem),\n unique=sum(m.uss for m in children_raw_mem)\n )\n\n # Summarized values of DiskIO usage\n raw_disk = process_info['io_counters']\n disk_io = ProcessDiskIO(read_count=raw_disk.read_count,\n write_count=raw_disk.write_count,\n read_bytes=raw_disk.read_bytes,\n write_bytes=raw_disk.write_bytes)\n children_raw_disk = [child['io_counters'] for child in children_info]\n children_disk_io = ProcessDiskIO(\n read_count=sum(d.read_count for d in children_raw_disk),\n write_count=sum(d.write_count for d in children_raw_disk),\n read_bytes=sum(d.read_bytes for d in children_raw_disk),\n write_bytes=sum(d.write_bytes for d in children_raw_disk)\n )\n\n # Summarized values of Network usage\n network = ProcessNetwork(connections_num=len(process_info['connections']))\n children_network = ProcessNetwork(\n connections_num=sum(len(child['connections']) for child in children_info)\n )\n\n # Summarized values about Threading\n threads_num = len(process_info['threads'])\n children_threads_num = sum(len(child['threads']) for child in children_info)\n\n children_sum = ProcessChildrenSum(\n cpu=children_cpu, memory=children_memory, disk_io=children_disk_io,\n network=children_network, threads_num=children_threads_num\n )\n\n return ProcessStats(\n pid=pid, monit_name=monit_name, unified_service_name=service.name,\n application_id=service.get_application_id_by_monit_name(monit_name),\n port=service.get_port_by_monit_name(monit_name), private_ip=private_ip,\n cmdline=process_info['cmdline'], cpu=cpu, memory=memory, disk_io=disk_io,\n network=network, threads_num=threads_num, 
children_stats_sum=children_sum,\n children_num=len(children_info)\n )", "def _get_processes(self):\n return self.__processes", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()", "def subprocesses(self):\n return {i: p for i, p in enumerate(self._processes)}", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def get_processes():\n yield from psutil.process_iter()", "def list_active_processes():\n return psutil.process_iter()", "def monitorAll(self):\n\n websites = self.user.mySites.values()\n\n # subprocesses to get the requests logs\n self.processes = [Process(target=self.monitorOne, args=(website,)) for website in websites]\n\n for process in self.processes:\n process.daemon = True\n\n for process in self.processes:\n process.start()\n\n for process in self.processes:\n process.join()\n\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the PID of the process that was marked as $$TBD.
def set_pid(self, pid):
    # type: (int) -> None
    for i in range(len(self.__target_pids)):
        if self.__target_pids[i] == "$$TBD":
            self.__target_pids[i] = pid
            break
[ "def pid(self, pid):\n\n self._pid = pid", "def test_set_ultimate_pid(self):\n print(\"Testing setting ultimate PID\")\n pass", "def set_pid(self,san,key,val='',test=0):\n if val == str(self.pid):\n return (0,'')\n if self.state <> ObjState.created:\n return (1,'Cannot change pid for running targets')\n if self.device:\n max_pid = self.device.tgtprocs - 1\n else:\n max_pid = 0\n return self.san_interface.robots.set_int(self,key,val,test,0,max_pid)", "def tpid(self, tpid: str):\n\n self._tpid = tpid", "def set_pid(self, pid):\n if type(pid) is not int:\n raise TypeError(\"Expected value was integer, value given is: \", type(pid))\n self.container_pid = pid", "def setPid(self, *args):\n return _yarp.IPidControl_setPid(self, *args)", "def _update_PID(self):\n self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory)", "def host_pid(self, host_pid):\n self._host_pid = host_pid", "def set_process_title():\n if os.path.exists(options.pid):\n pid = int(open(options.pid, 'r').read().strip())\n if pid == os.getpid():\n setproctitle.setproctitle(\"blackhole: master\")\n else:\n setproctitle.setproctitle(\"blackhole: worker\")", "def host_pid(self, host_pid):\n\n self._host_pid = host_pid", "def change_pid_server(self, pid):\n self.lbl_pid['text'] = pid", "def process_id(self, process_id):\n\n self._process_id = process_id", "def writePID():\n \n try:\n file('/var/run/pybal.pid', 'w').write(str(os.getpid()) + '\\n')\n except Exception:\n raise", "def test_missingPIDVariable(self):\n fakeEnvironment = self.initializeEnvironment(3, os.getpid())\n del fakeEnvironment['LISTEN_PID']\n sddaemon = ListenFDs.fromEnvironment(environ=fakeEnvironment)\n self.assertEqual([], sddaemon.inheritedDescriptors())", "def cmd_process_kill(self, mysql_pid):\n raise NotImplementedError", "def cli_set_process_title():\n raise NotImplementedError()", "def set_id(self, bash_var: str) -> None:\n pass", "def resetPid(self, *args):\n return _yarp.IPidControl_resetPid(self, *args)", "def pending_pod_number(self, pending_pod_number):\n self._pending_pod_number = pending_pod_number" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the probability of a word following a context, i.e. the conditional probability P(word|context)
def prob(self, word, context=None):
    if not context:
        context = ()
    else:
        context = tuple(context)
    prob = 0
    for i in range(len(context) + 1):
        prob += self.weights[i] * self.ngram_cpd[context[i:]][word]
    return prob
[ "def prob(self, word, context):\n assert type(context) == tuple\n result = 0\n\n # print(f\"p({word} | {context}) = \", end=\"\")\n\n\n # for each n-gram model, calculate the probability of the word given the context weigthed by the lambda\n # and add it to the result\n # s = []\n for i in range(0, len(self.ngram_models)):\n p = self._ngram_prob(word, context, i)\n # s.append(f\"{self.lambdas[i]}*{p}\")\n result += self.lambdas[i] * p\n\n # print(f\"{' + '.join(s)} = {result}\")\n\n return result", "def prob(self, word, context=tuple(), pdist=None):\n\n if type(context) is not tuple: \n context = tuple(context)\n if pdist==None:\n pdist = self.probdist(context)\n\n if word not in self.vocab:\n return self.prob(self.OOV, context, pdist)\n\n return pdist[self.vocab.index(word)]", "def prob_words(context, vocab, temperature=1.0):\n dot = np.dot(vocab, context)\n return _softmax(dot / temperature)", "def prob(self, token, context):\n count = self.ngram_count(context + [token])\n count_all = self.ngram_count(context)\n\n return float(count) / count_all", "def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def get_word_probability(self, word, previous_words):\n assert len(previous_words) == self.n - 1, \"Error in probability calculation: invalid number of previous words: {}\".format(len(previous_words))\n \n if previous_words not in self.conditional_probabilities:\n proba = 0\n else:\n proba = self.conditional_probabilities[previous_words].get(word,0)\n \n return proba", "def prob_word_given_spam(self, word):\n pass", "def generate(self, context):\n\n probs = {}\n total_prob = 0\n for word in self._bigrams[context]:\n # calculate the probability of a word given a context\n probs[word] = exp(self.laplace(context,word))\n # keep track of the total probability\n total_prob = total_prob + probs[word]\n # generate a uniform variable to perform simulation\n u = uniform(0, total_prob)\n prob = 0\n # picture this like a timeline -> keep moving up the timeline to a new\n # word until our uniform variable is in a section owned by a word\n for word in probs:\n prob = prob + probs[word]\n if u < prob:\n return word\n\n # case for when there is no predefined words following this context\n return_word = \"\"\n max_value = -1\n for word in self._counted:\n if self._counted[word] > max_value:\n # we'll just return the word seen the most often (many ways to do this)\n return_word = word\n max_value = self._counted[word]\n return return_word", "def logprob(self, word, context):\n\n return -log(self.prob(word, context), 2)", "def probability(self, words):\n prob = 1\n for token in words:\n if token not in self.mdl.index:\n prob = 0\n break\n prob *= self.mdl[token]\n return prob", "def get_probability(some_dict, some_string):\n 
lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product", "def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result", "def context_probabilities(self, context):\n if context not in self._cache.keys():\n self._cache[context] = {\n word: self.score(word, context) for word in self.vocab.counts.keys()\n }\n return self._cache[context]", "def next_word_proba(self, word, seq):\n context = tuple(seq[-2:]) # (w_2, w_1)\n k = self.k\n #### YOUR CODE HERE ####\n # Hint: self.counts.get(...) and self.context_totals.get(...) may be\n # useful here. See note in defaultdict.md about how this works.\n return (self.counts.get(context, defaultdict(lambda: 0.0)).get(word, 0) + k) / float(self.context_totals.get(context, 0) + k * self.V)\n #return a /b \n #### END(YOUR CODE) ####", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def __probWordGivenLabel(self, word, label):\n ## Get no. of occurrences of word in label\n if word not in self.db.labelToWordToFreq[label]:\n freqInLabel = 0\n else:\n freqInLabel = self.db.labelToWordToFreq[label][word]\n\n ## Get total count of words in label\n totalWordCountInLabel = sum(self.db.labelToWordToFreq[label].values())\n\n ## Find probability of word coming up in class 'label', using Laplace Smoothing\n return float(freqInLabel + self.k) / (totalWordCountInLabel + (self.k * len(self.db.wordToTotalFreq)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
YOLOV3 network hybrid forward.
def hybrid_forward(self, F, x, *args):
    all_box_centers = []
    all_box_scales = []
    all_objectness = []
    all_class_pred = []
    all_anchors = []
    all_offsets = []
    all_feat_maps = []
    all_detections = []
    routes = []
    for stage, block, output in zip(self.stages, self.yolo_blocks, self.yolo_outputs):
        x = stage(x)
        routes.append(x)

    # the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow
    for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs):
        x, tip = block(x)
        if autograd.is_training():
            dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip)
            all_box_centers.append(box_centers.reshape((0, -3, -1)))
            all_box_scales.append(box_scales.reshape((0, -3, -1)))
            all_objectness.append(objness.reshape((0, -3, -1)))
            all_class_pred.append(class_pred.reshape((0, -3, -1)))
            all_anchors.append(anchors)
            all_offsets.append(offsets)
            # here we use fake featmap to reduce memory consumption, only shape[2, 3] is used
            fake_featmap = F.zeros_like(tip.slice_axis(
                axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1))
            all_feat_maps.append(fake_featmap)
        else:
            dets = output(tip)
        all_detections.append(dets)
        if i >= len(routes) - 1:
            break
        # add transition layers
        x = self.transitions[i](x)
        # upsample feature map reverse to shallow layers
        upsample = _upsample(x, stride=2)
        route_now = routes[::-1][i + 1]
        x = F.concat(F.slice_like(upsample, route_now * 0, axes=(2, 3)), route_now, dim=1)

    if autograd.is_training():
        # during training, the network behaves differently since we don't need detection results
        if autograd.is_recording():
            # generate losses and return them directly
            box_preds = F.concat(*all_detections, dim=1)
            all_preds = [F.concat(*p, dim=1) for p in [
                all_objectness, all_box_centers, all_box_scales, all_class_pred]]
            all_targets = self._target_generator(box_preds, *args)
            return self._loss(*(all_preds + all_targets))

        # return raw predictions, this is only used in DataLoader transform function.
        return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps,
                F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1),
                F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1))

    # concat all detection results from different stages
    result = F.concat(*all_detections, dim=1)
    # apply nms per class
    if self.nms_thresh > 0 and self.nms_thresh < 1:
        result = F.contrib.box_nms(
            result, overlap_thresh=self.nms_thresh, valid_thresh=0.01,
            topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False)
        if self.post_nms > 0:
            result = result.slice_axis(axis=1, begin=0, end=self.post_nms)
    ids = result.slice_axis(axis=-1, begin=0, end=1)
    scores = result.slice_axis(axis=-1, begin=1, end=2)
    bboxes = result.slice_axis(axis=-1, begin=2, end=None)
    return ids, scores, bboxes
[ "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def YOLOV3Base(self):\n if self.conf['yolov3_base_model_load']:\n base = load_model('yolov3_base.h5')\n base.trainable = True\n return base\n\n yolov3 = make_yolov3_model()\n\n # Load the weights.\n weight_reader = WeightReader('yolov3.weights')\n weight_reader.load_weights(yolov3)\n \n # Make a base model.\n input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')\n \n # 0 ~ 1.\n conv_layer = yolov3.get_layer('conv_' + str(0))\n x = ZeroPadding2D(1)(input1) #? \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(0))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n conv_layer = yolov3.get_layer('conv_' + str(1))\n x = ZeroPadding2D(1)(x) #? \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n \n # 2 ~ 4.\n for i in range(2, 4, 2): #?\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n\n # 5.\n conv_layer = yolov3.get_layer('conv_' + str(5))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? 
\n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(5))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n \n # 6 ~ 10.\n for i in range(6, 10, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n\n # 12.\n conv_layer = yolov3.get_layer('conv_' + str(12))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(12))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n\n # 13 ~ 35.\n for i in range(13, 35, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n\n # 37.\n conv_layer = yolov3.get_layer('conv_' + str(37))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(37))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n\n # 38 ~ 60.\n for i in range(38, 60, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n\n # 62.\n conv_layer = yolov3.get_layer('conv_' + str(62))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(62))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n\n # 63 ~ 73.\n for i in range(63, 73, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? 
\n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n \n output = x\n base = Model(inputs=[input1], outputs=[output])\n base.trainable = True\n base.save('yolov3_base.h5')\n \n return base", "def _forward(self, X):\n firstLayer = True\n for layer, fcn in self.model.named_children():\n if 'recurrent' in layer:\n if firstLayer:\n Y, hidden = fcn(X)\n else:\n Y, hidden = fcn(Y)\n elif 'dropout' in layer:\n Y = fcn(Y)\n elif 'linear' in layer:\n Y = fcn(Y.view((Y.shape[1], Y.shape[0]*Y.shape[-1])))\n else:\n Y = fcn(Y)\n\n firstLayer = False\n\n return Y", "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def propagate_forward(self, data):\r\n\r\n # Set input layer\r\n self.layers[0][0:-1] = data\r\n\r\n # Propagate from layer 0 to layer n-1 using sigmoid as activation function\r\n for i in range(1, len(self.shape)):\r\n # Propagate activity\r\n self.layers[i][...] 
= sigmoid(np.dot(self.layers[i-1], self.weights[i-1]))\r\n\r\n # Return output\r\n return self.layers[-1]", "def forward_train(self, *args, **kwargs):\n pass", "def forward(self, x):\n\n # CNN\n x = self.conv1(x)\n if self.apply_drop:\n x = self.drop1a(x)\n # CNN->ReLU\n x = self.conv1b(x)\n x = F.relu(x)\n if self.apply_drop:\n x = self.drop1b(x)\n\n # CNN\n x = self.conv2(x)\n if self.apply_drop:\n x = self.drop2a(x)\n # CNN->ReLU\n x = self.conv2b(x)\n x = F.relu(x)\n if self.apply_drop:\n x = self.drop2b(x)\n \n # Flatten\n x = x.view(-1, 16*4*4)\n \n # FC->ReLU\n x = F.relu(self.fc1(x))\n if self.apply_drop:\n x = self.fc_drop1(x)\n \n # FC->ReLU\n x = F.relu(self.fc2(x))\n if self.apply_drop:\n x = self.fc_drop2(x)\n \n # FC\n x = self.fc3(x)\n \n return x", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def setup_forward(self, W, input_data, prefix=\"\"):\n \n def loop_body(i, activations, outputcollect):\n \n if self.config['sequence_input']:\n # Cut out the correct input\n if self.config['net_input_add_onehot']:\n inp = tf.slice(input_data, (0,i), (self.config['batch_size'], 1), name=prefix+\"/inputSlice\") # <batch_size, 1>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size>\n inp = tf.one_hot(indices=inp, depth=self.config['num_input']) # <batch_size, num_input>\n else:\n inp = tf.slice(input_data, (0,i,0), (self.config['batch_size'], 1, self.config['num_input']), name=prefix+\"/inputSlice\") # <batch_size, 1, num_input>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size, num_input>\n else:\n inp = input_data\n inp = self.setup_print(inp, \"input data\")\n \n # Concatenate input, bias, activations\n inp = tf.concat([inp, self.bias, activations], axis=1, name=prefix+\"/stepconcat\") # <batch_size, from>\n inp = tf.expand_dims(inp, 1) # <batch_size, 1, from>\n \n # Fully connected\n # <batch_size, 1, to> <= <batch_size, 1, from> @ <batch_size, from, to>\n activations = tf.matmul(inp, W, name=prefix+\"/stepmatmul\")\n activations = tf.squeeze(activations, 1) # <batch_size, to>\n \n # Leaky ReLU\n # This allows values to blow up\n ## activations = tf.maximum(activations, activations * .3, name=prefix+\"/lrelu\")\n \n # Sigmoid\n activations = tf.sigmoid(activations) # <batch_size, to>\n \n # Store the output if we need outputs from all timesteps\n # Alternative may be: https://stackoverflow.com/questions/39157723/how-to-do-slice-assignment-in-tensorflow/43139565#43139565\n if self.config['sequence_output']:\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n output = tf.expand_dims(output, axis=1) # <batch_size, 1, output>\n outputcollect = tf.concat([outputcollect, output], axis=1)\n \n return tf.add(i,1), activations, outputcollect\n \n loop_out = tf.while_loop(\n cond=(lambda\n i, \n activations,\n outputcollect:\n tf.less(i, self.config['timesteps'])\n ),\n body=loop_body,\n loop_vars=[\n self.initial_i,\n self.initial_activations,\n self.initial_output\n ],\n shape_invariants=[\n self.initial_i.get_shape(),\n self.initial_activations.get_shape(),\n tf.TensorShape([self.config['batch_size'], None, self.config['num_output']])\n ],\n back_prop=False,\n # return_same_structure=True,\n name=prefix+\"/loop\"\n )\n \n # Get the output\n if 
self.config['sequence_output']:\n output = loop_out[2]\n # Set shape otherwise broadcasting messes this up\n output.set_shape((self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n activations = loop_out[1] # <batch_size, to>\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n\n if self.config['net_add_softmax']:\n # tf.nn.softmax\n output = tf.exp(output) / tf.expand_dims(tf.reduce_sum(tf.exp(output), axis=-1), axis=-1)\n \n return output", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(self, h_prev, c_prev, x_t):\n m, i = x_t.shape\n cat = np.concatenate((h_prev, x_t), axis=1)\n f = self.sigmoid(cat @ self.Wf + self.bf)\n i = self.sigmoid(cat @ self.Wu + self.bu)\n c_hat = np.tanh(cat @ self.Wc + self.bc)\n c_next = f * c_prev + i * c_hat\n o = self.sigmoid(cat @ self.Wo + self.bo)\n h_next = o * np.tanh(c_next)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, c_next, y", "def forward(self, x):\n # Flatten\n x = x.view(-1, 32*32*3)\n\n # FC->ReLU\n x = F.relu(self.fc1(x))\n if self.apply_drop:\n x = self.fc_drop1(x)\n \n # FC->ReLU\n x = F.relu(self.fc2(x))\n if self.apply_drop:\n x = self.fc_drop2(x)\n \n # FC->ReLU\n x = F.relu(self.fc3(x))\n if self.apply_drop:\n x = self.fc_drop3(x)\n \n # FC\n x = self.fc4(x)\n\n return x", "def forward(self, X):\n for l in self.layers:\n layer_activation = l.forward(X)", "def forward(self, inputs):\n comp_input, prot_input = inputs\n comp_out = self.gnet(comp_input)\n prot_input = self.prot2vec(prot_input)\n prot_out = self.pcnn(prot_input, comp_out)\n out = torch.cat([comp_out, prot_out], dim=1)\n return out", "def ggml_build_forward(tensor: ffi.CData) -> ffi.CData:\n ...", "def forward(self, X, training=False):\n pass", "def _forward(self, input_: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput:\n raise NotImplementedError(\"_forward not implemented\")", "def forward(self, x):\n # Flatten\n x = x.view(-1, 28*28)\n\n # FC->ReLU \n x = F.relu(self.fc1(x))\n if self.apply_drop:\n x = self.drop1(x)\n\n # FC->ReLU \n x = F.relu(self.fc2(x))\n if self.apply_drop:\n x = self.drop2(x)\n\n # FC->ReLU \n x = F.relu(self.fc3(x))\n if self.apply_drop:\n x = self.drop3(x)\n\n # FC\n x = self.fc4(x)\n \n return x", "def forward(self):\n self.toNode.unsigval += self.fromNode.value * self.weight\n self.toNode.value = sigmoid(self.toNode.unsigval)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set non-maximum suppression parameters.
def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100):
    self._clear_cached_op()
    self.nms_thresh = nms_thresh
    self.nms_topk = nms_topk
    self.post_nms = post_nms
[ "def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})", "def _non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):\n max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in `tf.image.non_max_suppression`\n K.get_session().run(tf.variables_initializer([max_boxes_tensor]))\n # To get the list of indices corresponding to boxes you keep\n nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes, iou_threshold=iou_threshold)\n # To select only nms_indices from scores, boxes and classes\n scores = K.gather(scores, nms_indices)\n boxes = K.gather(boxes, nms_indices)\n classes = K.gather(classes, nms_indices)\n \n return scores, boxes, classes", "def set_nonbonding_parameters(self, selection):\n self._check_selection(selection)\n\n self._nonbonding = selection.lower()", "def non_max_suppression(node: NodeWrapper,\n params: Dict[str, np.ndarray],\n xmap: Dict[str, XLayer]):\n\n logger.info(\"ONNX NonMaxSupression -> XLayer AnyOp\")\n\n assert len(node.get_outputs()) == 1\n name = node.get_outputs()[0]\n bottoms = node.get_inputs()\n node_attrs = node.get_attributes()\n\n boxes_X = xmap[bottoms[0]]\n num_batches, spatial_d, _ = boxes_X.shapes.tolist()\n\n X = px.ops.any_op(\n op_name=px.stringify(name),\n in_xlayers=[boxes_X],\n any_shape=[num_batches, -1, 4],\n onnx_id=name\n )\n\n return [X]", "def set_cycle_suppression(self):\n self._cyclesuppression = True\n self.suppression_used = False", "def flush_suppression_list():\n consolidated_data.flush_suppress_list()", "def suppression(self):\n return self._suppression", "def setPeakExtras(self):\n \n pass", "def non_heap_max(self, non_heap_max):\n\n self._non_heap_max = non_heap_max", "def _set_suppress(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"suppress\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/tunnel', defining_module='openconfig-if-tunnel', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"suppress must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"suppress\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/tunnel', defining_module='openconfig-if-tunnel', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__suppress = t\n if hasattr(self, '_set'):\n self._set()", "def _check_non_max_suppression(self, probs):\n batch_size = self.output_map_shp[-1]\n for i in xrange(batch_size):\n dop = np.copy(self.data_predictor_positions)\n preds = np.reshape(dop, (probs[i].shape[0], 1))\n pred_pos = np.vstack((dop, preds))\n pred_pos = pred_pos[pred_pos[: , 1].argsort()]", "def add_bounds_on_uncertain_parameters(**kwargs):\n config = kwargs.pop('config')\n model = kwargs.pop('model')\n _set = config.uncertainty_set\n parameter_bounds = _set.parameter_bounds\n for i, p in enumerate(model.util.uncertain_param_vars.values()):\n p.setlb(parameter_bounds[i][0])\n p.setub(parameter_bounds[i][1])", "def _set_suppress(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = 
v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"suppress\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/tunnel', defining_module='openconfig-if-tunnel', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"suppress must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"false\"), is_leaf=True, yang_name=\"suppress\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/tunnel', defining_module='openconfig-if-tunnel', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__suppress = t\n if hasattr(self, '_set'):\n self._set()", "def plussa_limited(self, plussa_limited):\n\n self._plussa_limited = plussa_limited", "def non_maximum_suppression(dt, seeds):\n seeds = np.array(np.where(seeds)).transpose()\n seeds = nonMaximumDistanceSuppression(dt, seeds)\n vol = np.zeros(dt.shape, dtype=\"bool\")\n coords = tuple(seeds[:, i] for i in range(seeds.shape[1]))\n vol[coords] = 1\n return vol", "def box_non_maximum_suppression(data=None, overlap_thresh=_Null, topk=_Null, coord_start=_Null, score_index=_Null, id_index=_Null, force_suppress=_Null, in_format=_Null, out_format=_Null, out=None, name=None, **kwargs):\n return (0,)", "def setDropThreshold(self, dropThreshold): # real signature unknown; restored from __doc__\n pass", "def set_ignore(self, count):\n\n self.ignore += count", "def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n # dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset class categories and class predictors.
def reset_class(self, classes):
    self._clear_cached_op()
    self._classes = classes
    if self._pos_iou_thresh >= 1:
        self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh)
    for outputs in self.yolo_outputs:
        outputs.reset_class(classes)
[ "def _reset(self):\n self.classifier.reset()", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self.experts = [\n self._construct_new_expert()\n ]", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self._random_state = check_random_state(self.random_state)\n if self.base_estimators:\n self.experts = [\n self.WeightedExpert(\n cp.deepcopy(be), 1, self.labels)\n for be in self.base_estimators\n ]\n else:\n self.experts = [\n self._construct_new_expert()\n ]", "def _untrain(self):\n if self.__clf:\n self.__clf._untrain()", "def reset_features_list(self):\n self.categorical_features = Data.categorical_features.copy()\n self.continuous_features = Data.continuous_features.copy()", "def _untrain(self):\n if not self.trained:\n return\n for clf in self.clfs:\n clf.untrain()\n super(BoostedClassifier, self)._untrain()", "def reset(self):\n\t\ttf.reset_default_graph()\n\t\tdel self.train_x_state, self.train_y_state\n\t\tdel self.test_x_state, self.test_y_state", "def _untrain(self):\n self.means = None\n self.variances = None\n self.ulabels = None\n self.priors = None\n super(GNB, self)._untrain()", "def reset_labeled_data(self):\n self.current_labeled_set = self.initial_labeled_set\n self.current_labels = self.initial_labels\n if self.eval_cutoff is not None:\n self.cov_matrix = self.init_cov_matrix.copy()", "def reset(self):\n self.reset_upper_confidence_bounds() ## for UCB\n self.reset_sample_rewards() ## for TS\n self.reset_regrets()\n self.reset_actions()\n self.reset_A_inv()\n self.reset_grad_approx()\n self.iteration = 0", "def reset(self):\n\n # Reset everything\n self._layers = OrderedDict()\n self._connections = defaultdict(list)\n self._learning_rules = dict()", "def reset(self) -> None:\n self.precision.reset()\n self.recall.reset()", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def reset_metrics(self):\n super().reset_metrics()\n del self.observations[:]\n del self.labels[:]", "def reset_train(self):\n\n self.model.apply(self._reset_weights)\n self.epoch_loss.reset()\n self.epoch = 0\n del self.batch_process\n self.batch_process = None", "def reset_models(self):\n self._model = self._agent_model()\n if(self._use_target_net):\n self._target_net = self._agent_model()\n self.update_target_net()", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def reset(self):\n\n self.rotation = 0\n self.iteration = 0\n self.predictions = []\n self.prediction = 0\n self.current_position = 0\n self.rotation_list = [0]\n self.prediction = 0\n self.initial_adjust = False", "def reset(cls):\n cls.frameworks = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
YOLO3 multiscale with darknet53 base network on VOC dataset.
def yolo3_darknet53_voc(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs):
    from ...data import VOCDetection
    pretrained_base = False if pretrained else pretrained_base
    base_net = darknet53(
        pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices, **kwargs)
    stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]]
    anchors = [[10, 13, 16, 30, 33, 23],
               [30, 61, 62, 45, 59, 119],
               [116, 90, 156, 198, 373, 326]]
    strides = [8, 16, 32]
    classes = VOCDetection.CLASSES
    return get_yolov3(
        'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'voc',
        pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)
[ "def YOLOV3Base(self):\n if self.conf['yolov3_base_model_load']:\n base = load_model('yolov3_base.h5')\n base.trainable = True\n return base\n\n yolov3 = make_yolov3_model()\n\n # Load the weights.\n weight_reader = WeightReader('yolov3.weights')\n weight_reader.load_weights(yolov3)\n \n # Make a base model.\n input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')\n \n # 0 ~ 1.\n conv_layer = yolov3.get_layer('conv_' + str(0))\n x = ZeroPadding2D(1)(input1) #? \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(0))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n conv_layer = yolov3.get_layer('conv_' + str(1))\n x = ZeroPadding2D(1)(x) #? \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n \n # 2 ~ 4.\n for i in range(2, 4, 2): #?\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n\n # 5.\n conv_layer = yolov3.get_layer('conv_' + str(5))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(5))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n \n # 6 ~ 10.\n for i in range(6, 10, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n\n # 12.\n conv_layer = yolov3.get_layer('conv_' + str(12))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(12))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n\n # 13 ~ 35.\n for i in range(13, 35, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n\n # 37.\n conv_layer = yolov3.get_layer('conv_' + str(37))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(37))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n\n # 38 ~ 60.\n for i in range(38, 60, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? 
\n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n\n # 62.\n conv_layer = yolov3.get_layer('conv_' + str(62))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(62))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n skip = x\n\n # 63 ~ 73.\n for i in range(63, 73, 3):\n conv_layer = yolov3.get_layer('conv_' + str(i))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n\n conv_layer = yolov3.get_layer('conv_' + str(i + 1))\n \n if conv_layer.kernel_size[0] > 1:\n x = ZeroPadding2D(1)(x) #? \n \n x = conv_layer(x)\n norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))\n x = norm_layer(x)\n x = LeakyReLU(alpha=0.1)(x)\n \n x = add([skip, x]) #?\n skip = x #?\n \n output = x\n base = Model(inputs=[input1], outputs=[output])\n base.trainable = True\n base.save('yolov3_base.h5')\n \n return base", "def get_pytorch_yolo(get_default_cifar10_subset):\n import cv2\n import torch\n\n from pytorchyolo import models\n from pytorchyolo.utils.loss import compute_loss\n\n from art.estimators.object_detection.pytorch_yolo import PyTorchYolo\n\n model_path = \"/tmp/PyTorch-YOLOv3/config/yolov3.cfg\"\n weights_path = \"/tmp/PyTorch-YOLOv3/weights/yolov3.weights\"\n model = models.load_model(model_path=model_path, weights_path=weights_path)\n\n class YoloV3(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, x, targets=None):\n if self.training:\n outputs = self.model(x)\n # loss is averaged over a batch. 
Thus, for patch generation use batch_size = 1\n loss, loss_components = compute_loss(outputs, targets, self.model)\n\n loss_components_dict = {\"loss_total\": loss}\n\n return loss_components_dict\n else:\n return self.model(x)\n\n model = YoloV3(model)\n\n object_detector = PyTorchYolo(\n model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=(\"loss_total\",)\n )\n\n n_test = 10\n (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset\n x_test_cifar10 = x_test_cifar10[0:n_test]\n\n x_test = cv2.resize(\n x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC\n ).transpose((2, 0, 1))\n x_test = np.expand_dims(x_test, axis=0)\n x_test = np.repeat(x_test, repeats=2, axis=0)\n\n # Create labels\n\n result = object_detector.predict(x=x_test)\n\n y_test = [\n {\n \"boxes\": result[0][\"boxes\"],\n \"labels\": result[0][\"labels\"],\n \"scores\": np.ones_like(result[0][\"labels\"]),\n },\n {\n \"boxes\": result[1][\"boxes\"],\n \"labels\": result[1][\"labels\"],\n \"scores\": np.ones_like(result[1][\"labels\"]),\n },\n ]\n\n yield object_detector, x_test, y_test", "def yolo(image, classes=\"src/yolo/classes.txt\", config=\"src/yolo/yolo.cfg\", weights=\"src/yolo/yolov3.weights\"):\n\n with open(classes, 'r') as in_file:\n classes = [line.strip() for line in in_file.readlines()]\n\n Width = image.shape[1]\n Height = image.shape[0]\n scale = 0.00392\n\n net = cv2.dnn.readNet(weights, config)\n\n blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(get_output_layers(net))\n\n class_ids = []\n confidences = []\n boxes = []\n conf_threshold = 0.5\n nms_threshold = 0.4\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.5:\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)\n\n img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #convert it to RGB channel\n return_val = []\n for i in indices:\n i = i[0]\n box = boxes[i]\n x = box[0]\n y = box[1]\n w = box[2]\n h = box[3]\n\n color = draw_prediction(image, classes, class_ids[i], confidences[i], round(x), round(y), round(x+w), round(y+h))\n average = get_bbox_average(img_rgb, round(x), round(y), round(x+w), round(y+h))\n\n return_val.append({\n \"x\": x,\n \"y\": y,\n \"w\": w,\n \"h\": h,\n # \"class\": classes[class_ids[i]],\n \"class\": \"person\",\n \"confidence\": confidences[i],\n \"color\": color,\n \"centroid\": np.array([x + w/2, y + h/2]),\n \"average\": average,\n \"index\": i\n })\n\n return return_val", "def create_yolov3_modules(config_model, ignore_threshold):\n # layer order is same as yolov3.cfg\n # https://github.com/pjreddie/darknet/blob/master/cfg/yolov3.cfg\n module_list = nn.ModuleList()\n\n #\n # Darknet 53\n #\n\n module_list.append(add_conv(in_ch=3, out_ch=32, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=32, out_ch=64, ksize=3, stride=2))\n # 1\n module_list.append(resblock(ch=64, n_blocks=1))\n module_list.append(add_conv(in_ch=64, out_ch=128, ksize=3, stride=2))\n # 2\n module_list.append(resblock(ch=128, n_blocks=2))\n module_list.append(add_conv(in_ch=128, out_ch=256, 
ksize=3, stride=2))\n # 3\n module_list.append(resblock(ch=256, n_blocks=8))\n module_list.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=2))\n # 4\n module_list.append(resblock(ch=512, n_blocks=8))\n module_list.append(add_conv(in_ch=512, out_ch=1024, ksize=3, stride=2))\n # 5\n module_list.append(resblock(ch=1024, n_blocks=4))\n\n #\n # additional layers for YOLOv3\n #\n\n # A\n module_list.append(add_conv(in_ch=1024, out_ch=512, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=512, out_ch=1024, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=1024, out_ch=512, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=512, out_ch=1024, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=1024, out_ch=512, ksize=1, stride=1))\n # B\n module_list.append(add_conv(in_ch=512, out_ch=1024, ksize=3, stride=1))\n module_list.append(\n YOLOLayer(\n config_model, layer_no=0, in_ch=1024, ignore_threshold=ignore_threshold\n )\n )\n # C\n module_list.append(add_conv(in_ch=512, out_ch=256, ksize=1, stride=1))\n module_list.append(nn.Upsample(scale_factor=2, mode=\"nearest\"))\n\n # A\n module_list.append(add_conv(in_ch=768, out_ch=256, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=512, out_ch=256, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=512, out_ch=256, ksize=1, stride=1))\n # B\n module_list.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=1))\n module_list.append(\n YOLOLayer(\n config_model, layer_no=1, in_ch=512, ignore_threshold=ignore_threshold\n )\n )\n # C\n module_list.append(add_conv(in_ch=256, out_ch=128, ksize=1, stride=1))\n module_list.append(nn.Upsample(scale_factor=2, mode=\"nearest\"))\n\n # A\n module_list.append(add_conv(in_ch=384, out_ch=128, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=128, out_ch=256, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=256, out_ch=128, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=128, out_ch=256, ksize=3, stride=1))\n module_list.append(add_conv(in_ch=256, out_ch=128, ksize=1, stride=1))\n module_list.append(add_conv(in_ch=128, out_ch=256, ksize=3, stride=1))\n module_list.append(\n YOLOLayer(\n config_model, layer_no=2, in_ch=256, ignore_threshold=ignore_threshold\n )\n )\n\n return module_list", "def create_yolov3_modules(config_model, ignore_thre):\n # Fetch the number of bands (or channels) in the image\n in_ch = config_model[\"N_BANDS\"]\n\n # DarkNet53\n mlist = nn.ModuleList()\n mlist.append(add_conv(in_ch=in_ch, out_ch=32, ksize=3, stride=1)) #0\n mlist.append(add_conv(in_ch=32, out_ch=64, ksize=3, stride=2)) #1 \n mlist.append(resblock(ch=64)) #2\n mlist.append(add_conv(in_ch=64, out_ch=128, ksize=3, stride=2)) #3\n mlist.append(resblock(ch=128, nblocks=2)) #4\n mlist.append(add_conv(in_ch=128, out_ch=256, ksize=3, stride=2)) #5 \n mlist.append(resblock(ch=256, nblocks=8)) # shortcut 1 from here #6 - shortcut 1\n mlist.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=2)) #7 \n mlist.append(resblock(ch=512, nblocks=8)) # shortcut 2 from here #8 - shortcut 2\n mlist.append(add_conv(in_ch=512, out_ch=1024, ksize=3, stride=2)) #9\n mlist.append(resblock(ch=1024, nblocks=4)) #10\n\n # YOLOv3\n mlist.append(resblock(ch=1024, nblocks=2, shortcut=False)) #11\n mlist.append(add_conv(in_ch=1024, out_ch=512, ksize=1, stride=1)) #12\n # 1st yolo branch\n mlist.append(add_conv(in_ch=512, out_ch=1024, ksize=3, stride=1)) 
#13\n mlist.append(\n YOLOLayer(config_model, layer_no=0, in_ch=1024, ignore_thre=ignore_thre)) #14 - yolo\n\n mlist.append(add_conv(in_ch=512, out_ch=256, ksize=1, stride=1)) #15\n mlist.append(nn.Upsample(scale_factor=2, mode='nearest')) #16 - shortcut 2 concats\n mlist.append(add_conv(in_ch=768, out_ch=256, ksize=1, stride=1)) #17\n mlist.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=1)) #18\n mlist.append(resblock(ch=512, nblocks=1, shortcut=False)) #19\n mlist.append(add_conv(in_ch=512, out_ch=256, ksize=1, stride=1)) #20\n # 2nd yolo branch\n mlist.append(add_conv(in_ch=256, out_ch=512, ksize=3, stride=1)) #21\n mlist.append(\n YOLOLayer(config_model, layer_no=1, in_ch=512, ignore_thre=ignore_thre)) #22 - yolo\n\n mlist.append(add_conv(in_ch=256, out_ch=128, ksize=1, stride=1)) #23\n mlist.append(nn.Upsample(scale_factor=2, mode='nearest')) #24 - shortcut 1 concats\n mlist.append(add_conv(in_ch=384, out_ch=128, ksize=1, stride=1)) #25\n mlist.append(add_conv(in_ch=128, out_ch=256, ksize=3, stride=1)) #26\n mlist.append(resblock(ch=256, nblocks=2, shortcut=False)) #27\n mlist.append(\n YOLOLayer(config_model, layer_no=2, in_ch=256, ignore_thre=ignore_thre)) #28 - yolo\n\n return mlist", "def load_yolo():\n net = cv2.dnn.readNet(\"yolov3.weights\", \"yolov3.cfg\")\n with open(\"coco.names\", \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n layers_names = net.getLayerNames()\n output_layers = [layers_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n colors = np.random.uniform(0, 255, size=(len(classes), 3))\n return net, classes, colors, output_layers", "def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]", "def yolo3_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):\r\n mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)\r\n print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))\r\n\r\n # 
input: 416 x 416 x 3\r\n # activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)\r\n # expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)\r\n\r\n # activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)\r\n # expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)\r\n\r\n # activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)\r\n # expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)\r\n\r\n # NOTE: activation layer name may different for TF1.x/2.x, so we\r\n # use index to fetch layer\r\n # f1: 13 x 13 x (960*alpha)\r\n f1 = mobilenetv3large.layers[194].output\r\n # f2: 26 x 26 x (672*alpha)\r\n f2 = mobilenetv3large.layers[146].output\r\n # f3: 52 x 52 x (240*alpha)\r\n f3 = mobilenetv3large.layers[79].output\r\n\r\n f1_channel_num = int(960*alpha)\r\n f2_channel_num = int(672*alpha)\r\n f3_channel_num = int(240*alpha)\r\n #f1_channel_num = 1024\r\n #f2_channel_num = 512\r\n #f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])", "def darknet53():\r\n\r\n darknet = DarkNet(\r\n block=ResidualBlock,\r\n layer_nums=[1, 2, 8, 8, 4],\r\n in_channels=[32, 64, 128, 256, 512],\r\n out_channels=[64, 128, 256, 512, 1024],\r\n )\r\n\r\n return darknet", "def get_netavlad_model(opt, train_set, whole_test_set):\n pretrained = not opt.fromscratch\n arch = opt.arch.lower()\n mode = opt.mode.lower()\n dataPath = opt.dataPath\n pooling = opt.pooling.lower()\n resume = opt.resume\n num_clusters = opt.num_clusters\n\n hook_dim = 0\n\n if arch == 'alexnet':\n encoder_dim = 256\n hook_layer = 6 # TODO: fake value, to be determine\n layers = baseAlexNet(pre_trained=pretrained, num_train=opt.numTrain)\n elif arch == 'vgg16':\n encoder_dim = 512\n # vgg16-conv3(pooling之前的ReLU层,0-base)\n hook_layer = 15\n hook_dim = 256\n layers = baseVGG16(pre_trained=pretrained, numTrain=opt.numTrain)\n elif arch == 'resnet18':\n encoder_dim = 512\n # the output of the second block\n hook_layer = 4\n layers = baseResNet(pre_trained=pretrained,type=18, num_train=opt.numTrain)\n elif arch == 'resnet34':\n encoder_dim = 512\n hook_layer = 2\n layers = baseResNet(pre_trained=pretrained,type=34, num_train=opt.numTrain)\n elif arch == 'resnet50':\n encoder_dim = 2048\n hook_layer = 2\n layers = baseResNet(pre_trained=pretrained,type=50, num_train=opt.numTrain)\n elif arch == 'mobilenet2':\n encoder_dim = 320\n hook_layer = 2\n layers = baseMobileNet(pre_trained=pretrained, num_train=opt.numTrain)\n elif arch == 'shufflenet2':\n encoder_dim = 464\n hook_layer = 2\n layers = baseShuffleNet(pre_trained=pretrained, num_train=opt.numTrain)\n else:\n raise Exception('Unknown architecture')\n if mode == 'cluster':\n layers.append(L2Norm())\n\n if opt.saveDecs:\n layers[hook_layer].register_forward_hook(get_hook)\n\n encoder = nn.Sequential(*layers) # 参数数目不定时,使用*号作为可变参数列表,就可以在方法内对参数进行调用。\n model = nn.Module()\n model.add_module('encoder', encoder)\n\n\n # 初始化model中的pooling模块\n if mode != 'cluster':\n if pooling == 'netvlad':\n net_vlad = netvlad.NetVLAD(num_clusters=num_clusters, dim=encoder_dim)\n if not resume:\n if mode == 'train':\n initcache = join(dataPath, 'centroids', arch + '_' + train_set.dataset + '_' + str(\n num_clusters) + '_desc_cen.hdf5')\n else:\n initcache = join(dataPath, 'centroids', arch + '_' + whole_test_set.dataset + '_' + str(\n num_clusters) + '_desc_cen.hdf5')\n\n if not 
exists(initcache):\n raise FileNotFoundError('Could not find clusters, please run with --mode=cluster before proceeding')\n\n with h5py.File(initcache, mode='r') as h5:\n clsts = h5.get(\"centroids\")[...]\n traindescs = h5.get(\"descriptors\")[...]\n net_vlad.init_params(clsts, traindescs)\n del clsts, traindescs\n\n model.add_module('pool', net_vlad)\n\n elif pooling == 'max':\n global_pool = nn.AdaptiveMaxPool2d((1, 1))\n model.add_module('pool', nn.Sequential(*[global_pool, Flatten(), L2Norm()]))\n\n elif pooling == 'avg':\n global_pool = nn.AdaptiveAvgPool2d((1, 1))\n model.add_module('pool', nn.Sequential(*[global_pool, Flatten(), L2Norm()]))\n\n else:\n raise ValueError('Unknown pooling type: ' + pooling)\n\n return model, encoder_dim, hook_dim", "def run_yolo(net, image, coco_classes, save_image=False):\n\n global frame, classes\n # Give the configuration and weight files for the model and load the network using them.\n classes = coco_classes\n\n frame = cv2.imread(str(image))\n\n # Crop the frame\n # (y_min, y_max) (x_min, x_max)\n # frame = frame[300:1080, 200:1920] # Classifying people\n # frame = frame[0:500, 0:1920] # Classifying Cars\n\n # Stop the program if reached end of video\n if frame is None:\n return\n\n # Create a 4D blob from a frame.\n blob = cv2.dnn.blobFromImage(\n frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False\n )\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n\n # Remove the bounding boxes with low confidence\n postprocess(frame, outs, save_image)\n\n # Get the overall time for inference(t) and the timings for each of the layers(in layersTimes)\n t, _ = net.getPerfProfile()\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv2.getTickFrequency())\n # cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n print(label)\n\n # Save image with all bounding boxes\n # utils.write_image(frame)", "def yolo_mobilev2(input_shape: list, anchor_num: int, class_num: int, **kwargs) -> [keras.Model, keras.Model]:\n input_tensor = keras.Input(input_shape)\n base_model = MobileNetV2(\n include_top=False,\n weights=None,\n input_tensor=input_tensor,\n alpha=kwargs['alpha'],\n input_shape=input_shape,\n pooling=None) # type: keras.Model\n\n if kwargs['alpha'] == .5:\n base_model.load_weights('data/mobilenet_v2_base_5.h5')\n elif kwargs['alpha'] == .75:\n base_model.load_weights('data/mobilenet_v2_base_7.h5')\n elif kwargs['alpha'] == 1.:\n base_model.load_weights('data/mobilenet_v2_base_10.h5')\n\n x1 = base_model.get_layer('block_13_expand_relu').output\n x2 = base_model.output\n\n y1 = compose(\n DarknetConv2D_BN_Leaky(128 if kwargs['alpha'] > 0.7 else 192, (3, 3)),\n DarknetConv2D(anchor_num * (class_num + 5), (1, 1)))(x2)\n\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1, 1)),\n UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(128 if kwargs['alpha'] > 0.7 else 192, (3, 3)),\n DarknetConv2D(anchor_num * (class_num + 5), (1, 1)))([x2, x1])\n\n y1_reshape = Reshape((7, 10, anchor_num, 5 + class_num), name='l1')(y1)\n y2_reshape = Reshape((14, 20, anchor_num, 5 + class_num), name='l2')(y2)\n\n yolo_model = keras.Model(inputs=input_tensor, outputs=[y1, y2])\n yolo_model_warpper = keras.Model(inputs=input_tensor, outputs=[y1_reshape, y2_reshape])\n\n return yolo_model, yolo_model_warpper", "def main():\r\n args = parse_arguments()\r\n model = YOLOv3(in_channels=3, 
num_classes=config.NUM_CLASSES).to(args.device)\r\n optimizer = optim.Adam(\r\n model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay\r\n )\r\n loss_fn = YoloLoss()\r\n scaler = torch.cuda.amp.GradScaler()\r\n\r\n train_loader, test_loader = get_loaders(args.train_dir, args.test_dir, args.batch_size, args.num_workers, args.pin_memory\r\n )\r\n if args.load_model:\r\n load_checkpoint(\r\n args.weights, model, optimizer, args.learning_rate\r\n )\r\n\r\n for epoch in range(args.epochs):\r\n # If evaluate model\r\n if epoch % args.eval_interval == 0 and epoch > 0:\r\n plot_couple_examples(model, test_loader, 0.5, args.nms_threshold)\r\n\r\n precision, recall, F1, rmse_errors, rel_errors = evaluate_model(\r\n test_loader, model, conf_threshold=args.conf_threshold, nms_threshold=args.nms_threshold,\r\n pixel_threshold=args.pixel_threshold\r\n )\r\n print(\r\n f\"Precision is: {precision}, \\n Recall is {recall}, \\n F1: {F1}, \\n\"\r\n f\"X rmse error: {rmse_errors[0]}, y rmse error: {rmse_errors[1]}, z rmse error: {rmse_errors[2]}, r rmse error: {rmse_errors[3]}, n rmse error: {rmse_errors[4]} \\n\"\r\n f\"X relative error: {rel_errors[0]}, y relative error: {rel_errors[1]}, z relative error: {rel_errors[2]}, r relative error: {rel_errors[3]}, n relative error: {rel_errors[4]}\"\r\n )\r\n\r\n train_fn(\r\n train_loader,\r\n model,\r\n optimizer,\r\n loss_fn,\r\n scaler,\r\n )\r\n\r\n if args.save_model:\r\n save_checkpoint(model, optimizer, filename=args.checkpoint_file)", "def get_mobilenet_v3(model_name:str, pretrained=True, **kwargs) -> nn.Module:\n\n mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0, norm_layer=nn.BatchNorm2d,\n se_act2=partial(nn.Hardsigmoid, inplace=True), se_reduction_ratio=4, se_reduce_mode='adjust')\n\n if model_name == 'mobilenet_v3_large':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),\n mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),\n mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1280\n elif model_name == 'mobilenet_v3_small':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se=True),\n mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, 
use_se=True),\n ]\n last_channel = 1024\n\n model = MobileNetV3(residual_config, last_channel=last_channel, block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)\n\n mobilenet_v2_init(model)\n\n if pretrained:\n load_from_zoo(model, model_name)\n\n return model", "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def VLocNet_full(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, 
activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_3_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1')(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2')(odo_fc_1)\n odo_fc_3 = Dense(4, name='odo_fc_3')(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_geo')\n\n pose_5 = ResNet_50_unit_5(input_tensor=pose_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1')(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2')(pose_fc_1)\n pose_fc_3 = Dense(4, name='pose_fc_3')(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1], output=[odo_merge, pose_merge], name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). 
'\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def construct_model(attribute_vector_file,\n img_height=220,\n img_width=176,\n TV_weight=50,\n alpha=4,\n vgg19_weights_file=\"models/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5\",\n vgg_output_layers=[\"block3_conv1\", \"block4_conv1\", \"block5_conv1\"],\n vgg_output_layer_weights=[1.0, 1.0, 1.0]):\n \n # LOAD PREVIOUSLY GENERATED ATTRIBUTE VECTOR\n w = np.load(attribute_vector_file)\n \n \n \n \n # CREATE CUSTOM LAYERS FOR USE IN MODEL:\n \n # Create Normalization layer\n class Normalize(Layer):\n def __init__(self, **kwargs):\n super(Normalize, self).__init__(**kwargs)\n\n def call(self, x, mask=None):\n return x / 255.\n \n # Create Denormalization layer\n class Denormalize(Layer):\n def __init__(self, **kwargs):\n super(Denormalize, self).__init__(**kwargs)\n\n def call(self, x, mask=None):\n return x * 255\n \n\n # Allows network to learn identify function, (NOT SURE IF THIS IS NECESSARY)\n def residual_block(ip, id):\n init = ip\n\n x = ReflectionPadding2D()(ip)\n x = Conv2D(128, (3, 3), activation='linear', padding='valid',\n name='res_conv_' + str(id) + '_1')(x)\n x = BatchNormalization(axis=1, name=\"res_batchnorm_\" + str(id) + \"_1\")(x)\n x = Activation('relu', name=\"res_activation_\" + str(id) + \"_1\")(x)\n\n x = ReflectionPadding2D()(x)\n x = Conv2D(128, (3, 3), activation='linear', padding='valid',\n name='res_conv_' + str(id) + '_2')(x)\n x = BatchNormalization(axis=1, name=\"res_batchnorm_\" + str(id) + \"_2\")(x)\n\n m = Add()([x, init])\n #m = Activation('relu', name=\"res_activation_\" + str(id))(m)\n\n return m\n \n \n \n # create ReflectionPadding layer\n # https://github.com/misgod/fast-neural-style-keras/blob/master/layers.py\n class ReflectionPadding2D(Layer):\n def __init__(self, padding=(1, 1), dim_ordering='default', **kwargs):\n super(ReflectionPadding2D, self).__init__(**kwargs)\n\n if dim_ordering == 'default':\n dim_ordering = K.image_dim_ordering()\n\n self.padding = padding\n if isinstance(padding, dict):\n if set(padding.keys()) <= {'top_pad', 'bottom_pad', 'left_pad', 'right_pad'}:\n self.top_pad = padding.get('top_pad', 0)\n self.bottom_pad = padding.get('bottom_pad', 0)\n self.left_pad = padding.get('left_pad', 0)\n self.right_pad = padding.get('right_pad', 0)\n else:\n raise ValueError('Unexpected key found in `padding` dictionary. '\n 'Keys have to be in {\"top_pad\", \"bottom_pad\", '\n '\"left_pad\", \"right_pad\"}.'\n 'Found: ' + str(padding.keys()))\n else:\n padding = tuple(padding)\n if len(padding) == 2:\n self.top_pad = padding[0]\n self.bottom_pad = padding[0]\n self.left_pad = padding[1]\n self.right_pad = padding[1]\n elif len(padding) == 4:\n self.top_pad = padding[0]\n self.bottom_pad = padding[1]\n self.left_pad = padding[2]\n self.right_pad = padding[3]\n else:\n raise TypeError('`padding` should be tuple of int '\n 'of length 2 or 4, or dict. 
'\n 'Found: ' + str(padding))\n\n if dim_ordering not in {'tf'}:\n raise ValueError('dim_ordering must be in {tf}.')\n self.dim_ordering = dim_ordering\n self.input_spec = [InputSpec(ndim=4)] \n\n\n def call(self, x, mask=None):\n top_pad=self.top_pad\n bottom_pad=self.bottom_pad\n left_pad=self.left_pad\n right_pad=self.right_pad \n\n\n paddings = [[0,0],[left_pad,right_pad],[top_pad,bottom_pad],[0,0]]\n\n\n return tf.pad(x,paddings, mode='REFLECT', name=None)\n\n def compute_output_shape(self,input_shape):\n if self.dim_ordering == 'tf':\n rows = input_shape[1] + self.top_pad + self.bottom_pad if input_shape[1] is not None else None\n cols = input_shape[2] + self.left_pad + self.right_pad if input_shape[2] is not None else None\n\n return (input_shape[0],\n rows,\n cols,\n input_shape[3])\n else:\n raise ValueError('Invalid dim_ordering:', self.dim_ordering)\n\n\n def get_config(self):\n config = {'padding': self.padding}\n base_config = super(ReflectionPadding2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items())) \n\n \n \n # Build mask generation net\n def generate_mask_net():\n \n # Build model architecture\n input_img = Input(shape=(img_height, img_width, 3))\n\n x = Normalize()(input_img)\n\n x = ReflectionPadding2D(padding=(4,4))(x)\n x = Conv2D(32, (9, 9), strides=(1,1), activation='linear', padding='valid')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(64, (3, 3), strides=(2,2), activation='linear', padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(128, (3, 3), strides=(2,2), activation='linear', padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n\n r1 = residual_block(x, 1)\n r2 = residual_block(r1, 2)\n r3 = residual_block(r2, 3)\n r4 = residual_block(r3, 4)\n x = residual_block(r4, 5)\n\n\n x = Conv2DTranspose(64, (3, 3), strides=(2,2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2DTranspose(32, (3, 3), strides=(2,2), padding='same')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = ReflectionPadding2D((4, 4))(x)\n y = Conv2D(3, (9, 9), strides=(1,1), activation='tanh', padding='valid',\n name='mask_output')(x)\n\n\n model_mask_net = Model(inputs=input_img, outputs=y)\n print(\"Mask model architecture is loaded\")\n\n return model_mask_net\n \n \n \n \n # add vgg19 convolutional base to autoencoder\n def vgg_net(mask, orig_input):\n \n orig_input_norm = Normalize()(orig_input)\n masked_input = Add()([mask, orig_input_norm])\n \n \n # create new tensor of original and masked inputs\n input_tensor = Concatenate(axis=0)([masked_input, orig_input_norm]) \n\n # Build out VGG19 Architecture\n x = Conv2D(64, (3, 3), activation='relu', name='block1_conv1', padding='same')(input_tensor)\n x = Conv2D(64, (3, 3), activation='relu', name='block1_conv2', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(128, (3, 3), activation='relu', name='block2_conv1', padding='same')(x)\n x = Conv2D(128, (3, 3), activation='relu', name='block2_conv2', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv1', padding='same')(x)\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv2', padding='same')(x)\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv3', padding='same')(x)\n x = Conv2D(256, (3, 3), activation='relu', name='block3_conv4', padding='same')(x)\n x = MaxPooling2D((2, 2), 
strides=(2, 2))(x)\n\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv1', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv2', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv3', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block4_conv4', padding='same')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv1', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv2', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv3', padding='same')(x)\n x = Conv2D(512, (3, 3), activation='relu', name='block5_conv4', padding='same')(x)\n z = MaxPooling2D((2, 2), strides=(2, 2))(x) \n\n\n full_model = Model(inputs=orig_input, outputs=z)\n print(\"VGG Model has been loaded and appended to network\")\n\n # Download and set weights\n f = h5py.File(vgg19_weights_file)\n layer_names = [name for name in f.attrs['layer_names']][1:] # chop off input layer\n\n for i, layer in enumerate(full_model.layers[-21:]):\n g = f[layer_names[i]]\n weights = [g[name] for name in g.attrs['weight_names']]\n layer.set_weights(weights)\n print(\"VGG19 Weights have been set successfully\")\n\n\n\n\n\n\n #Add losses as regulizers\n add_interpolation_loss(full_model, orig_input_norm, alpha, w)\n add_total_variation_loss(mask_net.layers[-1], weight=TV_weight)\n \n # Freeze all VGG layers\n for layer in full_model.layers[-21:]:\n layer.trainable = False\n\n return full_model\n \n \n \n \n def add_interpolation_loss(full_model, orig_input, alpha, w):\n\n vgg_layers = dict([(layer.name, layer) for layer in full_model.layers[-21:]])\n \n # output layers\n output_layers = vgg_output_layers\n\n layers = [vgg_layers[layer] for layer in output_layers]\n interpolation_regularizer = FeatureInterpolationRegularizer()(layers, alpha, w)\n \n # add_loss function to apply regularization loss to any layer\n layers[2].add_loss(interpolation_regularizer)\n \n \n \n # Loss function to be applied as a Regularizer\n class FeatureInterpolationRegularizer(Regularizer):\n\n def __init__(self):\n super(FeatureInterpolationRegularizer, self).__init__()\n\n def __call__(self, layer, a, w):\n phi_x_r = [K.flatten(layer[i].output[0])*vgg_output_layer_weights[i] for i in range(3)]\n phi_x_r = K.concatenate(phi_x_r) # Image + mask feature\n\n phi_x = [K.flatten(layer[i].output[1])*vgg_output_layer_weights[i] for i in range(3)]\n phi_x = K.concatenate(phi_x) # Original image features\n\n delta = phi_x_r - (phi_x + w * a)\n loss = K.sum(K.square(delta))\n\n return loss\n \n \n \n def add_total_variation_loss(transform_output_layer,weight):\n # Total Variation Regularization\n layer = transform_output_layer # Output layer\n tv_regularizer = TVRegularizer(weight)(layer)\n layer.add_loss(tv_regularizer)\n \n \n class TVRegularizer(Regularizer):\n \"\"\" Enforces smoothness in image output. 
\"\"\"\n\n def __init__(self, weight):\n self.weight = weight\n self.uses_learning_phase = False\n super(TVRegularizer, self).__init__()\n\n def __call__(self, x):\n assert K.ndim(x.output) == 4\n x_out = x.output \n\n shape = K.shape(x_out)\n img_width, img_height,channel = (shape[1],shape[2], shape[3])\n size = img_width * img_height * channel \n if K.image_dim_ordering() == 'th':\n a = K.square(x_out[:, :, :img_width - 1, :img_height - 1] - x_out[:, :, 1:, :img_height - 1])\n b = K.square(x_out[:, :, :img_width - 1, :img_height - 1] - x_out[:, :, :img_width - 1, 1:])\n else:\n a = K.square(x_out[:, :img_width - 1, :img_height - 1, :] - x_out[:, 1:, :img_height - 1, :])\n b = K.square(x_out[:, :img_width - 1, :img_height - 1, :] - x_out[:, :img_width - 1, 1:, :])\n loss = self.weight * K.sum(K.pow(a + b, 1.25)) \n return loss\n \n \n \n \n \n \n \n mask_net = generate_mask_net()\n model = vgg_net(mask_net.output,mask_net.input)\n model.summary()\n \n return model, mask_net", "def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2',\n OS=16, alpha=1., activation=None, training=True, weight_decay_type=None, weight_decay=None):\n\n if not (weights in {'pascal_voc', 'cityscapes', None}):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `pascal_voc`, or `cityscapes` '\n '(pre-trained on PASCAL VOC)')\n\n if not (backbone in {'xception', 'mobilenetv2'}):\n raise ValueError('The `backbone` argument should be either '\n '`xception` or `mobilenetv2` ')\n\n if weight_decay_type == \"l1\" and weight_decay is not None:\n regularizer = l1(weight_decay)\n elif weight_decay_type == \"l2\" and weight_decay is not None:\n regularizer = l2(weight_decay)\n elif weight_decay_type is None or weight_decay is None:\n regularizer = None\n else:\n raise ValueError(\"`Unknown weight decay type\")\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n img_input = input_tensor\n\n if backbone == 'xception':\n if OS == 8:\n entry_block3_stride = 1\n middle_block_rate = 2 # ! 
Not mentioned in paper, but required\n exit_block_rates = (2, 4)\n atrous_rates = (12, 24, 36)\n else:\n entry_block3_stride = 2\n middle_block_rate = 1\n exit_block_rates = (1, 2)\n atrous_rates = (6, 12, 18)\n\n x = Conv2D(32, (3, 3), strides=(2, 2), kernel_regularizer=regularizer, bias_regularizer=regularizer,\n name='entry_flow_conv1_1_new', use_bias=False, padding='same')(img_input)\n x = BatchNormalization(name='entry_flow_conv1_1_BN')(x, training=training)\n x = Activation(tf.nn.relu)(x)\n\n x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1, kernel_regularizer=regularizer)\n x = BatchNormalization(name='entry_flow_conv1_2_BN')(x, training=training)\n x = Activation(tf.nn.relu)(x)\n\n x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',\n skip_connection_type='conv', stride=2,\n depth_activation=False, kernel_regularizer=regularizer)\n x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',\n skip_connection_type='conv', stride=2,\n depth_activation=False, return_skip=True,\n kernel_regularizer=regularizer)\n\n x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',\n skip_connection_type='conv', stride=entry_block3_stride,\n depth_activation=False, kernel_regularizer=regularizer)\n for i in range(16):\n x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),\n skip_connection_type='sum', stride=1, rate=middle_block_rate,\n depth_activation=False, kernel_regularizer=regularizer)\n\n x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',\n skip_connection_type='conv', stride=1, rate=exit_block_rates[0],\n depth_activation=False, kernel_regularizer=regularizer)\n x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',\n skip_connection_type='none', stride=1, rate=exit_block_rates[1],\n depth_activation=True, kernel_regularizer=regularizer)\n\n else:\n OS = 8\n first_block_filters = _make_divisible(32 * alpha, 8)\n x = Conv2D(first_block_filters, kernel_size=3, strides=(2, 2), padding='same',\n use_bias=False, name='Conv', kernel_regularizer=regularizer, bias_regularizer=regularizer)(img_input)\n x = BatchNormalization(\n epsilon=1e-3, momentum=0.999, name='Conv_BN')(x, training=training)\n x = Activation(tf.nn.relu6, name='Conv_Relu6')(x)\n\n x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,\n expansion=1, block_id=0, skip_connection=False, kernel_regularizer=regularizer)\n\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,\n expansion=6, block_id=1, skip_connection=False, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,\n expansion=6, block_id=2, skip_connection=True, kernel_regularizer=regularizer)\n\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,\n expansion=6, block_id=3, skip_connection=False, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=4, skip_connection=True, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=5, skip_connection=True, kernel_regularizer=regularizer)\n\n # stride in block 6 changed from 2 -> 1, so we need to use rate = 2\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!\n expansion=6, block_id=6, skip_connection=False, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=7, skip_connection=True, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, 
filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=8, skip_connection=True, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=9, skip_connection=True, kernel_regularizer=regularizer)\n\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=10, skip_connection=False, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=11, skip_connection=True, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=12, skip_connection=True, kernel_regularizer=regularizer)\n\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!\n expansion=6, block_id=13, skip_connection=False, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=14, skip_connection=True, kernel_regularizer=regularizer)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=15, skip_connection=True, kernel_regularizer=regularizer)\n\n x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=16, skip_connection=False, kernel_regularizer=regularizer)\n\n # end of feature extractor\n\n # branching for Atrous Spatial Pyramid Pooling\n\n # Image Feature branch\n shape_before = tf.shape(x)\n b4 = GlobalAveragePooling2D()(x)\n # from (b_size, channels)->(b_size, 1, 1, channels)\n b4_shape = tf.keras.backend.int_shape(b4)\n b4 = Reshape((1, 1, b4_shape[1]))(b4)\n b4 = Conv2D(256, (1, 1), padding='same', kernel_regularizer=regularizer,\n use_bias=False, name='image_pooling')(b4)\n b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4, training=training)\n b4 = Activation(tf.nn.relu)(b4)\n # upsample. have to use compat because of the option align_corners\n size_before = tf.keras.backend.int_shape(x)\n b4 = tf.keras.layers.experimental.preprocessing.Resizing(*size_before[1:3], interpolation=\"bilinear\")(b4)\n # simple 1x1\n b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0',\n kernel_regularizer=regularizer)(x)\n b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0, training=training)\n b0 = Activation(tf.nn.relu, name='aspp0_activation')(b0)\n\n # there are only 2 branches in mobilenetV2. 
not sure why\n if backbone == 'xception':\n # rate = 6 (12)\n b1 = SepConv_BN(x, 256, 'aspp1', rate=atrous_rates[0], depth_activation=True, epsilon=1e-5, training=training,\n kernel_regularizer=regularizer)\n # rate = 12 (24)\n b2 = SepConv_BN(x, 256, 'aspp2', rate=atrous_rates[1], depth_activation=True, epsilon=1e-5, training=training,\n kernel_regularizer=regularizer)\n # rate = 18 (36)\n b3 = SepConv_BN(x, 256, 'aspp3', rate=atrous_rates[2], depth_activation=True, epsilon=1e-5, training=training,\n kernel_regularizer=regularizer)\n\n # concatenate ASPP branches & project\n x = Concatenate()([b4, b0, b1, b2, b3])\n else:\n x = Concatenate()([b4, b0])\n\n x = Conv2D(256, (1, 1), padding='same', use_bias=False, name='concat_projection',\n kernel_regularizer=regularizer)(x)\n x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x, training=training)\n x = Activation(tf.nn.relu)(x)\n x = Dropout(0.1)(x)\n # DeepLab v.3+ decoder\n\n if backbone == 'xception':\n # Feature projection\n # x4 (x2) block\n skip_size = tf.keras.backend.int_shape(skip1)\n x = tf.keras.layers.experimental.preprocessing.Resizing(*skip_size[1:3], interpolation=\"bilinear\")(x)\n\n dec_skip1 = Conv2D(48, (1, 1), padding='same', use_bias=False, name='feature_projection0',\n kernel_regularizer=regularizer, bias_regularizer=regularizer)(skip1)\n dec_skip1 = BatchNormalization(name='feature_projection0_BN', epsilon=1e-5)(dec_skip1, training=training)\n dec_skip1 = Activation(tf.nn.relu)(dec_skip1)\n x = Concatenate()([x, dec_skip1])\n x = SepConv_BN(x, 256, 'decoder_conv0', depth_activation=True, epsilon=1e-5, training=training,\n kernel_regularizer=regularizer)\n x = SepConv_BN(x, 256, 'decoder_conv1', depth_activation=True, epsilon=1e-5, training=training,\n kernel_regularizer=regularizer)\n\n # you can use it with arbitary number of classes\n if (weights == 'pascal_voc' and classes == 21) or (weights == 'cityscapes' and classes == 19):\n last_layer_name = 'logits_semantic'\n else:\n last_layer_name = 'custom_logits_semantic_new'\n\n x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name,\n kernel_regularizer=regularizer, bias_regularizer=regularizer)(x)\n size_before3 = tf.keras.backend.int_shape(img_input)\n x = tf.keras.layers.experimental.preprocessing.Resizing(*size_before3[1:3], interpolation=\"bilinear\")(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n if activation in {'softmax', 'sigmoid'}:\n x = tf.keras.layers.Activation(activation)(x)\n\n model = Model(inputs, x, name='deeplabv3plus')\n\n # load weights\n\n if weights == 'pascal_voc':\n if backbone == 'xception':\n weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH_X,\n cache_subdir='models')\n else:\n weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH_MOBILE,\n cache_subdir='models')\n model.load_weights(weights_path, by_name=True)\n elif weights == 'cityscapes':\n if backbone == 'xception':\n weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5',\n WEIGHTS_PATH_X_CS,\n cache_subdir='models')\n else:\n weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5',\n WEIGHTS_PATH_MOBILE_CS,\n cache_subdir='models')\n model.load_weights(weights_path, by_name=True)\n return model", "def train_mobilenetv2():\n\n # load data\n training_sets = 
load_augmented_dataset()\n\n # build models\n model_mobile = build_mobilenetv2()\n\n # store base weights\n baseWeights_t = model_mobile.get_weights()\n\n # NOTE: You can still leave this alone if you've only downloaded the fully augmented set.\n for training_set in training_sets:\n print(\" Starting training for set {}\".format(str(training_set)))\n model_mobile.set_weights(baseWeights_t) # Resets model\n train_x = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][0]))\n train_y = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][1]))\n\n early_stopping_monitor = EarlyStopping(patience=2)\n history = model_mobile.fit(train_x, train_y, batch_size=32, epochs=20, verbose=1, validation_split=0.2,\n shuffle=True,\n callbacks=[early_stopping_monitor])\n\n mpu.plot_accuracy_loss(history,\n \"./model_cache/train_data/{}_mobilenetv2_plots.png\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_mobilenetv2_plots.png\".format(str(training_set)),\n \"model_charts/{}_mobilenetv2_plots.png\".format(str(training_set)))\n\n model_mobile.save(\"./model_cache/train_data/{}_mobilenetv2.h5\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_mobilenetv2.h5\".format(str(training_set)),\n \"saved_models/{}_mobilenetv2.h5\".format(str(training_set)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The uri returned from request.uri is not properly urlencoded (sometimes it's partially urldecoded). This is a weird hack to get werkzeug to return the properly urlencoded uri string
def _get_uri_from_request(request):
    uri = request.base_url
    if request.query_string:
        uri += '?' + request.query_string.decode('utf-8')
    return uri
[ "def requote_uri(uri):\n # To reduce tabulator import time\n import requests.utils\n if six.PY2:\n def url_encode_non_ascii(bytes):\n pattern = '[\\x80-\\xFF]'\n replace = lambda c: ('%%%02x' % ord(c.group(0))).upper()\n return re.sub(pattern, replace, bytes)\n parts = urlparse(uri)\n uri = urlunparse(\n part.encode('idna') if index == 1\n else url_encode_non_ascii(part.encode('utf-8'))\n for index, part in enumerate(parts))\n return requests.utils.requote_uri(uri)", "def rawurl(self)->URL:\n req = self.request\n while req.response != None:\n req = req.response.req\n return req.url", "def decode_uri(encoded_uri):\n for enc, dec in HTTPServer.__encoded_chars.iteritems():\n encoded_uri = encoded_uri.replace(enc, dec).replace(enc.lower(), dec)\n return encoded_uri", "def uri(self):\n uristring = self.path\n if self.query:\n uristring += \"?{}\".format(self.query)\n if self.fragment:\n uristring += \"#{}\".format(self.fragment)\n\n return uristring", "def getQualifiedURL(self, uri=''):\n import urlparse\n scheme = urlparse.urlparse(uri)[0]\n if scheme:\n return uri\n\n host_url = self.request.host_url.rstrip('/')\n result = \"%s%s\" % (host_url, uri)\n\n # This might break qualified urls in redirects!\n # e.g. mapping 'http://netloc' -> '/'\n result = wikiutil.mapURL(self, result)\n return result", "def build_absolute_uri(self, uri):\n request = self.context.get('request', None)\n\n return (request.build_absolute_uri(uri) if\n request is not None else uri)", "def normalize_uri(uri):\n repeated_chars = ['/', '?', '#']\n\n def replace_repeated(dupl, string):\n while dupl*2 in string:\n string = string.replace(dupl * 2, dupl)\n return string\n\n for duplicate in repeated_chars:\n uri = replace_repeated(duplicate, uri)\n\n return uri", "def uri(self):\n uri_ = self.get(\"uri\")\n if uri_:\n # Convert non-string URIs into strings.\n # If the URI is already a unicode string this will do nothing.\n # We're assuming that URI cannot be a byte string.\n return text_type(uri_)\n else:\n return \"\"", "def normalize_uri(uri):\n if isinstance(uri, str):\n uri = uri.decode('utf-8')\n return uri.strip().replace(u' ', u'_')", "def test_unicode(self):\n iri = u'http://localhost/expos\\xe9?doppelg\\xe4nger=Bryan O\\u2019Sullivan#r\\xe9sum\\xe9'\n uri = b'http://localhost/expos%C3%A9?doppelg%C3%A4nger=Bryan%20O%E2%80%99Sullivan#r%C3%A9sum%C3%A9'\n self.assertEqual(flatten(url.URL.fromString(iri)), uri)", "def _clean_uri(uri):\n return compat.unquote(uri).replace('+', ' ')", "def full_path(self):\n return self.path + u'?' + to_unicode(self.query_string, self.url_charset)", "def uri(self) -> str:\n host = (self._host if self._host not in ('', '0.0.0.0')\n else socket.gethostname())\n port = self.socket.getsockname()[1]\n return f'http://{host}:{port}'", "def _url(host, port, protocol):\n return f\"{protocol}{host}:{port}/\"", "def iri2uri(uri): \r\n if isinstance(uri ,unicode):\r\n (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)\r\n authority = authority.encode('idna')\r\n # For each character in 'ucschar' or 'iprivate'\r\n # 1. encode as utf-8\r\n # 2. then %-encode each octet of that utf-8 \r\n uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))\r\n uri = \"\".join([encode(c) for c in uri])\r\n return uri", "def _reconstruct_relative_url(self, environ):\n url = urllib.quote(environ.get('SCRIPT_NAME', ''))\n url += urllib.quote(environ.get('PATH_INFO', ''))\n if environ.get('QUERY_STRING'):\n url += '?' 
+ environ['QUERY_STRING']\n return url", "def get_url(self, uri):\n # TODO make this a prepend_if_needed type method\n return urllib.parse.urljoin(self.hostname, uri)", "def _make_uriref(string):\n uri_pattern = rfc.format_patterns()[\"URI\"]\n match = re.compile(uri_pattern).match(string)\n if not match:\n return False\n return rb.term.URIRef(string.decode(\"unicode-escape\"))", "def uri_resolve(self, http_request):\n uri_parts = http_request.uri.split('?') + [None]\n\n if '' in uri_parts:\n uri_parts = filter(lambda a: a != '', uri_parts)\n\n parameters = http_request.get_params(uri_parts[1])\n\n resource_location = self.decode_uri(HTTPServer.normalize_uri(uri_parts[0]))\n\n if resource_location.find(\"../\") != -1:\n return \"Forbidden location\", parameters\n\n if resource_location[-1:] == '/':\n resource_location += HTTPServer.index\n\n return self.document_root + resource_location, parameters" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visualize a particular column of Y_pred and Y_test for a particular series
def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):
    ser_idx = [i for i in range(0, len(y_test), num_win_ser)]

    if num_plots > len(ser_idx):
        print("Too many plots, reduce the number")
    else:
        indx = ser_idx[0:num_plots]
        days = range(num_win_ser)

        for idx in indx:
            CR = test_seq[idx][0][0][3]
            pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]
            true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]

            plt.title("Y_True V/S Y_Pred, CR: "+ str(CR))
            plt.xlabel('Days')
            plt.ylabel(cols_y[col_idx])
            plt.plot(days, pred, label = 'Pred')
            plt.plot(days, true, label = 'True')
            plt.legend()
            plt.show()
[ "def plotPreds(self, predictions, test_series=None, run_up=None,\\\n ylabel='units'):\n #set up figure\n plt.figure(figsize=(10,6))\n plt.ylabel(ylabel)\n plt.xlabel('datetime')\n \n #plot lines\n if run_up is None:\n run_up = self.validation[-7:]\n \n if test_series is not None:\n plt.plot(pd.concat([run_up, test_series[:1]]))\n plt.plot(test_series)\n \n else:\n plt.plot(run_up)\n \n #plot points\n plt.scatter(predictions.index, predictions, edgecolors='k',\\\n label='predictions', c='#2ca02c', s=64)\n \n if test_series is not None:\n plt.scatter(test_series.index, test_series, marker='X',\\\n edgecolors='k', label='test_data', c='#ff7f0e', s=200)\n \n plt.legend()", "def show_target_pred_dif(yt,yp):\n column = ['Target','Predicted','Target - Predicted']\n nrows = yt.shape[-1]\n fig, axs = plt.subplots(figsize=(10,nrows*3), ncols = 3, nrows=nrows)\n [axi.set_axis_off() for axi in axs.ravel()]\n fig.tight_layout()\n for i in range(nrows):\n axs[i,0].imshow(yt[...,i]*255,cmap='Reds', vmin=0, vmax=255)\n axs[i,1].imshow(yp[...,i]*255,cmap='Blues', vmin=0, vmax=255)\n axs[i,2].imshow((yt[...,i]-yp[...,i]+1)*127,cmap='seismic', vmin=0, vmax=255)\n for ax, col in zip(axs[0], column):\n ax.set_title(col)", "def y_test(self):\n return self.get_data_subset(self.test_patients, \"y\")", "def plot(self, X, y, y_pred):\n\n from matplotlib import pyplot as plt\n\n plt.plot(X, y, 'rs', X, y_pred)\n plt.show()", "def show_misclassified(X_test, y_test, y_pred):\n equal = (y_pred == y_test[:,0]).astype(int) # 0 for misclassified, 1 else\n misclassified = np.sum(equal == 0)\n miss_inds = np.where(equal == 0)[0]\n\n for ind in miss_inds:\n print (\"Actual:\", y_test[ind,0], \"Predicted:\", y_pred[ind])\n plt.imshow(X_test[ind].reshape(8,8))\n plt.show()", "def test_sarima_model(y, y_test, results, **kwargs):\n \n # Get predictions\n pred = results.get_prediction(start=y_test.index.min(), end=y_test.index.max(), **kwargs)\n y_pred = pred.predicted_mean\n pred_ci = pred.conf_int()\n\n # Calculate some metrics and print them out\n rmse = ((y_pred - y_test) ** 2).mean() ** 0.5\n print('Root Mean Squared Error =', rmse)\n \n r2 = r2_score(y_pred, y_test)\n print('R^2 =', r2)\n \n # Graph\n ax = y.plot(label='observed')\n y_pred.plot(ax=ax, label='predicted', alpha=.7, figsize=(15, 8))\n ax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n plt.title('Average Monthly Temperature: Observed vs. 
Predicted')\n ax.set_xlabel('Date')\n ax.set_ylabel('Temperature')\n plt.legend()\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] < 0.5:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def visualize_testing(self) -> None:\n data = self.data[['tested total', 'tested positive', 'tested negative']]\n colors = [\"black\", \"blue\", \"red\"]\n data.plot(color=colors)\n\n plt.xlabel(\"Number of Steps\")\n plt.ylabel(\"Number of Tests\")\n plt.title(\"Testing vs Time\")\n\n self.__handle_result(\"testing.png\")", "def plot_example_errors(X, y, y_pred):\n incorrect = (y != y_pred)\n \n X = X[incorrect]\n y = y[incorrect]\n y_pred = y_pred[incorrect]\n\n # Plot the first 9 images.\n plot_example(X, y, y_pred)", "def visualization(self, x, y, preds):\n plt.figure()\n plt.plot(x, y, \".\", color = \"blue\")\n plt.plot(x, preds, \"-\", color = \"red\")", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def visualization(self, x, y, preds):\n plt.figure()\n plt.plot(x, y, \".\", color = \"blue\")\n plt.plot(x, preds, \".\", color = \"red\")", "def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )", "def plot_result(data, gt_y, pred_y):\n print(data.shape[0],\" \",gt_y.shape[0])\n assert data.shape[0] == gt_y.shape[0]\n print(data.shape[0],\" \",pred_y.shape[0])\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def _graph_results(self, X_test, y_test, y_pred):\n if 
self.regression is None:\n print(\"Regression results aren't available. Have you run linear_regression() yet?\")\n return\n\n if self.attributes.shape[1] > 1:\n print(\"Graphing is supported for one feature only.\")\n return\n\n plt.scatter(X_test, y_test, color=\"black\")\n plt.plot(X_test, y_pred, color=\"blue\", linewidth=3)\n plt.xticks(())\n plt.yticks(())\n plt.show()", "def plot_model_results(model_results_df,\n x, y, hue=None,\n xlabel=\"\", ylabel=\"\",\n title=\"\",\n null_accuracy=None,\n plot_max=True,\n text_lift=1.03):\n # font to be used in axes labels\n font = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 16}\n # font to be used for null accuracy\n font_null_acc = {'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 16}\n # font to be used for model accuracy\n font_acc = {'family': 'serif',\n 'color': 'darkgreen',\n 'weight': 'normal',\n 'size': 16}\n\n # plot grouped bar chart\n sns.catplot(x=x, y=y, hue=hue, data=model_results_df, kind='bar')\n # get axis created by seaborn\n ax = plt.gca()\n\n if plot_max:\n # plot max value from model performance\n ax.axhline(model_results_df[y].max(),\n color='darkgreen',\n linestyle='--',\n linewidth=2)\n ax.text(0, model_results_df[y].max() * text_lift,\n \"Best accuracy: {0:.2f}\"\n .format(model_results_df[y].max()),\n fontdict=font_acc)\n\n if null_accuracy:\n # plot null accuracy\n ax.axhline(null_accuracy,\n linestyle='--',\n color='black',\n linewidth=2)\n ax.text(0, null_accuracy * text_lift,\n \"Null accuracy: {0:.2f}\"\n .format(null_accuracy),\n fontdict=font_null_acc)\n\n # set axis parameters\n ax.set_ylabel(ylabel, fontdict=font)\n ax.set_xlabel(xlabel, fontdict=font)\n ax.set_title(title)\n plt.show()", "def evaluate_random_forest(y_test, y_pred):", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._calc_move function
def test_calc_move(self):
    t = AioBaseTurtle()
    t.speed(speed=5)
    steps, delta = t._calc_move(Vec2D(0, 100))
    self.assertEqual(steps, 20)
    self.assertAlmostEqual(delta[0], 0.0)
    self.assertAlmostEqual(delta[1], 5.0)
[ "def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position\n \"black\",\n 1,\n False\n )\n self.mock_update.assert_called_once_with()", "def test_move():\n\n print\n print '--------------'\n print 'test certain_move and uncertain_move are different'\n tgt = ComponentMove()\n tgt.certain_move()\n tgt.uncertain_move()\n tgt.component.model_step_is_done()\n\n assert np.all(tgt.delta['lat'] != tgt.u_delta['lat'])\n assert np.all(tgt.delta['long'] != tgt.u_delta['long'])\n assert np.all(tgt.delta['z'] == tgt.u_delta['z'])", "def test_position_angle_move(self):\n print(\"Testing position angle move\")\n pass", "def __movement_test(self):\n if self.SIMULATE:\n logging.debug(f\"Simulating Head movement test...\")\n out = 0\n else:\n self.traverseHomeSensor.set_rising_callback(self.__home_callback)\n\n self.traverseStepper.move_steps(round(HeadTraverser.MAX_TRAV_STEPS / 2), HeadTraverser.POS_DIR)\n\n count = self.traverseStepper.move_steps(round(HeadTraverser.MAX_TRAV_STEPS / 2), HeadTraverser.NEG_DIR)\n\n if self.traverseHomeSensor.read_sensor():\n logging.debug(f\"Tool Movement Test Completed. Expected Steps = {HeadTraverser.MAX_TRAV_STEPS/2}, \"\n f\"Steps taken = {count}\")\n out = count\n else:\n exp = count\n count = self.traverseStepper.move_steps(HeadTraverser.MAX_TRAV_STEPS - count, HeadTraverser.NEG_DIR)\n logging.debug(f'Tool Movement Test Completed. Expected Steps = {HeadTraverser.MAX_TRAV_STEPS/2}, '\n f'Actual Steps = {exp + count}')\n out = count + exp\n\n self.traverseHomeSensor.clear_rising_callback()\n\n return out", "def test_move(self):\n self.sprite.move((200, 200), 2000)\n self.sprite.update(1000000)\n self.assertTupleEqual(\n (round(self.sprite.x_pos), round(self.sprite.y_pos)), (200, 200))", "def test_set_position_after_travel(self):\n travelcalculator = TravelCalculator(25, 50)\n travelcalculator.start_travel(30)\n travelcalculator.set_position(80)\n assert travelcalculator.position_reached()\n assert travelcalculator.current_position() == 80", "def test_change_direction(self):\n travelcalculator = TravelCalculator(50, 25)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(60)\n travelcalculator.start_travel(80)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n # change direction after two seconds\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 64\n travelcalculator.start_travel(48)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_UP\n\n assert travelcalculator.current_position() == 64\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000004.0\n assert travelcalculator.current_position() == 56\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000006.0\n assert travelcalculator.current_position() == 48\n assert travelcalculator.position_reached()", "def movement(self):", "def test_get_move_interface(self):\n h, w = 9, 9 # board size\n test_depth = 1\n starting_location = (2, 7)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n search_method = \"minimax\"\n heuristic = lambda g, p: 0. 
# return 0 everywhere\n\n # create a player agent & a game board\n agentUT = game_agent.CustomPlayer(\n test_depth, heuristic, iterative_search, search_method)\n\n # Test that get_move returns a legal choice on an empty game board\n board = isolation.Board(agentUT, 'null_agent', w, h)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on an \" +\n \"empty board. It should return coordinates on the \" +\n \"game board for the location of the agent's next \" +\n \"move. The move must be one of the legal moves on \" +\n \"the current game board.\"))\n\n # Test that get_move returns a legal choice for first move as player 2\n board = isolation.Board('null_agent', agentUT, w, h)\n board.apply_move(starting_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed making the first \" +\n \"move as player 2 on a new board. It should return \" +\n \"coordinates on the game board for the location \" +\n \"of the agent's next move. The move must be one \" +\n \"of the legal moves on the current game board.\"))\n\n # Test that get_move returns a legal choice after first move\n board = isolation.Board(agentUT, 'null_agent', w, h)\n board.apply_move(starting_location)\n board.apply_move(adversary_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on a \" +\n \"game in progress. It should return coordinates on\" +\n \"the game board for the location of the agent's \" +\n \"next move. The move must be one of the legal moves \" +\n \"on the current game board.\"))", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def test_get_random_move(self):\n for i in range(1000):\n y, x = self.ttt.get_random_move()\n assert (y < self.ttt.height)\n assert (x < self.ttt.width)", "def test_get_move(self):\n\n class DynamicTimer():\n \"\"\"Dynamic Timer allows the time limit to be changed after the\n timer is initialized so that the search timeout can be triggered\n before the timer actually expires. 
This allows the timer to expire\n when an event occurs, regardless of the clock time required until\n the event happens.\n \"\"\"\n def __init__(self, time_limit):\n self.time_limit = time_limit\n self.start_time = curr_time_millis()\n\n def time_left(self):\n return self.time_limit - (curr_time_millis() - self.start_time)\n\n w, h = 11, 11 # board size\n adversary_location = (0, 0)\n method = \"minimax\"\n\n # The agent under test starts at the positions indicated below, and\n # performs an iterative deepening minimax search (minimax is easier to\n # test because it always visits all nodes in the game tree at every\n # level).\n origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]\n exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]\n\n for idx in range(len(origins)):\n\n # set the initial timer high enough that the search will not\n # timeout before triggering the dynamic timer to halt by visiting\n # the expected number of nodes\n time_limit = 1e4\n timer = DynamicTimer(time_limit)\n eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)\n agentUT, board = self.initAUT(-1, eval_fn, True, method,\n origins[idx], adversary_location,\n w, h)\n legal_moves = board.get_legal_moves()\n chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)\n\n diff_total = abs(board.counts[0] - exact_counts[idx][0])\n diff_unique = abs(board.counts[1] - exact_counts[idx][1])\n\n self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)\n\n self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(\n legal_moves, chosen_move))", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def think_move(self) -> tuple[int]:\n raise NotImplementedError", "def makeMove(self, movable_statement):\n # 'DONE'\n\n #assert the new posisiton\n tile = str(movable_statement.terms[0]) \n froX = str(movable_statement.terms[1]) #going to use fro again for consistency\n froY = str(movable_statement.terms[2]) \n toX = str(movable_statement.terms[3])\n toY = str(movable_statement.terms[4])\n tile2 = self.kb.kb_ask(\n parse_input('fact: (pos ?tile2 {} {})'.format(toX, toY)))[0].bindings_dict['?tile2']\n self.kb.kb_retract(\n parse_input('fact: (pos ' + tile + ' ' + froX + ' ' + froY + ')'))\n #make the move: assert new pos fact\n self.kb.kb_assert(\n parse_input('fact: (pos ' + tile + ' ' + toX + ' ' + toY + ')'))\n self.kb.kb_retract(\n parse_input('fact: (pos {} {} {})'.format(tile2, toX, toY)))\n self.kb.kb_assert(\n parse_input('fact: (pos {} {} {})'.format(tile2, froX, froY)))", "def _move(self):\n self._attr[\"position\"][0] += np.cos((self._attr[\"direction\"] + 90) * np.pi / 180) * self._attr[\"speed\"] * self._attr[\"tick\"]\n self._attr[\"position\"][1] += np.sin(-(self._attr[\"direction\"] + 90) * np.pi / 180) * self._attr[\"speed\"] * self._attr[\"tick\"]\n\n self._attr[\"distance_traveled\"] 
+= self._attr[\"speed\"] * self._attr[\"tick\"]\n # get distance travelled by the sprite so we can remove when it's superior to the range of the spell", "def _move(self, steps):\n if self.direction == 0:\n self.y += steps\n elif self.direction == 1:\n self.x += steps\n elif self.direction == 2:\n self.y -= steps\n elif self.direction == 3:\n self.x -= steps", "def test_steering_wheels_right(self):\n response = self.smoothie.custom_move_to(A_F=config.A_F_MAX, A=config.A_MIN)\n return response", "def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._calc_rotation function
def test_calc_rotation(self):
    t = AioBaseTurtle()
    t.speed(speed=2)
    orient, steps, delta = t._calc_rotation(120)
    self.assertEqual(steps, 21)
    self.assertAlmostEqual(delta, 120.0 / 21.0)
    self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))
    self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))
[ "def test_speed_angle_rotate(self):\n print(\"Testing speed angle rotate!\")\n pass", "def get_rotation():\n return _rotation * 90", "def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n np.allclose(S, expected, atol=tol, rtol=0)", "def test_position_angle_rotate(self):\n print(\"Testing position angle rotate\")\n pass", "def test_ToroidalFieldCoilCoatHanger_rotation_angle(self):\n\n test_shape = paramak.ToroidalFieldCoilCoatHanger(\n horizontal_start_point=(200, 500),\n horizontal_length=400,\n vertical_mid_point=(700, 0),\n vertical_length=500,\n thickness=50,\n distance=50,\n number_of_coils=8,\n )\n\n test_shape.rotation_angle = 360\n test_shape.workplane = \"XZ\"\n test_volume = test_shape.volume\n test_shape.rotation_angle = 180\n assert test_shape.volume == pytest.approx(test_volume * 0.5, rel=0.01)\n\n test_shape.rotation_angle = 360\n test_shape.workplane = \"YZ\"\n test_volume = test_shape.volume\n test_shape.rotation_angle = 180\n assert test_shape.volume == pytest.approx(test_volume * 0.5, rel=0.01)\n\n # this test will remain commented until workplane issue #308 is resolved\n # currently causes terminal to crash due to large number of unions\n # test_shape.rotation_angle = 360\n # test_shape.workplane = \"XY\"\n # test_volume = test_shape.volume\n # test_shape.rotation_angle = 180\n # assert test_shape.volume == pytest.approx(test_volume * 0.5)", "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def test_rotation():\n output_file = render(\"rotate\")\n pdf = PyPDF3.PdfFileReader(output_file)\n # Note that inputs to getPage are 0-indexed\n assert 90 == pdf.getPage(3)['/Rotate']\n assert 90 == pdf.getPage(4)['/Rotate']\n assert 90 == pdf.getPage(5)['/Rotate']\n assert 180 == pdf.getPage(6)['/Rotate']\n assert 180 == pdf.getPage(7)['/Rotate']\n assert 180 == pdf.getPage(8)['/Rotate']", "def test_rotation():\n board = TetrisBoard()\n board.new_piece(0)\n #The obstruction is a piece that\n #is fixed in the 3rd row and 5th column.\n board.board[1][4] = 2\n board.rotate()\n board.freeze()\n #Tests that the rotation did not change because a collision was detected\n assert board.piece.orientation == 0", "def test_rotate(self, number_of_rotations, angle, direction):\n for _ in xrange(number_of_rotations):\n self.robot.rotate(angle)\n self.assertEqual(self.robot.direction, direction)", "def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)", "def rotation(angle, pas):\n\n return (angle + pas) % 360", "def test_calculate_angle():\n r1 = np.array([0, 0, -1])\n r2 = np.array([0, 0, 0])\n r3 = np.array([1, 0, 0])\n\n expected_angle = 90\n calculated_angle = molecool.calculate_angle(r1, r2, r3, degrees = True)\n\n assert expected_angle == calculated_angle", "def test_calculate_angle():\n\n r_a = np.array([0, 0, -1])\n r_b = np.array([0, 0, 0])\n r_c = np.array([0, 1, 0])\n\n 
expected_angle = 90\n\n calculated_angle = molecool.calculate_angle(r_a, r_b, r_c, degrees=True)\n\n assert calculated_angle == expected_angle", "def rotation_mode():\r\n pass", "def test_extract_rot_angle():\n v = np.zeros((4,2))\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Zero velocities not allowed.\"\n \n v[:,1] = 1.\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Failed to get both forward and backward directions.\"\n\n # Forwards-backwards motion.\n v[:,1] = 0.\n v[:2,0] = -1.1\n v[2:,0] = 1.2\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,np.pi)\n\n # Forwards-backwards motion.\n v[:,0] = 0.\n v[:2,1] = -.9\n v[2:,1] = .8\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2)\n\n # Forwards-backwards motion with noise.\n v[:2,1] += (np.random.rand(2)*2-1)/10\n v[2:,1] += (np.random.rand(2)*2-1)/10\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2,atol=.1)", "def test_calculate_angle():\n\n r1 = np.array([0,0,-1])\n r2 = np.array([0,0,0])\n r3 = np.array([1,0,0])\n\n expected_angle = 90.\n\n calculated_angle = molecool.calculate_angle(r1,r2,r3,degrees = True)\n\n assert calculated_angle == expected_angle", "def rotate_rotor_value(self):\r\n\r\n raise NotImplementedError(\"Reflector doesn't rotate\")", "def test_rotation_against_quat(self):\n v1 = np.random.rand(3)\n v2 = np.random.rand(3)\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n\n rot = helpers.get_rotation_matrix(v1, v2)\n vector, angle = helpers.get_rotation_vector_and_angle(v1, v2)\n quat = Quaternion(vector=vector, angle=angle).unit()\n rot_quat = quat.basis()\n for x_row, y_row in zip(rot, rot_quat):\n for a, b in zip(x_row, y_row):\n self.assertAlmostEqual(a, b)\n # v2_bis = rot@v1.T\n # v2_tris = rot_quat@v1.T", "def _test_rotation(self, src_center, ref_center, src_delta, ref_delta,\n cos_shift, max_cos_rot_sq):\n\n # Make sure the sine is a real number.\n if cos_shift > 1.0:\n cos_shift = 1.\n elif cos_shift < -1.0:\n cos_shift = -1.\n sin_shift = np.sqrt(1 - cos_shift ** 2)\n\n # If the sine of our shift is zero we only need to use the identity\n # matrix for the shift. Else we construct the rotation matrix for\n # shift.\n if sin_shift > 0:\n rot_axis = np.cross(src_center, ref_center)\n rot_axis /= sin_shift\n shift_matrix = self._create_spherical_rotation_matrix(\n rot_axis, cos_shift, sin_shift)\n else:\n shift_matrix = np.identity(3)\n\n # Now that we have our shift we apply it to the src delta vector\n # and check the rotation.\n rot_src_delta = np.dot(shift_matrix, src_delta)\n proj_src_delta = (rot_src_delta\n - np.dot(rot_src_delta, ref_center) * ref_center)\n proj_ref_delta = (ref_delta\n - np.dot(ref_delta, ref_center) * ref_center)\n cos_rot_sq = (np.dot(proj_src_delta, proj_ref_delta) ** 2\n / (np.dot(proj_src_delta, proj_src_delta)\n * np.dot(proj_ref_delta, proj_ref_delta)))\n # If the rotation isn't in tolerance return None.\n if cos_rot_sq < max_cos_rot_sq:\n return pipeBase.Struct(\n cos_rot_sq=None,\n proj_ref_ctr_delta=None,\n shift_matrix=None,)\n # Return the rotation angle, the plane projected reference vector,\n # and the first half of the full shift and rotation matrix.\n return pipeBase.Struct(\n cos_rot_sq=cos_rot_sq,\n proj_ref_ctr_delta=proj_ref_delta,\n shift_matrix=shift_matrix,)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._calc_circle function
def test_calc_circle(self):
    t = AioBaseTurtle()
    steps, step_len, rot_step = t._calc_circle(100, extent=180)
    self.assertEqual(steps, 14)
    self.assertAlmostEqual(rot_step, 180.0 / 14.0)
    self.assertAlmostEqual(step_len, 22.3928952207)
[ "def makeCircle(r, a): \n myTurtle.circle(r, a)", "def draw_circle(c):\n turtle.circle(c.radius)", "def GetCircle(circle):\r\n pass", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def test_pow_circle():\n c1 = Circle(2)\n c2 = c1 ** 3\n assert c2.radius == 8\n\n c3 = Circle(4)\n c4 = c3 ** c1\n assert c4.radius == 16", "def test_circle(self):\n posargs = '1 2 3'\n negargs = '-6 -7 8'\n k, v = self.base.parse_circle(posargs)\n self.assertEqual(k, 'shape')\n self.assertEqual(v.type, 'circle')\n self.assertEqual((v.x, v.y), (1, 2))\n self.assertEqual(v.radius, 3)\n\n k, v = self.base.parse_circle(negargs)\n self.assertEqual(k, 'shape')\n self.assertEqual(v.type, 'circle')\n self.assertEqual((v.x, v.y), (-6, -7))\n self.assertEqual(v.radius, 8)", "def test_circumference():\n assert func_difficult.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func_difficult.circumference_circle(0) == 0, \"is 0\"\n assert func_difficult.circumference_circle(10) == 2 * np.pi * 10", "def test_circumference():\n assert func1.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func1.circumference_circle(0) == 0, \"is 0\"\n assert func1.circumference_circle(10) == 2 * np.pi * 10", "def test_circle():\n c = Circle(4)\n assert c.radius == 4\n assert c.diameter == 8", "def circle(r):\r\n pi=3.14\r\n area=pi*r*r\r\n perimeter=2*pi*r\r\n return(area,perimeter)", "def drawCircle(myturtle, x, y, r):\n\n #Getting new turtle object and draws circle\n myturtle.penup()\n myturtle.setposition(x, y)\n myturtle.pendown()\n myturtle.circle(r)", "def createCircularArea(self,radius,offset): \n \n try:\n refPos = self.__referencePosition\n \n # negative offset values move the circle into the approach direction\n d = radius - offset\n \n refPosWithOffsetX = refPos.X\n refPosWithOffsetY = refPos.Y \n \n if (not offset == 0): \n circHeading = self.MAX_VALUE\n # if there is a approach, get moving direction \n points = self.__points\n \n # heading of circle is needed for circle calculation\n circHeading = self.calcHeadingForwardingArea(points, refPos, d)\n \n # geometric projections and rotations for the circle center points\n if(not circHeading == self.MAX_VALUE): \n refPosWithOffsetX = refPos.X + offset * math.sin(circHeading) \n refPosWithOffsetY = refPos.Y + offset * math.cos(circHeading)\n \n center = Vector(refPosWithOffsetX,refPosWithOffsetY)\n # generate a circle from center point and radius\n return GeoShapes.Circle(center,radius)\n \n \n except Exception, err:\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n traceback.print_tb(exceptionTraceback, limit=1, file=sys.stdout)\n traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback,limit=2, file=sys.stdout)\n time.sleep(4) \n return 1", "def area_of_circle(radius):\n return radius", "def main():\n x = int(input(\"Enter the x coordinate of the center point: \"))\n y = int(input(\"Enter the y coordinate of the center point: \"))\n radius = int(input(\"Enter the radius: \"))\n draw_circle(Turtle(), x, y, radius)", "def GetRadius(self):\n ...", "def tscheme_circle(r, extent=None):\n if extent is None:\n _check_nums(r)\n else:\n _check_nums(r, extent)\n _tscheme_prep()\n turtle.circle(r, extent and extent)", "def circle(self, x, y, r):\n # Render units to points.\n xpt, ypt, rpt = upt(x, y, r)\n self.b.oval(xpt-rpt, ypt-rpt, 
2*rpt, 2*rpt)", "def test_circle_from_diameter():\n c = Circle.from_diameter(8)\n assert c.diameter == 8\n assert c.radius == 4", "def is_circle(self):\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the AioBaseTurtle._move_step function
def test_move_step(self):
    t = AioBaseTurtle()
    t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))
    self.assertAlmostEqual(t._position[0], 100)
    self.assertAlmostEqual(t._position[1], 100)
    t.screen._drawline.assert_called_once_with(
        t.currentLineItem,
        ((-100.0, 0.0), (100.0, 100.0)),  # called with mutable _position
        "black",
        1,
        False
    )
    self.mock_update.assert_called_once_with()
[ "def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)", "def __movement_test(self):\n if self.SIMULATE:\n logging.debug(f\"Simulating Head movement test...\")\n out = 0\n else:\n self.traverseHomeSensor.set_rising_callback(self.__home_callback)\n\n self.traverseStepper.move_steps(round(HeadTraverser.MAX_TRAV_STEPS / 2), HeadTraverser.POS_DIR)\n\n count = self.traverseStepper.move_steps(round(HeadTraverser.MAX_TRAV_STEPS / 2), HeadTraverser.NEG_DIR)\n\n if self.traverseHomeSensor.read_sensor():\n logging.debug(f\"Tool Movement Test Completed. Expected Steps = {HeadTraverser.MAX_TRAV_STEPS/2}, \"\n f\"Steps taken = {count}\")\n out = count\n else:\n exp = count\n count = self.traverseStepper.move_steps(HeadTraverser.MAX_TRAV_STEPS - count, HeadTraverser.NEG_DIR)\n logging.debug(f'Tool Movement Test Completed. Expected Steps = {HeadTraverser.MAX_TRAV_STEPS/2}, '\n f'Actual Steps = {exp + count}')\n out = count + exp\n\n self.traverseHomeSensor.clear_rising_callback()\n\n return out", "def test_move():\n\n print\n print '--------------'\n print 'test certain_move and uncertain_move are different'\n tgt = ComponentMove()\n tgt.certain_move()\n tgt.uncertain_move()\n tgt.component.model_step_is_done()\n\n assert np.all(tgt.delta['lat'] != tgt.u_delta['lat'])\n assert np.all(tgt.delta['long'] != tgt.u_delta['long'])\n assert np.all(tgt.delta['z'] == tgt.u_delta['z'])", "def move(self, step):\n\n status = self.read()\n Logger.getLogger().debug(\"Status in move method: %s\", status)\n # while the motors are moving we don't want to start another movement\n if status > CurtainsStatus.OPEN or self.motor.value:\n return\n\n self.target = step\n\n # deciding the movement direction\n if self.steps() < self.target:\n self.__open__()\n elif self.steps() > self.target:\n self.__close__()", "def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def moveStep(self):\n\t\tif self.pos[0] < self.boundsX[0] or \\\n\t\t\tself.pos[0] > (self.boundsX[1] - self.width):\n\t\t\t\tself.dir[0] *= -1\n\t\tif self.pos[1] < self.boundsY[0] or \\\n\t\t self.pos[1] > (self.boundsY[1] - self.height):\n\t\t\t\tself.dir[1] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def test_position_angle_move(self):\n print(\"Testing position angle move\")\n pass", "def _move(self, steps):\n if self.direction == 0:\n self.y += steps\n elif self.direction == 1:\n self.x += steps\n elif self.direction == 2:\n self.y -= steps\n elif self.direction == 3:\n self.x -= steps", "def step(self, new_position):\n # Calculate direction of movement\n x_diff = self.game_map.player_position[0] - new_position[0]\n y_diff = self.game_map.player_position[1] - new_position[1]\n\n # Determine what key to press\n direction = ''\n if x_diff == 1:\n direction = 'a'\n elif x_diff == -1:\n direction = 'd'\n elif y_diff == 1:\n direction = 'w'\n elif y_diff == -1:\n direction = 's'\n else:\n utils.log(\n 'SEVERE', F\"Invalid step difference. 
xDiff: {x_diff}, yDiff: {y_diff}\")\n utils.quit_game()\n\n # Move along path\n pyautogui.press(direction)\n sleep(0.1)\n\n # Player moved, re-detect environment\n screenshot = utils.take_screenshot()\n self.game_map.update_player_position(screenshot)\n self.game_map.update_map()", "def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)", "def step(self):\n \n self.steer()\n self.wobble()\n self.move()", "def move(self, timestep):\n if self.trajectoryStep >= len(self.trajectory):\n # return trajectory completed\n return False\n\n target2DPosition = self.trajectory[self.trajectoryStep]\n vector = [-target2DPosition[0] - self.translation[0],\n -target2DPosition[1] - self.translation[1],\n 0.0]\n distance = math.sqrt(vector[0] * vector[0] + vector[1] * vector[1] +\n vector[2] * vector[2])\n maxStep = MovingTarget.SPEED * timestep\n\n if distance < maxStep:\n self.trajectoryStep += 1\n self.translation = [a + b for a, b in zip(self.translation, vector)]\n segmentChanged = True\n else:\n if math.isinf(self.rotationStep):\n self.rotationStepsCount = 10\n newAngle = math.acos(dotProduct([1.0, 0.0, 0.0], vector))\n if vector[1] < 0.01:\n newAngle = -newAngle\n diff = self.rotationAngle - newAngle\n while diff > math.pi:\n diff -= 2 * math.pi\n while diff < -math.pi:\n diff += 2 * math.pi\n self.rotationStep = -diff / self.rotationStepsCount\n\n factor = maxStep / distance\n self.translation[0] += vector[0] * factor\n self.translation[1] += vector[1] * factor\n segmentChanged = False\n\n self.translationField.setSFVec3f(self.translation)\n\n if self.rotationStepsCount > 0:\n if segmentChanged:\n self.rotationAngle += self.rotationStep * \\\n self.rotationStepsCount\n self.rotationStepsCount = 0\n else:\n self.rotationAngle += self.rotationStep\n self.rotationStepsCount -= 1\n self.rotationField.setSFRotation([0.0, 0.0, 1.0,\n self.rotationAngle])\n\n if segmentChanged:\n self.rotationStep = float('Inf')\n return True", "def move(self, *step):\n self.x += step[0]\n self.y += step[1]", "def test_move_default_dropped_steps(self):\n player = ss.LazyPlayer()\n random.seed(2)\n player.move()\n random.seed(5)\n player.move()\n assert player.position == 44", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def test_set_position_after_travel(self):\n travelcalculator = TravelCalculator(25, 50)\n travelcalculator.start_travel(30)\n travelcalculator.set_position(80)\n assert travelcalculator.position_reached()\n assert travelcalculator.current_position() == 80", "def move(self,dt):\n raise NotImplementedError(\"Robot.move\")", "def move_step(self, move):\n # Check that the move is valid\n steps = self.mgr.obj.steps\n if len(steps) == 0:\n return\n idx = self.stepsListWidget.currentRow()\n idx_max = len(steps) - 1\n if (idx+move < 0) or (idx+move > idx_max):\n return\n \n # Insert the step at its new location, then delete it at the old location\n steps.insert(idx+move+(move>0), steps[idx])\n del steps[idx if move>0 else idx+1]\n \n self.load_steps()\n self.stepsListWidget.setCurrentRow(idx+move)\n self.mgr.changed = True", "def test_move_dropped_steps_greater_than_move(self):\n player = ss.LazyPlayer(dropped_steps=3)\n random.seed(2)\n player.move()\n random.seed(2)\n 
player.move()\n assert player.position == 40" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Temporarily overwrite the settings with test settings. This allows test datasets to be used for testing.
def generate_test_settings(tmpdir, dataset):
    # When `tmpdir` is a path convert it to a string
    if isinstance(tmpdir, py._path.local.LocalPath):
        tmpdir = str(tmpdir)

    test_settings = {
        'datasets': {
            'mnist': {
                'train': {
                    'images': "file://" + tmpdir + "/" + dataset + "/server/train-images-idx3-ubyte.gz",
                    'labels': "file://" + tmpdir + "/" + dataset + "/server/train-labels-idx1-ubyte.gz"
                },
                'test': {
                    'images': "file://" + tmpdir + "/" + dataset + "/server/t10k-images-idx3-ubyte.gz",
                    'labels': "file://" + tmpdir + "/" + dataset + "/server/t10k-labels-idx1-ubyte.gz"
                },
            },
        },
        'data-dir': tmpdir + "/" + dataset + "/data"
    }

    overwrite_settings(test_settings)
[ "def prepare_settings(self):\n\n self.settings = load_settings_as_template(DEFAULT_SETTINGS_PATH)\n self.settings['experiment']['file_paths'] = [os.path.join(TEST_DIR, _) for _ in self.file_paths]\n self.settings['experiment']['fasta_paths'] = [os.path.join(TEST_DIR, _) for _ in self.fasta_paths]", "def force_test_setting(dm, tsm, output_path):\n if dm is not None:\n data_json_path = os.path.join(output_path, 'cur_data_setting.json')\n dm.data_par['datapro']['dataset']['prepare_data'] = False\n dm.data_par['datapro']['reg']['max_num_for_loading'] = [1, 1, -1, 1]\n dm.save(data_json_path)\n else:\n tsm.task_par['dataset']['max_num_for_loading'] = [1, 1, -1, 1]\n tsm.task_par['tsk_set']['train'] = False\n tsm.task_par['tsk_set']['continue_train'] = False\n tsk_json_path = os.path.join(output_path, 'cur_task_setting.json')\n tsm.save(tsk_json_path)", "def setUp(self):\n self.settings = Settings()", "def setUp(self):\n\t\tself.settings = settings.Settings()", "def __perapre_test_setting(package_settings: dict) -> dict:\n\n __package_setting = copy.deepcopy(package_settings)\n\n __package_setting['slient'] = False\n\n if __package_setting.get('weights') is not None:\n __package_setting['weights'] = [1, 1, 1, 1, 1]\n\n return __package_setting", "def test_settings_restored(self) -> None:\n from django.conf import settings\n\n assert TestLiveServer._test_settings_before_run is True # type: ignore[attr-defined]\n assert (\n f\"{settings.__class__.__module__}.{settings.__class__.__name__}\"\n == \"django.conf.Settings\"\n )\n assert settings.ALLOWED_HOSTS == [\"testserver\"]", "def load_test_data(self):\n self.meta_data = pd.read_csv(settings[\"RAW_TEST_METADATA_PATH\"])\n\n self.dataset_name = 'test'", "def settings_to_use(args):\n\n test_settings = 'dannywebsite.settings_test'\n default_settings = 'dannywebsite.settings'\n\n return test_settings if 'test' in args else default_settings", "def test(self) -> \"BaseDataset\":\n self.dataset_args[\"train\"] = False\n return self", "def setupTests(self, paths = [], tests = {}):\n # Used for settings only\n self.view = self.build.window.active_view()\n self._settings = {}\n for key in buildSettings:\n self._settings[key] = self._coalesceOption(key)\n self.runnerSetup(paths = paths, tests = tests)", "def setUp(self):\n logging.set_verbosity(logging.INFO)\n if PerfZeroBenchmark.local_flags is None:\n # Loads flags to get defaults to then override. 
List cannot be empty.\n flags.FLAGS(['foo'])\n saved_flag_values = flagsaver.save_flag_values()\n PerfZeroBenchmark.local_flags = saved_flag_values\n else:\n flagsaver.restore_flag_values(PerfZeroBenchmark.local_flags)", "def test_update_setting(self):\n pass", "def test_set_testing(self):\n old_value = Config.testing\n Config.set_testing(True)\n\n self.assertNotEqual(old_value, Config.testing)", "def prepare_test(self):\n pass", "def use_test():\n global _casda_query_base_url, _casda_anon_query_base_url, _casda_soda_base_url\n _casda_query_base_url = _casda_base_url_vo_test\n _casda_anon_query_base_url = _casda_base_url_anon_vo_test\n _casda_soda_base_url = _casda_base_url_soda_test", "def test_ensure_correct_settings():\n assert settings.TEST_SETTING == 1", "def tearDown(self):\n # set the config module level variables back to None\n config.config._conf_parser = None\n config.config._user_config_file = None", "def setup_settings():\n # pylint: disable=import-outside-toplevel\n from django.conf import settings\n import tiny_erp.settings as defaults\n\n for name in dir(defaults):\n if name.isupper() and not hasattr(settings, name):\n setattr(settings, name, getattr(defaults, name))", "def pytest_funcarg__settings(request):\n old_settings = copy.deepcopy(settings)\n\n def restore_settings():\n for setting in dir(old_settings):\n if setting == setting.upper():\n setattr(settings, setting, getattr(old_settings, setting))\n request.addfinalizer(restore_settings)\n return settings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate archive files for the given test dataset in tmpdir
def generate_test_dataset_archive(filepath, dataset):
    # 'file:///some/path' to '/some/path'
    if filepath[:7] == 'file://':
        filepath = filepath[7:]

    # Check if the dataset exists.
    # If it does not exist, generate it.
    if not os.path.isfile(filepath):
        print("Generating", filepath)
        data = get_test_dataset(dataset)
        ensure_dir(os.path.dirname(filepath))
        idxgz.save(filepath, data)
[ "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' + dataset_type\n generate_test_dataset_archive(filepath, test_dataset)", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def create_archive(cls, directory_path: str, output_path: str) -> str:\n pass", "def test_archive_run(self):\n pass", "def archive_test_logs(days, archive_path, all_logs):\n for day in days.keys():\n daydir = datetime.strptime(day, \"%Y%m%d\").strftime(\"%m-%d-%Y\")\n for scenario in days[day].keys():\n # temporary log directories are stored by scenario + date\n datename = scenario + \"-\" + datetime.strptime(day, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n if datename not in all_logs:\n raise RuntimeError(f\"Missing all_log entry for {datename}\")\n\n if not os.path.exists(all_logs[datename].name):\n raise RuntimeError(f\"Missing log directory for {datename}\")\n\n tmpdir = all_logs[datename].name\n failed = days[day][scenario][\"failed-tests\"]\n flakes = days[day][scenario][\"flaky-tests\"]\n\n scenario_archive = os.path.join(archive_path, daydir, scenario)\n os.makedirs(os.path.join(scenario_archive, \"failed\"))\n os.makedirs(os.path.join(scenario_archive, \"flakes\"))\n # data is organized by test names as keys with lists of tests\n for name in failed:\n i = 1\n for t in sorted(failed[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"failed\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1\n\n for name in flakes:\n i = 1\n for t in sorted(flakes[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not logdir or not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"flakes\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1", "def create_archives(self):\n # Cria pasta onde arquivos serão armazenados\n self._create_data_folder()\n # cria os aquivos de saida esperados\n OUT_ARCHIVE_COMMENTS, OUT_ARCHIVE_MEDIAS, OUT_ARCHIVE_MEDIAS_PERIODIC, OUT_ARCHIVE_PROFILES_PERIODIC = self._create_output_paths()\n\n print(self._now_str(), \"Creating archives at:\",\n \"data/archives/{}\".format(self.TIME))\n\n self._aggregate_comments(OUT_ARCHIVE_COMMENTS)", "def _write_dataset_files(\n root_path: pathlib.Path,\n namespace: str,\n ds_files: Dict[str, List[str]],\n) -> str:\n repo_path = root_path / namespace\n # Create all datasets\n for ds_name, files in ds_files.items():\n ds_path = repo_path / ds_name\n ds_path.mkdir(parents=True) # Create the containing dir\n for fname in files:\n (ds_path / fname).touch() # Create the file\n\n # Additional noisy files should be ignored\n (repo_path / '__init__.py').touch()\n return str(repo_path)", "def zip_data_for_zenodo(year):\n os.makedirs(data_folder(\"zenodo\"), exist_ok=True)\n for directory in 
[\"outputs\", \"results\"]:\n logger.info(f\"zipping {directory}_{year} for zenodo\")\n shutil.make_archive(\n data_folder(f\"zenodo/{directory}_{year}\"),\n \"zip\",\n root_dir=data_folder(f\"{directory}/{year}\"),\n # base_dir=\"\",\n )", "def test_create_log_archive(self, temp_directory):\n RepeatableLogRecorder(temp_directory)\n assert len(os.listdir(temp_directory)) >= 1\n\n log_file = os.listdir(temp_directory)[0]\n assert re.match(\n r'^pydov-archive-[0-9]{8}T[0-9]{6}-([0-9a-z]){6}.zip', log_file)", "def compress_data(datadir):\n tar = tarfile.open(datadir + \".tar.gz\", \"w:gz\")\n for datafile in os.listdir(datadir):\n tar.add(datadir + '/' + datafile, datafile)\n tar.close()\n shutil.rmtree(datadir)", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def archiveNewFiles(self):\n\n #Archive any diag files\n if self.checkForDiagFiles():\n self._archiveDiags()\n\n #Archive checkpoint files\n if self.checkForMaeFiles(self.chk_prefix, self.chkdir, 2):\n self._archiveMaeFiles(self.chk_prefix, self.chkdir, 2)\n \n #Archive plot files\n if self.checkForMaeFiles(self.plt_prefix, self.pltdir, 1):\n self._archiveMaeFiles(self.plt_prefix, self.pltdir, 1)", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n 
\"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def test_kit_archive(request) -> str:\n working_directory = os.path.join(request.fspath.dirname, 'fixtures',\n 'kit-test')\n builder = KitBuilder(working_directory=working_directory)\n\n builder.clean()\n tarball_path = builder.build()\n\n return tarball_path", "def populated_archivist_dataset(archivist_dataset, tmp_path_factory):\n wpath = tmp_path_factory.mktemp(\"archivistds\")\n\n ads = archivist_dataset\n\n dscontent = (\n ('azip/file1.txt', 'zipfile1'),\n ('azip/file2.csv', 'zipfile2_muchcontent'),\n ('atar/file1.txt', 'tarfile1'),\n ('atar/file2.csv', 'tarfile2_muchcontent'),\n )\n srcds = Dataset(wpath / 'srcds').create(**nonoise)\n for fpath, fcontent in dscontent:\n fpath = srcds.pathobj / (PurePosixPath(fpath))\n fpath.parent.mkdir(parents=True, exist_ok=True)\n fpath.write_text(fcontent)\n srcds.save(**nonoise)\n\n archive_root = wpath / 'myarchive'\n #archivetype = 'zip'\n\n akeys = {}\n\n # no ZIP just yet\n # for archivetype, ext in (('zip', ''), ('tar', '.gz')):\n for archivetype, ext in (('tar', '.gz'), ):\n archive_path = Path(f\"{archive_root}.{archivetype}{ext}\")\n\n archive_path_inds = ads.pathobj / '.archives' / archive_path.name\n # create an archive, the easy way, by simply exporting the\n # entire dataset worktree\n srcds.export_archive(archive_root, archivetype=archivetype,\n **nonoise)\n assert archive_path.exists()\n\n # add the archive (in a hidden dir) to be able to reference\n # it via a key\n aurl = archive_path.as_uri()\n ads.repo.call_annex([\n 'addurl', '--file', str(archive_path_inds), aurl])\n ads.save(**nonoise)\n # get the key of the archive\n akeys[archivetype] = ads.status(\n archive_path_inds, annex='basic', return_type='item-or-list',\n **nonoise)['key']\n return ads, akeys, archive_root, dscontent", "def sample_input_dir():\n tmpdir = tempfile.mkdtemp()\n input_zip = os.path.join(ASSETS_DIR, 'input_dir.zip')\n with zipfile.ZipFile(input_zip, \"r\") as zip_ref:\n zip_ref.extractall(tmpdir)\n yield tmpdir\n shutil.rmtree(tmpdir)", "def archive_results(seeds):\n # Moves all created folder and result files in a single folder\n datetime =time.strftime(\"%Y.%m.%d-%H%M%S\")\n try:\n os.system(str('mkdir Results/'+str(datetime)))\n except:\n print(\"\\rFailed to create folder Results/{:s}\".format(datetime))\n try:\n os.system('mv results.log Results/{:}/results.log'.format(datetime))\n except:\n print(\"\\rFailed to move results.log\")\n for seed in seeds:\n mv_folders = (folder for folder in os.listdir() if folder.endswith(str(seed)))\n for folder in mv_folders:\n try:\n os.system('mv {} Results/{}/{}'.format(folder, datetime, folder))\n #print(\"\\rMoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to move {:s}\".format(folder))\n raise", "def archive_experiment(experiment_dir: str,\n dst_dir: str,\n save_extensions: Union[str, Sequence[str]]='py',\n exclude_dirs: Union[str, Sequence[str]]='output',\n archive_format: str='zip',\n base_name: Optional[str]=None):\n # Format save_extensions for consistency\n # Make into a sequence\n if isinstance(save_extensions, str):\n save_extensions = [save_extensions]\n # Drop any .'s\n save_extensions = [s.strip('.') for s in save_extensions]\n # Format exclude_dirs for consistency\n if isinstance(exclude_dirs, str):\n exclude_dirs = [exclude_dirs]\n # Get default base name\n if base_name is None:\n experiment_path = 
os.path.abspath(experiment_dir)\n base_name = [p for p in experiment_path.split('/') if p][-1]\n\n # Full name of the archive name uses a time stamp\n timestamp = time.strftime('%b%d%Y_%H%M%S')\n archive_name = f'{base_name}_{timestamp}'\n\n # Use a temporary folder to create the archive\n tmp_folder = f'/tmp/{str(uuid.uuid4())}'\n if os.path.exists(tmp_folder):\n shutil.rmtree(tmp_folder)\n os.makedirs(tmp_folder)\n tmp_experiment = os.path.join(tmp_folder, archive_name)\n os.makedirs(tmp_experiment)\n\n # Recurse through the experiment directory and non-'output' subdirectories,\n # saving files to the temporary folder\n dirs_to_check = [experiment_dir]\n while len(dirs_to_check) > 0:\n # A directory to check (DTC), relative to the experiment_dir\n dtc = dirs_to_check.pop(0)\n # Full path to the DTC\n full_dtc = dtc if dtc == experiment_dir \\\n else os.path.join(experiment_dir, dtc)\n # List of all files and folders in the DTC\n dlist = os.listdir(full_dtc)\n # List of all files in the DTC\n files = [d for d in dlist\n if os.path.isfile(os.path.join(full_dtc, d))]\n # Check each file to see if it should be archived.\n for f in files:\n if f.split('.')[-1] in save_extensions:\n # Recreate the file structure inside experiment_dir, up to\n # the folder containing f\n tmp_save_dir = tmp_experiment if dtc == experiment_dir \\\n else os.path.join(tmp_experiment, dtc)\n os.makedirs(tmp_save_dir, exist_ok=True)\n # Save a copy of f\n shutil.copy2(os.path.join(full_dtc, f), tmp_save_dir)\n\n # Get non-excluded subdirectories\n subdirs = [d for d in dlist\n if os.path.isdir(os.path.join(full_dtc, d))\n and d not in exclude_dirs]\n # Track subdirectories as paths relative to the experiment dir\n if dtc != experiment_dir and len(subdirs) > 0:\n subdirs = [os.path.join(dtc, d) for d in subdirs]\n\n dirs_to_check += subdirs\n\n # At this point, all archivable files and folders are saved in tmp_folder.\n # Create an archive, coincidentally the same name as tmp_experiment's path\n tmp_archive = tmp_experiment[:]\n shutil.make_archive(tmp_archive, archive_format, tmp_folder, archive_name)\n # Get the full name of the archive. There should only be one file in\n # tmp_experiment\n tmp_archive_full = [f for f in os.listdir(tmp_folder)\n if os.path.isfile(os.path.join(tmp_folder, f))][0]\n # Copy the archive to its destination\n os.makedirs(dst_dir, exist_ok=True)\n shutil.move(os.path.join(tmp_folder, tmp_archive_full),\n os.path.join(dst_dir, tmp_archive_full),\n copy_function=shutil.copyfile)\n # Remove the temporary folder\n shutil.rmtree(tmp_folder)\n\n pass", "def write_output_files(input_path, output_path, out_data, random = False):\n create_directory_structure(output_path)\n for city in cities:\n # set relevant list\n data_dir = os.path.join(input_path, city, city+'_test')\n sub_files = list_filenames(data_dir)\n for f in sub_files:\n # load data\n outfile = os.path.join(output_path, city, city+'_test',f)\n if random:\n out = np.random.randint(256, size=(5,3,495,436,3), dtype = np.dtype(np.uint8))\n else:\n out = out_data\n write_data(out, outfile)\n print(\"just wrote file {}\".format(outfile))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a test environment using the given dataset. The settings are temporarily overwritten to use the test data.
def generate_test_environment(tmpdir, dataset): # Overwrite settings with test settings generate_test_settings(tmpdir, dataset) # Generate the archive files for usage in ['train', 'test']: for dstype in ['images', 'labels']: dataset_type = usage + '.' + dstype mnist_dataset = 'datasets.mnist.' + dataset_type filepath = get_setting(mnist_dataset) test_dataset = dataset + '.' + dataset_type generate_test_dataset_archive(filepath, test_dataset)
[ "def generate_test_settings(tmpdir, dataset):\n\n # When `tmpdir` is a path convert it to a string\n if isinstance(tmpdir, py._path.local.LocalPath):\n tmpdir = str(tmpdir)\n \n test_settings = {\n \n 'datasets': {\n 'mnist': {\n 'train': {\n 'images': \"file://\" + tmpdir + \"/\" + dataset + \"/server/train-images-idx3-ubyte.gz\",\n 'labels': \"file://\" + tmpdir + \"/\" + dataset + \"/server/train-labels-idx1-ubyte.gz\"\n },\n 'test': {\n 'images': \"file://\" + tmpdir + \"/\" + dataset + \"/server/t10k-images-idx3-ubyte.gz\",\n 'labels': \"file://\" + tmpdir + \"/\" + dataset + \"/server/t10k-labels-idx1-ubyte.gz\"\n },\n },\n },\n 'data-dir': tmpdir + \"/\" + dataset + \"/data\"\n }\n overwrite_settings(test_settings)", "def test_create_training_dataset(self):\n pass", "def main():\n config = get_environment_config()\n\n create_test_buckets(config, BUCKET_NAMES)\n create_test_datasets(config, DATASET_NAMES)", "def make_test_data(self):\n import data", "def create_sandbox_dataset(project_id, dataset_id):\n sandbox_dataset_id = get_sandbox_dataset_id(dataset_id)\n friendly_name = f'Sandbox for {dataset_id}'\n description = f'Sandbox created for storing records affected by the cleaning rules applied to {dataset_id}'\n label_or_tag = {'label': '', 'tag': ''}\n create_dataset(project_id=project_id,\n dataset_id=sandbox_dataset_id,\n friendly_name=friendly_name,\n description=description,\n label_or_tag=label_or_tag,\n overwrite_existing=False)\n\n return sandbox_dataset_id", "def __create_test_environment(self):\n os.chdir(self.wd)\n temp_dir = tempfile.gettempdir()\n self.test_root = os.path.join(temp_dir, \"test-grpc\")\n print(\"Creating testing environment in {}\".format(self.test_root))\n if os.path.exists(self.test_root):\n # delete any previous environment\n shutil.rmtree(self.test_root)\n # create root directory\n os.makedirs(self.test_root)\n def copy_app(name):\n app_root = os.path.join(self.test_root, name)\n os.makedirs(app_root)\n filename = \"grpc-{}\".format(name)\n src = os.path.join(self.args.bin, filename)\n dst = os.path.join(app_root, filename)\n shutil.copy(src, dst)\n return dst\n # copy client and server into the new test environment\n self.server_path = copy_app(\"server\")\n self.client_path = copy_app(\"client\")", "def test_can_generate_from_env_var(testdir: Testdir) -> None:\n schema = '''\n datasource db {{\n provider = \"sqlite\"\n url = \"file:dev.db\"\n }}\n\n // default output: {output}\n generator db {{\n provider = \"coverage run -m prisma\"\n output = env(\"PRISMA_TEST_ASSUMPTIONS_OUTPUT\")\n {options}\n }}\n\n model User {{\n id Int @id @default(autoincrement())\n email String @unique\n name String?\n }}\n '''\n\n with temp_env_update(\n {'PRISMA_TEST_ASSUMPTIONS_OUTPUT': str(testdir.path / 'prisma')}\n ):\n testdir.generate(schema=schema)", "def makeWorkflowTemplate(self, dataset):\n datasetPath = parseDatasetPath(dataset)\n self.workflow.payload._InputDatasets = []\n newDataset = self.workflow.payload.addInputDataset(\n datasetPath['Primary'],\n datasetPath['Processed']\n )\n newDataset['DataTier'] = datasetPath['DataTier']\n return", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. 
Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def setUp(self):\r\n super(RunTestsTest, self).setUp()\r\n self.testrun = self.F.RunFactory.create(status=\"active\")\r\n self.envs = self.F.EnvironmentFactory.create_full_set(\r\n {\"OS\": [\"Windows 7\", \"Ubuntu Linux\"]})\r\n self.testrun.environments.add(*self.envs)\r\n self.add_perm(\"execute\")", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def make_testsuite(testsuite: Dict) -> NoReturn:\n # validate testsuite format\n load_testsuite(testsuite)\n\n testsuite_config = testsuite[\"config\"]\n testsuite_path = testsuite_config[\"path\"]\n testsuite_variables = convert_variables(\n testsuite_config.get(\"variables\", {}), testsuite_path\n )\n\n logger.info(f\"start to make testsuite: {testsuite_path}\")\n\n # create directory with testsuite file name, put its testcases under this directory\n testsuite_path = ensure_file_abs_path_valid(testsuite_path)\n testsuite_dir, file_suffix = os.path.splitext(testsuite_path)\n # demo_testsuite.yml => demo_testsuite_yml\n testsuite_dir = f\"{testsuite_dir}_{file_suffix.lstrip('.')}\"\n\n for testcase in testsuite[\"testcases\"]:\n # get referenced testcase content\n testcase_file = testcase[\"testcase\"]\n testcase_path = __ensure_absolute(testcase_file)\n testcase_dict = load_test_file(testcase_path)\n testcase_dict.setdefault(\"config\", {})\n testcase_dict[\"config\"][\"path\"] = testcase_path\n\n # override testcase name\n testcase_dict[\"config\"][\"name\"] = testcase[\"name\"]\n # override base_url\n base_url = testsuite_config.get(\"base_url\") or testcase.get(\"base_url\")\n if base_url:\n testcase_dict[\"config\"][\"base_url\"] = base_url\n # override verify\n if \"verify\" in testsuite_config:\n testcase_dict[\"config\"][\"verify\"] = testsuite_config[\"verify\"]\n # override variables\n # testsuite testcase variables > testsuite config variables\n testcase_variables = convert_variables(\n testcase.get(\"variables\", {}), testcase_path\n )\n testcase_variables = merge_variables(testcase_variables, testsuite_variables)\n # testsuite testcase variables > testcase config variables\n testcase_dict[\"config\"][\"variables\"] = convert_variables(\n testcase_dict[\"config\"].get(\"variables\", {}), testcase_path\n )\n testcase_dict[\"config\"][\"variables\"].update(testcase_variables)\n\n # override weight\n if \"weight\" in testcase:\n testcase_dict[\"config\"][\"weight\"] = testcase[\"weight\"]\n\n # make testcase\n testcase_pytest_path = make_testcase(testcase_dict, testsuite_dir)\n pytest_files_run_set.add(testcase_pytest_path)", "def create_eval_dataset(args):\n\n if args.dataset.eval_dataset == 
\"llff\":\n eval_ds_list = {}\n if not args.dataset.eval_scene:\n scene_list = [\n \"fern\", \"flower\", \"fortress\", \"horns\", \"leaves\", \"orchids\", \"room\",\n \"trex\"\n ]\n else:\n scene_list = [args.dataset.eval_scene]\n\n if args.dev_run:\n scene_list = [\"fern\"]\n\n for scene in scene_list:\n logging.info(\"Loading eval scene {} ===============\".format(scene)) # pylint: disable=logging-format-interpolation\n train_ds = eval_ibr_epipolar.EvalIBREpipolar(\"train\", args, scene)\n eval_ds = eval_ibr_epipolar.EvalIBREpipolar(\"test\", args, scene, train_ds)\n eval_ds_list[scene] = eval_ds\n\n elif args.dataset.eval_dataset == \"shiny-6\":\n eval_ds_list = {}\n if not args.dataset.eval_scene:\n scene_list = [\"crest\", \"food\", \"giants\", \"pasta\", \"seasoning\", \"tools\"]\n else:\n scene_list = [args.dataset.eval_scene]\n\n if args.dev_run:\n scene_list = [\"crest\"]\n\n for scene in scene_list:\n logging.info(\"Loading eval scene {} ===============\".format(scene)) # pylint: disable=logging-format-interpolation\n train_ds = eval_ff_epipolar.EvalFFEpipolar(\"train\", args, scene)\n eval_ds = eval_ff_epipolar.EvalFFEpipolar(\"test\", args, scene, train_ds)\n eval_ds_list[scene] = eval_ds\n\n return train_ds, eval_ds_list", "def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def _prepare_test_env(ptfhost, request):\n logger.info(\"Preparing SAI test environment.\")\n _create_sai_test_folder(ptfhost)\n _copy_sai_test_cases(ptfhost, request.config.option.sai_test_dir)", "def setUp(self):\n self._base_dir = \"../output\"\n self.pipeline = Pipeline(dataset=None, extractor=None, dataset_name=\"nprandom\",\n base_dir=self._base_dir)", "def create_dataset(mkt, set_name = \"TestSet\", n_items = 10):\n set = DataSet.objects.create(market = mkt, description = set_name)\n set.random(n_items)\n set.save()\n return set", "def create_datasets(X, X_test, y, datasets=[], use_cache=True):\r\n if use_cache:\r\n # Check if all files exist. 
If not, generate the missing ones\r\n DATASETS = []\r\n for dataset in datasets:\r\n try:\r\n with open(\"cache/%s.pkl\" % dataset, 'rb'):\r\n pass\r\n except IOError:\r\n logger.warning(\"couldn't load dataset %s, will generate it\",\r\n dataset)\r\n DATASETS.append(dataset.split('_')[0])\r\n else:\r\n DATASETS = [\"basic\", \"tuples\", \"triples\",\r\n \"greedy\", \"greedy2\", \"greedy3\"]\r\n\r\n # Datasets that require external code to be generated\r\n for dataset, module in EXTERNAL_DATASETS.iteritems():\r\n if not get_dataset(dataset):\r\n module.create_features()\r\n\r\n # Generate the missing datasets\r\n if len(DATASETS):\r\n bsfeats, bsfeats_test = get_dataset('bsfeats')\r\n\r\n basefeats, basefeats_test = create_features(X, X_test, 3)\r\n save_dataset(\"base_feats\", basefeats, basefeats_test)\r\n\r\n lrfeats, lrfeats_test = pre_process(*create_features(X, X_test, 0))\r\n save_dataset(\"lrfeats\", lrfeats, lrfeats_test)\r\n\r\n feats, feats_test = pre_process(*create_features(X, X_test, 1))\r\n save_dataset(\"features\", feats, feats_test)\r\n\r\n meta, meta_test = pre_process(*create_features(X, X_test, 2),\r\n normalize=False)\r\n save_dataset(\"metafeatures\", meta, meta_test)\r\n\r\n X = X[:, SELECTED_COLUMNS]\r\n X_test = X_test[:, SELECTED_COLUMNS]\r\n save_dataset(\"basic\", X, X_test)\r\n\r\n Xt = create_tuples(X)\r\n Xt_test = create_tuples(X_test)\r\n save_dataset(\"tuples\", Xt, Xt_test)\r\n\r\n Xtr = create_tuples(X)\r\n Xtr_test = create_tuples(X_test)\r\n save_dataset(\"triples\", Xtr, Xtr_test)\r\n\r\n Xe, Xe_test = create_effects(X, X_test, y)\r\n save_dataset(\"effects\", Xe, Xe_test)\r\n\r\n feats_d, feats_d_test = pre_process(basefeats, basefeats_test,\r\n create_divs=True)\r\n bsfeats_d, bsfeats_d_test = pre_process(bsfeats, bsfeats_test,\r\n create_divs=True)\r\n feats_l, feats_l_test = pre_process(basefeats, basefeats_test,\r\n log_transform=True)\r\n lrfeats_l, lrfeats_l_test = pre_process(lrfeats, lrfeats_test,\r\n log_transform=True)\r\n bsfeats_l, bsfeats_l_test = pre_process(bsfeats, bsfeats_test,\r\n log_transform=True)\r\n\r\n for ds in DATASETS:\r\n Xg, Xg_test = get_dataset(ds)\r\n save_dataset(ds + '_b', Xg, Xg_test, bsfeats, bsfeats_test)\r\n save_dataset(ds + '_f', Xg, Xg_test, feats, feats_test)\r\n save_dataset(ds + '_fd', Xg, Xg_test, feats_d, feats_d_test)\r\n save_dataset(ds + '_bd', Xg, Xg_test, bsfeats_d, bsfeats_d_test)\r\n Xs, Xs_test = sparsify(Xg, Xg_test)\r\n save_dataset(ds + '_sf', Xs, Xs_test, lrfeats, lrfeats_test)\r\n save_dataset(ds + '_sfl', Xs, Xs_test, lrfeats_l, lrfeats_l_test)\r\n save_dataset(ds + '_sfd', Xs, Xs_test, feats_d, feats_d_test)\r\n save_dataset(ds + '_sb', Xs, Xs_test, bsfeats, bsfeats_test)\r\n save_dataset(ds + '_sbl', Xs, Xs_test, bsfeats_l, bsfeats_l_test)\r\n save_dataset(ds + '_sbd', Xs, Xs_test, bsfeats_d, bsfeats_d_test)\r\n\r\n if issubclass(Xg.dtype.type, np.integer):\r\n consolidate(Xg, Xg_test)\r\n save_dataset(ds + '_c', Xg, Xg_test)\r\n save_dataset(ds + '_cf', Xg, Xg_test, feats, feats_test)\r\n save_dataset(ds + '_cb', Xg, Xg_test, bsfeats, bsfeats_test)\r\n Xs, Xs_test = sparsify(Xg, Xg_test)\r\n save_dataset(ds + '_sc', Xs, Xs_test)\r\n save_dataset(ds + '_scf', Xs, Xs_test, feats, feats_test)\r\n save_dataset(ds + '_scfl', Xs, Xs_test, feats_l, feats_l_test)\r\n save_dataset(ds + '_scb', Xs, Xs_test, bsfeats, bsfeats_test)\r\n save_dataset(ds + '_scbl', Xs, Xs_test,\r\n bsfeats_l, bsfeats_l_test)", "def _get_setup(self, dataset_name):\n for potential_setup in self.setup:\n for dataset in 
potential_setup[\"datasets\"]:\n if dataset_name in dataset:\n test_setup = potential_setup\n self.io_args.color = os.path.join(self.io_args.input_root, dataset)\n return test_setup" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts (typically) overlapping regular patches from a grayscale image. Changing the offset and stride parameters will result in images reconstructed by reconstruct_from_grayscale_patches having different dimensions! Callers should pad and unpad as necessary!
def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ): px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0])) l, t = np.meshgrid( np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]), np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) ) l = l.ravel() t = t.ravel() x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1])) y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1])) return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l)
[ "def extract_patches(\n image,\n size,\n stride = 1,\n):\n if size == stride:\n # This function is reshape + transpose based and is always the fastest, but\n # of course only works if size == stride.\n return extract_patches_nonoverlapping(image, size, pad=False)\n return extract_patches_conv2d(image, size, stride, padding=\"VALID\")", "def reconstruct_from_patches_2d(patches, image_size,step=16):\n countstep_i=0\n countstep_j=0\n i_h, i_w = image_size[:2]\n p_h, p_w = patches.shape[1:3]\n img = np.zeros(image_size)\n # compute the dimensions of the patches array\n n_h = i_h - p_h + 1\n n_w = i_w - p_w + 1\n print(\"Number of patches = %d, Patch Shape W H= (%d, %d)\" % (patches.shape[0], n_h, n_w))\n for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):\n \t#img[i:i + p_h, j:j + p_w] += p\n \tif i % step==0 and j %step==0:\n \timg[i:i + p_h, j:j + p_w] = p\n \tprint(\"i and j = (%d, %d)\" % (i, j))\n \tcountstep_i+=1\n \tcountstep_j+=1\n print (countstep_j)\n return img \n\n for i in range(i_h):\n for j in range(i_w):\n # divide by the amount of overlap\n # XXX: is this the most efficient way? memory-wise yes, cpu wise?\n #if i % 10==0 and j %10==0:\n img[i, j] /= float(min(i + 1, p_h, i_h - i) *\n min(j + 1, p_w, i_w - j))\n return img", "def _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size,\n n_crops, h, w,\n c, scale_id, max_seq_len):\n p = tf.image.extract_patches(\n image, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1],\n [1, 1, 1, 1],\n padding='SAME')\n\n p = tf.reshape(p, [n_crops, -1, patch_size * patch_size * c])\n\n count_h = _ceil_divide_int(h, patch_stride)\n count_w = _ceil_divide_int(w, patch_stride)\n\n # Shape (num_patches, 1)\n spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)\n # Shape (1, num_patches, 1)\n spatial_p = tf.expand_dims(spatial_p, axis=0)\n # Shape (n_crops, num_patches, 1)\n spatial_p = tf.tile(spatial_p, (n_crops, 1, 1))\n spatial_p = tf.cast(spatial_p, dtype=p.dtype)\n # Shape (n_crops, num_patches, 1)\n scale_p = tf.ones_like(spatial_p, dtype=p.dtype) * scale_id\n # Shape (n_crops, num_patches, 1)\n mask_p = tf.ones_like(spatial_p, dtype=p.dtype)\n\n # Concatenating is a hacky way to pass both patches, positions and input\n # mask to the model.\n # Shape (n_crops, num_patches, patch_size * patch_size * c + 3)\n out = tf.concat([p, spatial_p, scale_p, mask_p], axis=2)\n if max_seq_len >= 0:\n out = _pad_or_cut_to_max_seq_len(out, max_seq_len)\n out = tf.reshape(out,\n [n_crops, max_seq_len, c * patch_size * patch_size + 3])\n else:\n out = tf.reshape(out, [n_crops, -1, c * patch_size * patch_size + 3])\n return out", "def _extract_equidistant_patches(X, image_size, patch_size, stride_size):\n nm_patches = Coates._patches_per_image(image_size=image_size, stride_size=stride_size)\n\n # single patch indices (aka single patch connectivity map)\n indices_patch = np.mod(np.arange(0, np.prod(patch_size)), patch_size[1]) \\\n + (np.arange(0, np.prod(patch_size)) // patch_size[1]) * image_size[1]\n\n # preallocate memory for patches indices (aka connectivity map)\n indices_patches = np.zeros(shape=(nm_patches + (np.prod(patch_size), )), dtype=int)\n\n for y in range(nm_patches[0]):\n for x in range(nm_patches[1]):\n indices_patches[y, x, :] = y * stride_size[0] * image_size[1] + x * stride_size[1] + indices_patch\n\n # indices matrix to array\n indices_patches = indices_patches.reshape((np.prod(nm_patches) * np.prod(patch_size), )).astype(int)\n\n patches = 
X[..., np.array(indices_patches, dtype=int)]\n\n return Coates._reshape_arrays_to_images(patches, image_size=(np.prod(nm_patches), np.prod(patch_size)))", "def recreate_from_patches(data):\n overlap_height = (PATCHES * PATCH_HEIGHT - IMG_HEIGHT) // (PATCHES - 1) # Overlap of patches along y axis\n step_size_height = PATCH_HEIGHT - overlap_height # Step size along y axis\n\n overlap_width = (PATCHES * PATCH_WIDTH - IMG_WIDTH) // (PATCHES - 1) # Overlap of patches along x axis\n step_size_width = PATCH_WIDTH - overlap_width # Step size along x axis\n\n whole_images = []\n i = 0\n while i < len(data):\n image = np.zeros((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)) # Create an empty image to pin patches on\n\n for h in range(PATCHES - 1):\n for w in range(PATCHES - 1):\n # Insert patches into image starting from top left corner, without the patches touching right or bottom border\n if h > 0: # First row has no overlap with patches above them\n if overlap_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_height], 0.5,\n data[i - PATCHES][step_size_height:], 0.5, 0)\n\n # Insert into patch where it overlaps\n rest = data[i][overlap_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n\n if w == PATCHES - 2: # If we are at the second to last patch, overlap may be calculated different\n i += 1\n continue\n\n else:\n i += 1\n if overlap_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_width)]], 0.5,\n data[i - 1][:,\n [i for i in range(PATCH_WIDTH - overlap_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert into next patch\n rest = data[i][:, [i for i in range(overlap_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch which touches right border on this height, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert array of overlap into patch, where it overlaps\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n i += 1\n\n for w in range(PATCHES - 1):\n # Insert patches from the bottom border, may overlap more\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * 
step_size_width + PATCH_WIDTH / 2,\n IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n\n # Insert patch in the bottom right corner, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap along x axis with mean values form overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n\n # Insert array of overlap into patch\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2, IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n whole_images.append(\n image) # All corresponding patches are pinned inside the image, therefore this image is finished\n\n return whole_images", "def get_patches(input_fold, output_folder='', output_np_folder='', masks_folder='',\n patch_shape=(512, 512), crop_shape=None, stride=(100, 100), keep_dark_region_ratio=0.01):\n\n assert os.path.isdir(input_fold)\n if input_fold[-1] != '/':\n input_fold += '/'\n if output_folder != '':\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n if output_folder[-1] != '/':\n output_folder += '/'\n if output_np_folder != '':\n assert os.path.isdir(output_np_folder)\n if output_np_folder[-1] != '/':\n output_np_folder += '/'\n if masks_folder != '':\n assert os.path.isdir(masks_folder)\n if masks_folder[-1] != '/':\n masks_folder += '/'\n assert type(patch_shape) is tuple\n assert type(patch_shape[0]) is int and type(patch_shape[1]) is int\n assert patch_shape[0] > 0 and patch_shape[1] > 0\n\n if crop_shape != None:\n crop_bef_h = (patch_shape[0] - crop_shape[0]) // 2\n crop_bef_w = (patch_shape[1] - crop_shape[1]) // 2\n\n assert type(stride) is tuple\n assert type(stride[0]) is int and type(stride[1]) is int\n assert stride[0] > 0 and stride[1] > 0\n\n img_count = 0\n for subdir, dir, files in os.walk(input_fold):\n for file in files:\n if file.split('.')[-1] not in ['tiff', 'tif', 'png', 'jpg', 'JPEG', 'jpeg']:\n continue\n\n img_patches = []\n mask_patches = []\n\n img_file = os.path.join(subdir, file)\n full_img = skimage.io.imread(img_file)\n\n if masks_folder != '':\n mask_file = os.path.join(masks_folder, file.split('.')[0] + '_mask.' + file.split('.')[-1])\n # mask_file = os.path.join(masks_folder, file.split('.')[0] + '.' 
+ file.split('.')[-1])\n assert os.path.isfile(mask_file)\n full_mask = skimage.io.imread(mask_file)\n # assert full_img.shape[0] == full_mask.shape[0] and full_img.shape[1] == full_mask.shape[1]\n\n for row in range(0, full_img.shape[0] - patch_shape[0], stride[0]):\n for col in range(0, full_img.shape[1] - patch_shape[1], stride[1]):\n crop_img = full_img[row:row + patch_shape[0], col:col + patch_shape[1]]\n # do not save patches where the number of black pixels is higher than skip_dark_region_ratio\n if (crop_img == 0).sum() / crop_img.size > keep_dark_region_ratio:\n continue\n\n img_patches.append(crop_img)\n\n if masks_folder != '':\n crop_mask = full_mask[row:row + patch_shape[0], col:col + patch_shape[1]]\n mask_patches.append(crop_mask)\n\n if output_folder != '':\n full_path = os.path.join(output_folder, file.split('.')[0])\n if not os.path.isdir(full_path):\n os.mkdir(full_path)\n for ind, img_patch in enumerate(img_patches):\n skimage.io.imsave(os.path.join(full_path, str(img_count + ind).zfill(6) + '.png'), img_patch)\n if crop_shape != None:\n crop_patch_out = os.path.join(output_folder, 'crops', file.split('.')[0])\n if not os.path.isdir(crop_patch_out):\n os.makedirs(crop_patch_out)\n crop_patch = img_patch[crop_bef_h:crop_bef_h + crop_shape[0],\n crop_bef_w:crop_bef_w + crop_shape[1]]\n skimage.io.imsave(os.path.join(crop_patch_out, str(img_count + ind).zfill(6) + '.png'),\n crop_patch)\n\n if masks_folder != '':\n mask_output_folder = os.path.join(output_folder, 'masks', file.split('.')[0])\n if not os.path.isdir(mask_output_folder):\n os.makedirs(mask_output_folder)\n for ind, mask_patch in enumerate(mask_patches):\n if crop_shape != None:\n mask_patch = mask_patch[crop_bef_h:crop_bef_h + crop_shape[0],\n crop_bef_w:crop_bef_w + crop_shape[1]]\n skimage.io.imsave(os.path.join(mask_output_folder, str(img_count + ind).zfill(6) + '.png'),\n mask_patch.astype('uint8'))\n\n img_count += len(img_patches)\n\n if output_np_folder != '':\n full_path = os.path.join(output_folder, file.split('.')[0])\n if not os.path.isdir(full_path):\n os.mkdir(full_path)\n img_patches_np = np.asarray(img_patches, dtype='float32')\n np.save(os.path.join(full_path, 'img_patches'), img_patches_np)\n mask_patches_np = np.asarray(mask_patches)\n np.save(os.path.join(full_path, 'mask_patches'), mask_patches_np)\n\n return img_patches, mask_patches", "def extract_img_patches(img, patch_size):\n padded_img = pad_img(img, patch_size)\n color = 1\n if len(padded_img.shape) > 2:\n color = padded_img.shape[2]\n patches = view_as_blocks(padded_img, (patch_size, patch_size, color))\n patches = patches.reshape(-1, patch_size, patch_size, color)\n np.random.shuffle(patches)\n return patches", "def extract_patches(image, patchshape, overlap_allowed=0.1, cropvalue=None, crop_fraction_allowed=0.1):\r\n jump_cols = int(patchshape[1] * overlap_allowed)\r\n jump_rows = int(patchshape[0] * overlap_allowed)\r\n\r\n # Restrict ourselves to the rectangle containing non-cropped pixels\r\n if cropvalue is not None:\r\n rows, cols = np.where(image != cropvalue)\r\n rows.sort()\r\n cols.sort()\r\n active = image[rows[0]:rows[-1], cols[0]:cols[-1]]\r\n else:\r\n active = image\r\n\r\n rowstart = 0\r\n colstart = 0\r\n\r\n # Array tracking where we've already taken patches.\r\n covered = np.zeros(active.shape, dtype=bool)\r\n patches = []\r\n regions = []\r\n while rowstart <= active.shape[0] - patchshape[0]:\r\n # Record whether or not e've found a patch in this row,\r\n # so we know whether to skip ahead.\r\n 
got_a_patch_this_row = False\r\n colstart = 0\r\n while colstart <= active.shape[1] - patchshape[1]:\r\n # Slice tuple indexing the region of our proposed patch\r\n region = (slice(rowstart, rowstart + patchshape[0]),\r\n slice(colstart, colstart + patchshape[1]))\r\n\r\n # The actual pixels in that region.\r\n patch = active[region]\r\n\r\n # The current mask value for that region.\r\n cover_p = covered[region]\r\n if cropvalue is None or \\\r\n frac_eq_to(patch, cropvalue) <= crop_fraction_allowed and \\\r\n frac_eq_to(cover_p, True) <= overlap_allowed:\r\n # Accept the patch.\r\n patches.append(patch)\r\n regions.append(region)\r\n # Mask the area.\r\n covered[region] = True\r\n\r\n # Jump ahead in the x direction.\r\n colstart += jump_cols\r\n got_a_patch_this_row = True\r\n # print \"Got a patch at %d, %d\" % (rowstart, colstart)\r\n else:\r\n # Otherwise, shift window across by one pixel.\r\n colstart += 1\r\n\r\n if got_a_patch_this_row:\r\n # Jump ahead in the y direction.\r\n rowstart += jump_rows\r\n else:\r\n # Otherwise, shift the window down by one pixel.\r\n rowstart += 1\r\n\r\n # Return a 3D array of the patches with the patch index as the first\r\n # dimension (so that patch pixels stay contiguous in memory, in a\r\n # C-ordered array).\r\n return np.concatenate([pat[np.newaxis, ...] for pat in patches], axis=0),regions", "def gen_patches(imgs, window_size, patch_size):\n padding_size = int((window_size - patch_size) / 2)\n\n patches = np.asarray(\n [img_crop(imgs[i], patch_size, patch_size, patch_size, padding_size) for i in range(imgs.shape[0])])\n print(patches.shape)\n\n return patches.reshape(-1, patches.shape[2], patches.shape[3], patches.shape[4])", "def extract_patches(data,patch_dim):\n \n m = data.shape[0]\n im_x = data.shape[1]\n im_y = data.shape[2]\n \n assert im_x%float(patch_dim)==0 and im_y%float(patch_dim)==0, \\\n \"patch_size must divide x and y dimensions of image\"\n\n numpatchs = m*(im_x/patch_dim)*(im_y/patch_dim)\n patch_size = patch_dim**2\n\n patches = np.empty((patch_size,numpatchs))\n p=0\n for i in range(data.shape[0]):\n image = data[i,...]\n for x in np.r_[0:im_x:patch_dim]:\n for y in np.r_[0:im_y:patch_dim]:\n patch = image[x:x+patch_dim,y:y+patch_dim]\n patches[:,p] = patch.ravel()\n p+=1\n \n return patches", "def divide_image_to_patches(img, patch_size):\n\n assert len(img.shape) == 3 and img.shape[-1] == 3\n\n height, width, n_channels = img.shape\n coordinates = _get_top_left_coordinates(height, width, patch_size)\n\n patches = []\n\n for top, left in coordinates:\n patches.append(img[top:top + patch_size, left:left + patch_size])\n\n return np.array(patches).astype('uint8')", "def get_patches(image, label, coordmaps, sample, num_pos = 100, num_neg = 100, all_patches=False, patch_shape= (48,48,48), spacing=(24,24,24), start_idx = 0):\n image_shape = np.shape(image)\n cn_size = image_shape[0]\n sg_size = image_shape[1]\n cr_size = image_shape[2]\n ax_size = image_shape[3]\n\n if not all_patches:\n idx_pos = np.stack(np.where(label[0, ...] 
> 0))\n \n # Only include points not near boundary\n #sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n #idx_pos = idx_pos[:,sg_idx[0]]\n #cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n #idx_pos = idx_pos[:, cr_idx[0]]\n #ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n #idx_pos = idx_pos[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_pos[0].shape[0], num_pos, replace = False)\n cpts_pos_sampled = idx_pos[:, idx_rand] \n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n for i in range(num_pos):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + i)\n \n # For negative points\n idx_neg = np.stack(np.where(label[0, ...]==0), axis = 0)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n idx_neg = idx_neg[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n idx_neg = idx_neg[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n idx_neg = idx_neg[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_neg[0].shape[0], num_neg, replace = False)\n cpts_neg_sampled = idx_neg[:, idx_rand] \n \n for i in range(num_neg):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + num_pos + i)\n \n cpts = np.concatenate((cpts_pos_sampled, cpts_neg_sampled), axis = 1)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, cpts, start_idx + num_pos + i\n\n else:\n \n idx = p.grid_center_points(image.shape[1:], spacing)\n \n # Only include points not near 
boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx[0]) & (idx[0] < (sg_size - (patch_shape[0]/2))))\n idx = idx[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx[1]) & (idx[1] < (cr_size - (patch_shape[1]/2))))\n idx = idx[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx[2]) & (idx[2] < (ax_size - (patch_shape[2]/2))))\n idx = idx[:, ax_idx[0]]\n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n \n for i in range(idx.shape[1]):\n \n idx1_sg = idx[0][i] - int(patch_shape[0]/2)\n idx1_cr = idx[1][i] - int(patch_shape[1]/2)\n idx1_ax = idx[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, idx, len(image_patch_list)", "def extractPatches(output,filename,maskname, classes, level, patchSize,j, background):\n # Opening the files\n im = op.OpenSlide(filename)\n imload = im.read_region((0,0), level, im.level_dimensions[level])\n print(\"Image dimension : \", im.level_dimensions[level])\n mask = Image.open(maskname)\n if(imload.size != mask.size):\n mask = mask.resize(imload.size)\n imArray = np.array(imload)\n maskArray = np.array(mask)\n halfPatch = patchSize//2\n \n #Preprocess\n maskArray_back = addBackground(imArray, maskArray)\n extract_mask = maskArray_back if background else maskArray\n imArray = np.lib.pad(imArray, ((halfPatch, halfPatch), (halfPatch, halfPatch),(0,0)), 'reflect')\n maskArrayPad = np.lib.pad(extract_mask, ((halfPatch, halfPatch), (halfPatch, halfPatch)), 'reflect')\n np.putmask(maskArrayPad, maskArrayPad==1, 255)\n # Extraction\n for key, tup in classes.items():\n print(\"Extracting patches for \", key)\n # classes with the number of patches specified\n img_class = tup[1]\n if(img_class>0):\n print(\"Extracting \", img_class, \" patches \")\n indices = np.where(maskArray_back==tup[0])\n sample = random.sample(range(len(indices[0])), img_class)\n maskClass = np.array(maskArrayPad) #TODO : remove this ? 
\n for i in sample:\n x=indices[0][i]\n y=indices[1][i]\n x2 = x+patchSize\n y2 = y+patchSize\n croppedIm = imArray[x:x2,y:y2,0:3]\n croppedMask = maskClass[x:x2,y:y2] \n # create the images\n imageName = output + \"/\" + key + \"/image_\" + str(j) + \".png\"\n imageNameMask = output + \"/\" + key + \"/image_\" + str(j) +\"_mask.png\"\n misc.imsave(imageName,croppedIm)\n misc.imsave(imageNameMask,croppedMask)\n os.chmod(imageName , 0o777)\n os.chmod(imageNameMask, 0o777)\n j+=1\n if(j%100==0):\n print(\"\",j,\" patches extracted\")\n else:\n # classes with connected components\n nb_patches = (0 - tup[1]) + 1\n maskClass = np.array(maskArray_back)\n np.putmask(maskClass,maskClass!=tup[0],0)\n maskClass = measure.label(maskClass)\n bb_labels = measure.regionprops(maskClass)\n nb_extract = maskClass.max()\n extract_sample = range(len(bb_labels))\n try:\n max_class= tup[2]\n if(max_class<maskClass.max()):\n nb_extract = max_class\n extract_sample = random.sample(extract_sample,nb_extract)\n except:\n pass \n for i in extract_sample:\n bb = bb_labels[i].bbox\n x_center = int((bb[0]+bb[2]) / 2) + halfPatch\n y_center = int((bb[1]+bb[3]) / 2) + halfPatch\n division = 2\n shifting = [(0,-patchSize/division),(-patchSize/division,0),(0,patchSize/division),(patchSize/division,0), \\\n (-patchSize/division,-patchSize/division), (-patchSize/division,patchSize/division), \\\n (patchSize/division,patchSize/division), (patchSize/division,-patchSize/division)\n ]\n # shifting\n for h in range(0,nb_patches):\n x = x_center\n y = y_center\n if(h==0):\n None\n else:\n if(len(shifting)>0):\n rd = random.randint(0, len(shifting)-1)\n shift = shifting[rd]\n x+=shift[0]\n y+=shift[1]\n del shifting[rd]\n else:\n x += random.randint(-patchSize/division,patchSize/division) \n y += random.randint(-patchSize/division,patchSize/division)\n x1 = int(x-patchSize/2)\n x2 = int(x+patchSize/2)\n y1 = int(y-patchSize/2)\n y2 = int(y+patchSize/2)\n # cropping\n croppedIm = imArray[x1:x2,y1:y2,0:3]\n croppedMask = maskArrayPad[x1:x2,y1:y2]\n # create the images if needed\n imageName = output + \"/\" + key + \"/image_\" + str(j) + \".png\"\n imageNameMask = output + \"/\" + key + \"/image_\" + str(j) +\"_mask.png\"\n misc.imsave(imageName,croppedIm)\n misc.imsave(imageNameMask,croppedMask)\n os.chmod(imageName , 0o777)\n os.chmod(imageNameMask, 0o777)\n j+=1\n if(j%100==0):\n print(\"\",j,\" patches extracted\")\n return j", "def patches_sampling(self, image, patch_size, stride):\n h, w = image.shape[2:4]\n patches = []\n for i in range(0, h - patch_size + 1, stride):\n for j in range(0, w - patch_size + 1, stride):\n patches.append(image[:, :, i:i + patch_size, j:j + patch_size])\n patches = torch.cat(patches, dim=0).to(self.device)\n return patches", "def extract_neighborhoods(imgs, PATCH_SIZE, nr_additional_patches):\n data = []\n for i in range(len(imgs)):\n img_patches = img_crop_neighborhoods(imgs[i], PATCH_SIZE, PATCH_SIZE, nr_additional_patches)\n for j in range(len(img_patches)):\n data.append(img_patches[j])\n return data", "def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError('image_size should has 2 or 3 elements')\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / scan_interval[i]))\n if scan_interval[i] != 0 else 1 for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - 
image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices", "def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(\"image_size should has 2 or 3 elements\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1\n for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices", "def img_crop_neighborhoods(im, w, h, nr_additional_patches):\n is_2d = len(im.shape) < 3\n list_patches = []\n im = numpy.asarray(im)\n imgwidth = im.shape[0]\n imgheight = im.shape[1]\n add_p_w = nr_additional_patches * w\n add_p_h = nr_additional_patches * h\n \n #wrap the image with a 0 boundary\n if is_2d:\n wraped_img = zeros((imgwidth+add_p_w*2,imgheight+add_p_h*2))\n wraped_img[add_p_w:add_p_w+imgwidth, add_p_h:add_p_h+imgheight] = im\n else:\n wraped_img = zeros((imgwidth+add_p_w*2,imgheight+add_p_h*2, 3))\n wraped_img[add_p_w:add_p_w+imgwidth, add_p_h:add_p_h+imgheight, :] = im\n \n # extract\n for i in range(0,imgheight, h):\n for j in range(0,imgwidth,w):\n if is_2d:\n im_patch = wraped_img[j:j+w+add_p_w*2, i:i+h+add_p_h*2]\n else:\n im_patch = wraped_img[j:j+w+add_p_w*2, i:i+h+add_p_h*2, :]\n list_patches.append(im_patch)\n return list_patches", "def _extract_patches(img, patch_s):\n def np_extract_patches(img):\n orig = np.array(img.shape[:2])\n new = patch_s[0] * np.ceil(orig / patch_s[0]).astype(int)\n points = new - orig\n img = np.pad(img, [(0, points[0]), (0, points[1]), (0, 0)],\n mode='constant')\n 
patches = view_as_blocks(img, tuple(patch_s)).astype(np.float32)\n patches = patches.reshape(-1, *patch_s)\n return patches\n\n patches = tf.numpy_function(np_extract_patches, [img], tf.float32)\n return patches" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert the JSON schema for requests from api.openweathermap.org.
def validate_schema_openweathermap(self, actual, schema): resources_dir = os.path.abspath(os.getcwd()) relative_schema_path = valid_json_schema if schema == 'Valid' else error_json_schema schema_data = open(os.path.join(resources_dir, relative_schema_path)) self.validate_schema(actual, json.load(schema_data)) return self
[ "def test_api_schema(self):\n response = self.client.get(\"/api/schema\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(json.loads(response.content)[\"_meta\"][\"title\"], \"Marsha API\")", "def test_trucks_api(self):\n resp = self.app.get('/trucks')\n self.assertEqual(resp.status_code, 200)\n\n # ensure proper JSON is returned\n data = json.loads(resp.data)\n assert 'resp' in data\n for item in data['resp']:\n # address is not actually required\n assert 'name' in item\n assert 'fooditems' in item\n assert 'latitude' in item\n assert 'longitude' in item\n assert 'schedule' in item", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def test_suite_schema():\n r = requests.get(BASE_URL + ';schema')\n assert r.ok\n assert r.json()", "def test_view_bug_valid_schema():\n\n url = \"https://bugs.info:4000/api/v1/bug\"\n schema = {\n \"bug_id\": 1234,\n \"title\": \"The service is hanging\",\n \"description\": \"service hangs after casting a string\",\n \"status\": \"OPEN\",\n }\n bug_id = schema[\"bug_id\"]\n r = requests.get(url, bug_id)\n # verify 200 request status code\n assert r.status_code == 200\n response_body = r.json()\n # if response isn't the same as the schema an exception will be raised\n validate(instance=response_body, schema=schema)", "def test_sanity():\n schema = get_schema('business.json')\n print(schema)\n assert schema", "def valid_response_schema():\r\n response_schema={\r\n \"id\": int,\r\n \"name\": str,\r\n \"description\": Or(None, str),\r\n \"format_group\": str\r\n \r\n } \r\n return response_schema", "def assert_json_result(response, json, schema):\n assert response.status_code == 200\n assert json['id'] == schema.id\n assert json['type'] == schema.type\n assert json['uri'] == schema.uri", "def test_parse_weather_weather_simple_json(self):\n\n # Parse the data.\n actual = timeseries.parse_weather(self.weather_simple)\n\n # Ensure actual and expected results are equal.\n pd.testing.assert_frame_equal(actual, self.weather_simple_expected)", "def test_scrape_weather_data(self):\n url = 'https://www.wunderground.com/history/airport/KFTY/' \\\n '2017/10/11/DailyHistory.html?req_city=Atlanta&' \\\n 'req_state=GA&req_statename=Georgia&reqdb.zip=30301&' \\\n 'reqdb.magic=1&reqdb.wmo=99999'''\n json_answer = wunderground_scraper.scrape_weather_data(url)\n expected_answer = '{\"Actual Max Temperature\": \"86F\", ' \\\n '\"Actual Mean Temperature\": \"76F\", ' \\\n '\"Actual Min Temperature\": \"66F\", ' \\\n '\"Average Max Temperature\": \"75F\", ' \\\n '\"Average Mean Temperature\": \"64F\", ' \\\n '\"Average Min Temperature\": \"53F\", ' \\\n '\"Record Max Temperature\": \"104F (1999)\", ' \\\n '\"Record Min Temperature\": \"32F (2000)\"}'\n self.assertEqual(json_answer, expected_answer)", "def test_get_request_json_improperly_formatted(self):\n response = self.client.post(\n '/json_request/',\n data=self.request_dict\n )\n response_json = json.loads(response.content.decode('utf-8'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_json, None)", "def simple_response_validation(data_dict):\n try:\n assert 'before' in data_dict\n assert 'after' in data_dict\n assert 'ref' in data_dict\n assert 'repository' in data_dict\n assert 'full_name' in data_dict['repository']\n assert 'head_commit' in data_dict\n assert 'timestamp' in data_dict['head_commit']\n assert 'author' in data_dict['head_commit']\n assert 'pusher' in data_dict\n assert 'name' in data_dict['pusher']\n except 
AssertionError:\n print \"Invalid format data object received or github api \" \\\n \"format changed\"\n traceback.print_exc()\n raise web.webapi.BadRequest()", "def test_api_response(self):\n\n try:\n awx = AerisWeather(app_id=app_id,\n client_id=client_id,\n client_secret=client_secret)\n\n endpoint = Endpoint(endpoint_type=EndpointType.ALERTS,\n location=RequestLocation(postal_code=\"55124\"),\n action=None,\n filter_=[RequestFilter.ALERTS.ALL],\n sort=None,\n params=None,\n query=None)\n\n alerts_list = awx.request(endpoint=endpoint)\n\n for alert in alerts_list: # type: AlertsResponse\n assert alert.place is not None\n timestamps = alert.timestamps\n assert type(timestamps) == AlertTimestamps\n assert timestamps.issued is not None\n includes = alert.includes\n assert type(includes) is AlertIncludes\n assert includes.wxzones is not None\n assert alert.active is True\n\n except URLError as url_err:\n print(\"URL Error: \" + url_err.reason)\n raise url_err\n\n except AerisError as aeris_err:\n print(\"AerisError: \" + str(aeris_err))\n raise aeris_err\n\n except Exception as ex:\n print(ex.args)\n raise ex", "def test__json__(self):\n json = self.placement.__json__()\n\n self.validate_test(json[\"name\"] == \"test_name\")\n self.validate_test(json[\"location\"] == \"test_location\")\n self.validate_test(json[\"orchestration_id\"] == \"test_orchestration_id\")", "def test_api_schema(self):\n response = self.client.get(\"/api/schema/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.get(\"Content-Type\"), \"application/vnd.oai.openapi; charset=utf-8\"\n )\n self.assertEqual(\n response.get(\"Content-Disposition\"), 'inline; filename=\"Marsha API.yaml\"'\n )", "def test_api_contents(self):\n self.assertEqual(self.get_json, [{'content': 'test app',\n 'user': {'email': 'test@djangoapp.com',\n 'first_name': 'Diego',\n 'follower_count': 0,\n 'last_name': 'Test',\n 'username': 'diego'}}])", "def test_data_valid_api_locations(test_data_valid_api_locations_timeline):\n data = test_data_valid_api_locations_timeline\n data[\"timelines\"] = {}\n return data", "def check_schema(self, response):\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result", "def test_no_input(self):\n resp = SearchTest.client.get('/api/search/')\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. Please check the API documentation for the appropriate input format!!\",\"No Input Test Error\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of nonempty dicts/lists or other objects
def recursive_count(o):
    if isinstance(o, dict):
        c = 0
        for v in o.values():
            c += recursive_count(v)
        return c
    elif isinstance(o, list):
        c = 0
        for v in o:
            c += recursive_count(v)
        return c
    else:
        return 1
[ "def __len__(self):\n return len(self._dicts)", "def test_count_empty() -> None:\n assert count([]) == {}", "def count(d):\n return sum(len(v) for v in d.values())", "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def get_types_count():\n return len(type_dict.keys())", "def __len__(self):\n ks = self.__dict__.keys()\n return len([k for k in ks if not k.startswith('__')\\\n and not isinstance(k, Struct)])", "def len(result):\n\tl = 0;\n\tfor (dn, item) in result:\n\t\tif dn == None :\n\t\t\tcontinue\n\t\tl += 1\n\treturn l", "def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)", "def count_values(dct):\n n = 0\n for k, v in dct.items():\n if isinstance(v, dict):\n n += count_values(v)\n else:\n n += 1\n return n", "def count_objects(self):\n count = 0\n for uri, po in self.query_objects.items():\n if po[\"endpoint_obj\"] is not None:\n count += 1\n return ObjectCount(count)", "def get_dict_data_len(x_dict: Dict[Any, Collection]):\n return check_all_same_length(*x_dict.values())", "def __len__(self) -> int:\n\n # do a DFS to count the number of leaf nodes\n count = 0\n stack = [self._data]\n while stack:\n node = stack.pop()\n if isinstance(node, NestedDict):\n node = node._data\n if isinstance(node, Mapping):\n stack.extend(node.values())\n else:\n count += 1\n\n return count", "def __len__(self):\n count = 0\n for name, ps in self.ppredicates.items():\n count += len(ps)\n for name, ps in self.npredicates.items():\n count += len(ps)\n return count", "def length(self):\n # TODO: Loop through all buckets\n # TODO: Count number of key-value entries in each bucket\n count = 0\n\n for bucket in self.buckets:\n for item in bucket.items():\n count += 1\n return count", "def dictLenght(value):\r\n if isinstance(value, dict):\r\n lenght = 0\r\n for keys in value:\r\n for val in value[keys]:\r\n lenght += 1\r\n return lenght", "def how_many(aDict):\n count = 0\n for vals in aDict.values():\n count += len(vals)\n return count", "def dictLenght(value):\n if isinstance(value, dict):\n lenght = 0\n for keys in value:\n for val in value[keys]:\n lenght += 1\n return lenght", "def number_of_people(names_dict: dict) -> int:\n get_dict_length = sum(names_dict.values())\n return get_dict_length" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list formed by the evaluation types present in criteria.
def get_evaluation_analysis_types(self, parameters):
    eval_types = []
    for evaluation_criteria_id in parameters["clustering"]["evaluation"]["evaluation_criteria"]:
        # for subcriteria in parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id]:
        #     eval_types.append(subcriteria)
        eval_types.extend(parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id].keys())
    return list(set(eval_types))
[ "def listCriteriaTypes():", "def listSearchCriteriaTypes():", "def listSortCriteriaTypes():", "def listCriteria():", "def types(self):\n return [term for term in self._terms\n if isinstance(term, (TypeIdentifier, String, Regex))]", "def types(self):\n ret = []\n for scope in self.scopes:\n ret.append({k: v[0] for k, v in scope.items()})\n return ret", "def atom_type_expressions(self):\n return list(set([atype.expression for atype in self.atom_types]))", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def GetCriteriaList(cls):\r\n\r\n if hasattr(cls, '_criteria_list'):\r\n return cls._criteria_list\r\n\r\n cls._criteria_list = [\r\n HealthCriteria('Errors',\r\n 'Error threshold exceeded.',\r\n ErrorCriteria,\r\n [counters.viewfinder.errors.error],\r\n 5),\r\n HealthCriteria('ReqFail',\r\n 'Failed Request threshold exceeded.',\r\n RequestsFailedCriteria,\r\n [counters.viewfinder.service.req_per_min, counters.viewfinder.service.fail_per_min],\r\n 5),\r\n HealthCriteria('OpRetries',\r\n 'Operation retry threshold exceeded.',\r\n OperationRetriesCriteria,\r\n [counters.viewfinder.operation.ops_per_min, counters.viewfinder.operation.retries_per_min],\r\n 5),\r\n HealthCriteria('MissingMetrics',\r\n 'Metrics collection failed.',\r\n MissingMetricsCriteria,\r\n [],\r\n 3),\r\n ]\r\n return cls._criteria_list", "def report_type_choices():\n\n rts = report_types()\n rcs = report_categories()\n return [(c, [(rt.report_type, rt.name) for rt in rts if rt.category == c]) for c in rcs]", "def quiz_type_results_display(self):\n return self.QUIZ_TYPES_FOR_RESULTS[self.quiz_type]", "def improper_type_expressions(self):\n return list(set([atype.expression for atype in self.improper_types]))", "def get_assessor_input_types(self):\n assessor_inputs = [\n i\n for i in list(self.proc_inputs.values())\n if i['artefact_type'] == 'assessor'\n ]\n assessors = [i['types'] for i in assessor_inputs]\n\n return list(itertools.chain.from_iterable(assessors))", "def filter_evaluations_by_type(self, type_):\n from .evaluation import Evaluation\n from .code_component import CodeComponent\n\n joined_eval = join(\n Evaluation.t, CodeComponent.t,\n ((Evaluation.m.trial_id == CodeComponent.m.trial_id) &\n (Evaluation.m.code_component_id == CodeComponent.m.id))\n )\n joined = join(\n Activation.t, joined_eval,\n ((Evaluation.m.trial_id == Activation.m.trial_id) &\n (Evaluation.m.activation_id == Activation.m.id))\n )\n query = (\n select([CodeComponent.m.name, Evaluation.m.repr])\n .select_from(joined)\n .where((Activation.m.trial_id == self.trial_id) &\n (Activation.m.id == self.id) &\n (CodeComponent.m.type == type_))\n )\n for result in relational.session.execute(query):\n yield result", "def types():\n with qdb.sql_connection.TRN:\n sql = \"\"\"SELECT artifact_type, description,\n can_be_submitted_to_ebi,\n can_be_submitted_to_vamps, is_user_uploadable\n FROM qiita.artifact_type\n ORDER BY artifact_type\"\"\"\n qdb.sql_connection.TRN.add(sql)\n return qdb.sql_connection.TRN.execute_fetchindex()", "def getType(self, terms):\n\n\t\treturn [i for i in xrange(len(self.toTYPE)) if terms in self.toTYPE[i]]", "def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c", "def list_evaluators():\n # import _model_evaluation_registry inside function to avoid circuit importing\n from mlflow.models.evaluation.evaluator_registry import _model_evaluation_registry\n\n return list(_model_evaluation_registry._registry.keys())", "def 
get_assessment_part_search_record_types(self):\n return # osid.type.TypeList" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the 'details' field of a clustering.
def analysis_function_details(self,clustering):
    return clustering.details
[ "def getClusterInfo(self):\n pass", "def get_cluster_details(cluster):\n cmd = f\"ocm describe cluster {cluster} --json=true\"\n out = run_cmd(cmd)\n return json.loads(out)", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def detail(self):\n return self._detail", "def getDetailsJSON(self):\n return self.__detailsJSON", "def get_cluster_details(cluster):\n out = run_cmd(f\"ibmcloud ks cluster get --cluster {cluster} -json\")\n return json.loads(out)", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def _get_esxcluster_proxy_details():\n det = __salt__[\"esxcluster.get_details\"]()\n return (\n det.get(\"vcenter\"),\n det.get(\"username\"),\n det.get(\"password\"),\n det.get(\"protocol\"),\n det.get(\"port\"),\n det.get(\"mechanism\"),\n det.get(\"principal\"),\n det.get(\"domain\"),\n det.get(\"datacenter\"),\n det.get(\"cluster\"),\n )", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def get_line_item_summary_details(self):\n raise NotImplementedError", "def __repr__(self):\n return (\n f'GalaxyCluster {self.unique_id}: '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self.galcat)} source galaxies'\n )", "def get_description(self, identifier, node_details=None):\n return structures_module.nodes.get_description(self.khoros_object, identifier, node_details)", "def disk_detail(self):\n return self._disk_detail", "def get_details(disease):\n\treturn d_desc_map[disease]", "def get_detail(self, *ident):\n if not self._on_detail_page():\n sel.force_navigate('cloud_provider', context={'provider': self})\n return details_page.infoblock.text(*ident)", "def print_details(self):\n print(\"\\nId: {}\".format(self.id))\n print(\"Name: {}\".format(self.name.upper()))\n print(\"Size: {}\".format(self.weight))\n print(\"Description: {}\".format(self.description))", "def analysis_details(self, analysis):\n\n ana = rivet.AnalysisLoader.getAnalysis(analysis)\n\n if ana:\n details = {\n 'authors': ana.authors(),\n 'bibKey': ana.bibKey(),\n 'bibTeX': ana.bibTeX(),\n 'collider': ana.collider(),\n 'description': 
ana.description(),\n 'experiment': ana.experiment(),\n 'inspireId': ana.inspireId(),\n 'name': ana.name(),\n 'references': ana.references(),\n 'requiredBeams': ana.requiredBeams(),\n 'requiredEnergies': ana.requiredEnergies(),\n 'runInfo': ana.runInfo(),\n 'spiresId': ana.spiresId(),\n 'status': ana.status(),\n 'summary': ana.summary(),\n 'year': ana.year()\n }\n self._ws.put(['analysis_details', details])", "def describe(self):\n return pd.Series(self.hmap).describe()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of elements that are clusterized in this clustering (which may not be the total number of elements of the dataset if there were noisy elements)
def analysis_function_total_elements(self,clustering):
    return clustering.total_number_of_elements
[ "def n_clusters(self):\n return len(self.clusters)", "def numConnectedElements(self):\n \n pass", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def count_elements_in_dataset(dataset):\n\n return dataset.count()", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def num_elements(self):\n return self.subset.num_elements()", "def nclusters(self):\n return self._nclusters", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def get_n_clusters(self) :\n return self.n_clusters", "def valency(self):\n return len(self.neighbors())", "def number_of_elements(self):\n return self.__number_of_elements", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def number_of_members(self):\n\n return np.size(self.cluster_members)", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def element_count(self):\n\n return len(self.elements)", "def calc_element_count(self):\n\t\t# i = - (m * ln(1 - (x/m))) / k\n\t\tx = float(self.bit_array.count())\n\t\treturn int(ceil(- (float(self.size) * log(1.0 - (x / float(self.size)))) / float(self.hash_count)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the percentage of elements of the clustering that are in the 4 bigger clusters.
def analysis_function_top_4(self,clustering):
    clustering.sort_clusters_by_size()
    total = 0
    percents = clustering.get_population_percent_of_n_bigger_clusters(4)
    for p in percents:
        total = total+p
    return total
[ "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def purity_score(clusters, classes):\n\n A = np.c_[(clusters, classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:, 0]):\n z = A[A[:, 0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def davies_bouldin_index(self):\n\n total_clusters_dispersion = np.zeros(self.n_clusters)\n\n for data_index in range(self.n_samples):\n\n total_clusters_dispersion[self.labels[data_index]] += euclidean_distance(self.data[data_index],self.clusters_mean[self.labels[data_index]])\n\n\n mean_clusters_dispersion = total_clusters_dispersion / self.clusters_size\n #mean_clusters_dispersion = np.sqrt(self.WGSS_clusters/self.clusters_size)\n\n sum_Mk = 0.\n\n for cluster_i in range(self.n_clusters):\n max_Mk = 0.\n for cluster_j in range(self.n_clusters):\n if cluster_i != cluster_j :\n Mk = (mean_clusters_dispersion[cluster_i] + mean_clusters_dispersion[cluster_j])/euclidean_distance(self.clusters_mean[cluster_i],self.clusters_mean[cluster_j])\n if Mk > max_Mk :\n max_Mk = Mk\n \n sum_Mk += max_Mk\n\n return sum_Mk/self.n_clusters", "def compute_clustering_score():\n # TODO: Implement simple clustering\n raise NotImplementedError()", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses", "def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", 
count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")", "def compute_cluster_class_fractions(k_means_model, y):\n\n n_classes = y.shape[1]\n class_labels = utils.one_hot_to_index(y)\n cluster_labels = k_means_model.labels_\n\n class_clustroid_counts = np.zeros((n_classes, K))\n for i in range(len(class_labels)):\n class_clustroid_counts[class_labels[i], cluster_labels[i]] += 1\n\n class_clustroid_fractions = class_clustroid_counts / np.sum(class_clustroid_counts, axis=1).reshape(n_classes, 1)\n\n print(\"\\n---- Class Clustroid Distribution ----\")\n for i in range(n_classes):\n print(\"Class {}: {}\".format(i, class_clustroid_fractions[i, :]))", "def wordsByClusteringCoefficient(G):\n\n\tc=nx.clustering(G)\n\n\thola=[]\n\tfor w in c:\n\t\tif c[w]!=0 and c[w]!=1:\n\t\t\thola.append((c[w],w))\n\thola.sort()\n\n\tprint(\"\\n\\nCLUSTERING COEFFICIENT :\")\n\tprint(\"------------------------\")\n\n\tprint(\"Lowest 15 :\")\n\tfor i in range(0,15):\n\t\tprint(hola[i][1],end=\" \")\n\n\tprint(\"\\n\\nHighest 15 :\")\n\tfor i in range(len(hola)-1,len(hola)-1-15,-1):\n\t\tprint(hola[i][1],end=\" \")\n\n\tprint()", "def calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)", "def calc_percentages(self):\n self.beyond_lower.calc_percentage(self.total_entries)\n for b in self.buckets:\n b.calc_percentage(self.total_entries)\n self.beyond_upper.calc_percentage(self.total_entries)", "def percentage_of_three_pointers_from_corner(self):\n return self._percentage_of_three_pointers_from_corner", "def clust_strength(mat,groups):\n cluster_strengths = []\n for group in range(len(np.unique(groups))):\n this_cluster = mat[groups==group,:]\n this_cluster_mean = np.mean(this_cluster,axis=0)\n all_dists = mat - this_cluster_mean\n out_dists = np.linalg.norm(all_dists[groups!=group],axis=1)\n in_dists = np.linalg.norm(all_dists[groups==group],axis=1)\n this_strength = np.mean(out_dists)/np.mean(in_dists)\n cluster_strengths.append(this_strength)\n \n return np.mean(cluster_strengths)", "def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)", "def getPercentageDistributionOfMeasureFromCollectivity(self, listOfArrays, listOfCollectivities):\n assert len(listOfArrays) == len(listOfCollectivities)\n highestWhole = []\n for arr, arrCollectivity in zip(listOfArrays, listOfCollectivities):\n assert arr.shape == arrCollectivity.shape\n highestCollectivityPosition = np.argmax(np.abs(arrCollectivity))\n highestWhole.append(np.abs(arr[highestCollectivityPosition]))\n highestWholeDiscretized = self.calcDiscretizedCount01(highestWhole)\n highestWholeDiscretizedPercentage = (highestWholeDiscretized.astype(np.float64) / highestWholeDiscretized.sum()) * 100\n return highestWholeDiscretizedPercentage", "def resolve_using_lower_divergence( cluster ):\n prev_cres = None\n # Sort by divergence ascending. 
Treat simple repeats as if they are the\n # highest possible divergence\n cluster = sorted(cluster, key=lambda annot: annot[16] if annot[16] >= 0 else 101)\n for i in range(len(cluster)):\n a_res = cluster[i]\n a_start = a_res[5]\n a_end = a_res[6]\n if ( a_start == 0 and a_end == 0 ):\n continue\n a_len = a_end - a_start + 1\n a_div = a_res[16]\n for j in range(i+1,len(cluster)):\n b_res = cluster[j]\n b_start = b_res[5]\n b_end = b_res[6]\n b_score = b_res[0]\n if ( b_start == 0 and b_end == 0 ):\n continue\n b_len = b_end - b_start + 1\n b_div = b_res[16]\n max_start = max( a_start, b_start)\n min_end = min( a_end, b_end )\n overlap = min_end - max_start + 1\n if ( overlap > 0 ):\n if ( a_len == overlap or b_len == overlap ):\n # Containment (a in b or b in a ), remove lower\n # score\n cluster[j][5] = 0\n cluster[j][6] = 0\n else:\n # Overlap\n if ( a_start < b_start ):\n # Trim left side\n cluster[j][5] += overlap\n else:\n # Trim right side\n cluster[j][6] -= overlap", "def get_susceptibility(clusters):\n\n # If there is no or only one cluster then there is no finite cluster\n if len(clusters) <= 1:\n return np.nan\n\n # Remove largest, i.e. infinite, cluster\n clusters.remove(max(clusters))\n\n sizes = np.array(list(set(clusters)))\n n_s = []\n\n for size in sizes:\n n_s.append(clusters.count(size))\n\n temp = sizes * n_s\n S = np.sum(sizes * temp) / np.sum(temp)\n\n return S", "def AverageCluster(C):\n n = len(C)\n c = sum(C)/n\n return c" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the percent of noise elements in the dataset.
def analysis_function_noise_level(self, clustering, total_elements):
    return 100.-(clustering.total_number_of_elements/float(total_elements))*100.
[ "def getNoiseVar(img,fraction=0.95):\n last_val = np.percentile(img,fraction)\n #si(img<last_val,title=\"Pixel values considered as noise\")\n return np.var(img[img<last_val])", "def water_percentage(self):\n water = 1.00\n for ingredient in self.ingredients:\n water -= ingredient.ratio\n return round(water, 2)", "def white_percent(img):\n return cv2.countNonZero(img) / get_size(img)", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def sampling_percentage(self) -> float:\n return pulumi.get(self, \"sampling_percentage\")", "def noise_level(data):\n length=len(data) - 2\n dev=[]\n for i in range(1,length - 1):\n dev.append((abs(data[i] - data[i-1]) + abs(data[i] - data[i + 1]))/2)\n dev.sort()\n return dev[round(0.9*length)]", "def get_signal2noise_ratio(self) -> float:", "def mask_percent(img):\n if (len(img.shape) == 3) and (img.shape[2] == 3):\n img = np.sum(img, axis=-1)\n\n mask_percentage = 100 * (1 - np.count_nonzero(img) / img.size )\n return mask_percentage", "def noisefit2(self):\n #1/100 seems to be to small!! noise has low frequency\n noiseregion=self.data[1][:len(self.data[1])/100]\n sigma=np.sqrt(np.mean([noiseregion[i]**2 for i in range(len(noiseregion))])-(np.mean(noiseregion))**2)\n self.noise_level=sigma\n return sigma", "def noise(self, freq: int, /) -> None:", "def get_percent_of_lifespace_labeled_outliers(self):\n return self.get_percent_of_values_labeled_outliers()", "def sampling_percentage(self) -> Optional[float]:\n return pulumi.get(self, \"sampling_percentage\")", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def noise_label_discovery(dve_out, noise_idx):\n # Sorting\n divide = 20 # Per 100/20 percentile\n sort_idx = np.argsort(dve_out)\n neg_sort_idx = np.argsort(-dve_out)\n\n # Output initialization\n output_perf = np.zeros([divide, 2])\n\n # For each percentile\n for itt in range(divide):\n # from low to high data values\n output_perf[itt, 0] = len(np.intersect1d(sort_idx[:int((itt+1)* \\\n len(dve_out)/divide)], noise_idx)) \\\n / len(noise_idx)\n # from high to low data values\n output_perf[itt, 1] = len(np.intersect1d(neg_sort_idx[:int((itt+1)* \\\n len(dve_out)/divide)], noise_idx)) \\\n / len(noise_idx)\n # Returns TPR of discovered noisy samples\n return output_perf", "def generate_noise(self, dataset: GeneratedDataset) -> None:", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def findNoiseLevel(data, nwin = 10, winsize = 1000):\n ndet = np.size(data,0)\n ndata = np.size(data,1)\n step = int(ndata/nwin)\n if nwin*winsize > ndata:\n winsize = step\n noises = np.zeros(ndet)\n for i in range(ndet):\n ns = []\n for j in range(nwin):\n ns.append(np.var(data[i][j*step:j*step+winsize]))\n noises[i] = np.median(ns)\n return noises", "def _get_percent_out(poem_word_list, data_word_list):\n\n total = len(poem_word_list)\n count = 0\n data_word_list = set(data_word_list)\n for word in poem_word_list:\n if word not in data_word_list:\n count += 1\n return float(count)/float(total)", "def sampling_percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"sampling_percentage\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the mean cluster size.
def analysis_function_mean_cluster_size(self,clustering):
    sizes = get_cluster_sizes(clustering.clusters)[1]
    return numpy.mean(sizes)
[ "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def meanContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['meanContig']", "def mean_cluster(labelled_cluster):\n sum_of_points = sum_cluster(labelled_cluster)\n mean_of_points = sum_of_points * (1.0 / len(labelled_cluster))\n return mean_of_points", "def average_class_size(self):\n return self._average_class_size", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def AverageCluster(C):\n n = len(C)\n c = sum(C)/n\n return c", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def get_cluster_sizes(clusters):\n total_elements = 0\n cluster_sizes = []\n for c in clusters:\n size = c.get_size()\n total_elements = total_elements + size\n cluster_sizes.append(size)\n return total_elements,cluster_sizes", "def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)", "def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk", "def get_n_clusters(self) :\n return self.n_clusters", "def average_size(self):\n sizes = []\n for i in range(self.params.num_trees):\n with tf.device(self.device_assigner.get_device(i)):\n sizes.append(self.trees[i].size())\n return tf.reduce_mean(tf.pack(sizes))", "def n_clusters(self):\n return len(self.clusters)", "def calculate_train_cluster_sizes():\n return calculate_cluster_sizes(os.path.expanduser(\"resources/legal_filter_train\"))", "def Mean_SizeT(ClusterResults):\r\n return np.mean([np.ma.average(CR.Size95T, weights=CR.Members) for CR in ClusterResults])", "def nclusters(self):\n return self._nclusters", "def graph_data_size_avg(self) -> float:\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)", "def group_size_avg(self, group_type):\n\n sizes = [len(hh) for hh in list(self.groups[group_type].values())]\n return np.mean(sizes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method creates a project in Pivotal Tracker
def create_project():
    client = RequestManager()
    project_name = "".join(choices(string.ascii_letters + string.digits, k=10))
    client.set_method("POST")
    client.set_endpoint("/projects")
    body = {"name": project_name}
    client.set_body(json.dumps(body))
    response = client.execute_request()
    STORED_ID['project_id'] = response.json()['id']
[ "def project_create(project):\n client.project.create(project)", "def cmdop_projectcreate ( DbConnection, logger, project, icoordsys) :\n try :\n logger.info(\"Running cmdop_projectcreate\")\n command = PROJECTCREATE + \" -project \" + \"\\\"\" + str(project) + \"\\\"\" + \" -icoordsys \" + str(icoordsys)\n BathyExecutablesLib.cmdop_createproject ( logger, command )\n except Exception, err:\n logger.critical( \"Running cmdop_projectcreate failed:ERROR: %s\\n\" % str(err))\n raise", "def test_create_project(self):\n pass", "def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p", "def test_create_project_request(self):\n pass", "def test_project_create(default_domino_client):\n new_project_name = f\"project-{str(uuid.uuid4())}\"\n response = default_domino_client.project_create(new_project_name)\n assert response.status_code == 200, f\"{response.status_code}: {response.reason}\"\n\n project_list = default_domino_client.projects_list()\n assert any(\n p[\"name\"] == new_project_name for p in project_list\n ), f\"Unable to retrieve new project!\\n{pformat(project_list)}\"\n\n default_domino_client.project_archive(new_project_name)", "def test_projects_create(self):\n pass", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def test_iam_project_create(self):\n pass", "def create_project_info(data):\n\t\n\tproject = ProjectInfo()\n\tproject.name = data['name']\n\tproject.description = data['description']\n\tproject.start_date = data['start_date']\n\tproject.end_date = data['end_date']\n\tproject.save()\n\tprint ('Inserted')\n\treturn True", "def new_project_created_activity(pk):\n print(f'New project created with primary key: {pk}')\n print('Creating related feed activity...')\n activity = ProjectCreatedActivity(project=Project.objects.get(pk=pk))\n activity.save()", "def create_project(self):\n url = self.get_url_create_project()\n logger.info(\"Creating project '{}' on page: {} with metdata file: {}\"\n .format(self.projectname, url, self.metadata))\n self.driver.get(url)\n self.click_ele(\"//input[@id='app_title']\")\n self.type_ele(\"//input[@id='app_title']\", self.projectname)\n\n # @TODO: should this be a class attribute?\n project_purpose = PROJECT_PURPOSE_RESEARCH\n self.select_ele(\"//select[@id='purpose']\", project_purpose)\n\n if PROJECT_PURPOSE_OTHER == project_purpose:\n self.type_ele(\"//*[@id='purpose_other_text']\", PROJECT_PURPOSE_OTHER_SPECIFY)\n self.click_ele(\"//input[@value=' Create Project ']\")\n\n elif PROJECT_PURPOSE_RESEARCH == project_purpose:\n # When the purpose is `Research` an extra checkbox is required: `Area of research`\n self.click_ele(\"//input[@id='purpose_other[7]']\")\n self.click_ele(\"//input[@value=' Create Project ']\")\n try:\n # confirm warning...\n self.click_ele(\"//span[@class='ui-button-text'][text() = 'I Agree']\")\n except:\n logger.warn(\"There was no confirmation window when creating the project\")\n\n # Set as longitudinal\n self.click_ele(\"//button[@id='setupLongiBtn']\")\n self.projectid = self.get_project_id()", "def newproject():\n yesnobool = lambda v: (v == 'yes') or False \n boolyesno = lambda v: 'yes' if v else 'no'\n \n path = 
fabric_op.prompt('Project directory path (example: /Users/jdoe/superproject):')\n \n capture = fabric_op.prompt(\n 'Capture command output? (yes/no)', \n default=boolyesno(DEFAULTS['CAPTURE']))\n \n gae = fabric_op.prompt(\n 'Is a Google App Engine project? (yes/no)', \n default=boolyesno(DEFAULTS['GAE']))\n \n _create_project(str(path), yesnobool(capture), yesnobool(gae))", "def create_project(self):\n project = ldtmaya.create_surfacing_project()\n self.update_ui_projects()", "def create(**kwargs):\n kwargs[\"id\"] = 0\n return _alter_project(**kwargs)", "def _create_project(org, project_name):\n project = Project(\n org=org,\n name=project_name\n )\n project.save()\n return project", "def test_create_project(self):\n with self.sudo('manager-a'):\n project = self.env['project.project'].with_context({'tracking_disable': True}).create({\n 'name': 'Project Company A',\n 'partner_id': self.partner_1.id,\n })\n self.assertEqual(project.company_id, self.env.user.company_id, \"A newly created project should be in the current user company\")\n\n with self.switch_company(self.company_b):\n with self.assertRaises(AccessError, msg=\"Manager can not create project in a company in which he is not allowed\"):\n project = self.env['project.project'].with_context({'tracking_disable': True}).create({\n 'name': 'Project Company B',\n 'partner_id': self.partner_1.id,\n 'company_id': self.company_b.id\n })\n\n # when allowed in other company, can create a project in another company (different from the one in which you are logged)\n with self.allow_companies([self.company_a.id, self.company_b.id]):\n project = self.env['project.project'].with_context({'tracking_disable': True}).create({\n 'name': 'Project Company B',\n 'partner_id': self.partner_1.id,\n 'company_id': self.company_b.id\n })", "def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response", "def test_add_project(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Static method for deleting all projects.
def delete_all_projects():
    client = RequestManager()
    client.set_method("GET")
    client.set_endpoint("/projects")
    response = client.execute_request()
    for project in response.json():
        try:
            ProjectHelper.delete_project(project["id"])
        except TypeError:
            LOGGER.info(project)
[ "def clear(self):\n for project in Project.objects:\n project.delete()", "def __remove_all_projects__():\n p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)\n p.wait()", "def delete(self):\n args = {\"id\": self.id}\n _perform_command(self.owner, \"project_delete\", args)\n del self.owner.projects[self.id]", "def delete_project(arn=None):\n pass", "def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None", "def test_projects_delete(self):\n pass", "def delete_project(request, pk):\n p = Project.objects.get(pk=pk)\n aps = p.apostila_set.all()\n for a in aps:\n # desociates project and Apostila\n a.project.remove(p)\n p.delete()\n return redirect('url_projects')", "def test_delete_project(self):\n pass", "def test_clear_all_workspaces_delete(self):\n pass", "def clear_projects(self):\r\n self._projects = []\r\n self.comboProject.clear()\r\n self.comboProject.hide()", "def tearDownClass(cls):\n projects = ['arc_project_for_testing_delete_after_usage1', 'arc_project_for_testing_delete_after_usage2',\n 'ar c', 'ar:c', 'ar<c', 'ar%c']\n for project in projects:\n project_directory = os.path.join(arc_path, 'Projects', project)\n shutil.rmtree(project_directory)", "def deleteproject():\n cust_id = request.values.get(\"cust_id\")\n removeCustomer(cust_id)\n\n return created_request(\"Good\")", "def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)", "def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()", "def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def test_iam_project_delete(self):\n pass", "def delete_project(self):\n selected_project = pm.PyNode(self.list_projects.currentItem().text())\n ldtmaya.delete_surfacing_project(selected_project)\n self.update_ui_projects()", "def deleteProject(self,id):\n \n #self.projects.delete(id)\n #return newProject", "def prune_projects(cls, obj):\r\n\r\n dead = obj.projects - set([x.id() for x in sublime.windows()])\r\n for key in dead:\r\n obj.projects.remove(key)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that returns a 403 status if the user isn't logged in, instead of redirecting to the LOGIN_URL
def login_required_403(view):
    @wraps(view)
    def dec_view(request, *args, **kwargs):
        if not request.user.is_authenticated():
            return JsonResponse({"detail": "You have to log in"}, status=403)
        return view(request, *args, **kwargs)
    return dec_view
[ "def not_authenticated(func):\n def decorated(request, *args, **kwargs):\n if request.user.is_authenticated():\n next_ = request.GET.get(REDIRECT_FIELD_NAME,\n LOGIN_REDIRECT_URL)\n return HttpResponseRedirect(next_)\n return func(request, *args, **kwargs)\n return decorated", "def login_required(func):\n @wraps(func)\n def decorator(*args, **kargs):\n if not 'username' in session:\n return redirect(url_for('login'))\n return func(*args, **kargs)\n return decorator", "def login_required_or_forbidden(view_func):\n def wrapped_view(request, *posargs, **kwargs):\n if not request.user.is_authenticated():\n raise PermissionDenied('You must be logged in')\n return view_func(request, *posargs, **kwargs)\n return wrapped_view", "def non_agent_login_required(fn):\n def wrapper(request, *args, **kwargs):\n userprofile = request.session.get(\"userprofile\", None)\n if (\n request.user.is_authenticated() and not\n userprofile.is_agent):\n return fn(request, *args, **kwargs)\n else:\n return HttpResponseForbidden(request)\n\n return wrapper", "def authenticated_403(self):\n if self.get_current_user() is None:\n raise web.HTTPError(403)", "def authenticated_403(self):\n if self.current_user is None:\n raise web.HTTPError(403)", "def not_authenticated(func):\r\n def decorated(request, *args, **kwargs):\r\n if request.user.is_authenticated():\r\n next = request.GET.get(\"next\", \"/\")\r\n return HttpResponseRedirect(next)\r\n return func(request, *args, **kwargs)\r\n return decorated", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('base.login'))\n return view(**kwargs)\n\n return wrapped_view", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n abort(403)\n else:\n return redirect(url_for('security.login', next=request.url))", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def auth_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n return func(request)\n return wrapper", "def check_authentication(func):\n def decorated_func(request):\n return func(request) if request.user.is_authenticated else HttpResponseRedirect(reverse('login'))\n\n return decorated_func", "def login_required_for_token(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.USER is None:\n return redirect(url_for(\"api_v1_login\", next=request.url))\n return f(*args, **kwargs)\n\n return decorated_function", "def _handle_view(self, *args, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def login_view(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except MultipassException as e:\n 
return get_state().multipass.handle_auth_error(e, True)\n\n return decorator", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Login with an access code
def accesscode(request, code):
    employee = Employee.objects.get(access_code=code)
    user = employee.user
    user.backend = 'django.contrib.auth.backends.ModelBackend'
    login(request, user)
    return HttpResponseRedirect('/')
[ "def login():", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def login_in():", "def login(self):", "def login(arl):\n return deezer.login_via_arl(arl)", "def mfa_login(self, mfacode):\n\n try:\n\n response = self.post(\"/authentication/login\",\n {\"user\": self.user, \"password\": self.password, \"token\": int(mfacode)})\n if response.status_code == 200:\n print(\"{0}: Orchestrator MFA login success\".format(self.url))\n # get and set X-XSRF-TOKEN\n for cookie in response.cookies:\n if cookie.name == \"orchCsrfToken\":\n self.headers[\"X-XSRF-TOKEN\"] = cookie.value\n return True\n else:\n print(\"{0}: Orchestrator MFA login failed: {1}\".format(self.url, response.text))\n return False\n except:\n print(\"{0}: Exception - unable to connect to Orchestrator\".format(self.url))\n return False", "def _login(self, login):\n self._tokens.clear()\n name, password = login\n\n params = {\"action\": \"query\", \"meta\": \"tokens\", \"type\": \"login\"}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n try:\n token = result[\"query\"][\"tokens\"][\"logintoken\"]\n except KeyError:\n raise exceptions.LoginError(\"Couldn't get login token\")\n\n params = {\"action\": \"login\", \"lgname\": name, \"lgpassword\": password,\n \"lgtoken\": token}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n\n res = result[\"login\"][\"result\"]\n if res == \"Success\":\n self._tokens.clear()\n self._save_cookiejar()\n return\n if res == \"Illegal\":\n e = \"The provided username is illegal.\"\n elif res == \"NotExists\":\n e = \"The provided username does not exist.\"\n elif res == \"EmptyPass\":\n e = \"No password was given.\"\n elif res == \"WrongPass\" or res == \"WrongPluginPass\":\n e = \"The given password is incorrect.\"\n else:\n e = \"Couldn't login; server says '{0}'.\".format(res)\n raise exceptions.LoginError(e)", "def custom(code: str, state: str):\n\tpath = frappe.request.path[1:].split(\"/\")\n\tif len(path) == 4 and path[3]:\n\t\tprovider = path[3]\n\t\t# Validates if provider doctype exists\n\t\tif frappe.db.exists(\"Social Login Key\", provider):\n\t\t\tlogin_via_oauth2(provider, code, state, decoder=decoder_compat)", "def simple_login_post(backend, request):\n username = request.params.get(\"username\")\n password = request.params.get(\"password\")\n\n\n if backend.simple_login(username, password):\n token = backend.issue_access_token()\n # set header, optionally redirect\n return\n # login failure\n return", "def login(self):\n self.client.login(username='john', password='john')", "def login(self):\n \n self.br.open(\"http://kanji.koohii.com/login\")\n self.br.form = list(self.br.forms())[0]\n self.br[\"username\"] = USER\n self.br[\"password\"] = PASSWORD\n my_response = self.br.submit()\n print \"Login successful\"", "def login(request):\n\n if request.method == \"POST\":\n username = request.POST.get(\"username\", \"\")\n password = request.POST.get(\"password\")\n next = request.GET.get(\"next\", \"/index/\")\n v_code = request.POST.get(\"v_code\")\n if v_code.upper() == request.session.get(\"v_code\"):\n user = auth.authenticate(request, username=username, password=password)\n if user:\n auth.login(request, user)\n return redirect(next)\n else:\n return render(request, \"login.html\", {\"error_msg\": \"*用户或者密码错误!\", \"user\": username})\n else:\n return render(request, \"login.html\", 
{\"code_msg\": \"*验证码有误!\", \"user\": username})\n\n return render(request, \"login.html\", {\"user\": \"\"})", "def steam_login(username, password, two_factor_code):\n steam_client = SteamClient() # Make steam client object\n steam_client.login(username, password, two_factor_code=two_factor_code) # Log in\n if not steam_client.logged_on: # Login failed\n raise SteamLoginException('Login failed.')\n return steam_client", "def login(self) -> None:\n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.url, self.login_secret = sObj.getAuthorizeURL()\n print(self.url)\n self.oauth_token = input('token: ')\n self.oauth_verifier = input('verifier: ')", "async def login_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = await crud.user.authenticate(\n username=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=\"Incorrect credentials\")\n elif not user.is_active:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Inactive user\")\n elif not user.is_email_verified:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Please verify your account via email\")\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": create_access_token(\n data={\"user_id\": user.id}, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def login():\n # if we are already logged in, go back to were we came from\n if g.user is not None:\n return redirect(oid.get_next_url())\n if request.method == 'POST':\n openid = request.form.get('openid')\n if openid:\n pape_req = pape.Request([])\n return oid.try_login(openid, ask_for=['email', 'nickname'],\n ask_for_optional=['fullname'],\n extensions=[pape_req])\n return render_template('login/login.html', next=oid.get_next_url(),\n error=oid.fetch_error())", "def Login(self, username, password, onSuccess, onFailure):\n pass", "def _login(self, session):\n payload = {\n 'controller': 'Overview',\n 'action': 'Login',\n 'id': '0',\n 'idTextPassword': self.password\n }\n r = session.post(self.baseurl + '/cgi-bin/Hn_login.cgi', data=payload)\n lines = r.text.split('\\n')\n for line in lines:\n if 'msgLoginPwd_err' in line:\n return False\n\n return True", "def authenticateMidas(self):\r\n self.communicator = pydas.core.Communicator(self.url)\r\n self.token = self.communicator.login_with_api_key(self.cur_email, self.cur_apikey)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for all employees (in the company) or for the current user, depending on employee role
def all_employees(request, company_id=None):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    company_super_user = current_employee.isCompanySuperUserOrHigher()
    if company_id:
        company = Company.objects.get(pk=company_id)
    else:
        company = current_employee.company
    if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
        raise PermissionDenied()
    change_company_form = ChangeCompanyForm(initial=dict(company=company))
    return TemplateResponse(
        request,
        'all_employees.html',
        {
            'user': request.user,
            'company_super_user': company_super_user,
            'company': company,
            'change_company_form': change_company_form,
        }
    )
[ "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def show_employer_index():\n if 'username' not in flask.session:\n return flask.redirect(flask.url_for(\"show_login\"))\n\n # get all openings that belong to employer\n openings_query = \"\"\"\n SELECT *\n FROM employers, openings\n WHERE employers.email = openings.email\n \"\"\"\n openings_query = job_board.model.get_db().execute(openings_query).fetchall()\n\n context = {\"openings\": openings_query}\n\n return flask.render_template(\"employer_index.html\", **context)", "def all(self, request, *args, **kwargs):\n user = self.request.user\n role_user_id = getattr(user, user.role).id\n homework_role_dict = {\n 'teacher': Homework.objects.filter(lection__course__teachers__id=role_user_id),\n 'student': Homework.objects.filter(lection__course__students__id=role_user_id),\n }\n queryset = homework_role_dict[user.role]\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def employee_obj(request):\r\n \"\"\" Usage- in case of 'ifm-ldap server and EmployeeMaster table are in consideration. Else use the django user. 
\"\"\"\r\n return (EmployeeMaster.objects.get(user=request.user))", "def get_role(self, employee):\n\n company_id = self.context.get('company_id')\n company_member = CompanyMember.objects.get(user_id=employee.id,\n company_id=company_id)\n return BaseCompanyMemberSerializer(company_member, read_only=True).data", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def employee_requests_all(request):\n #user_id = request.GET.get('user_id')\n user = get_user(request)\n if user:\n user_requests = UserProjectRequest.objects.filter(employee=user.id)\n user_project_request_serializer = UserProjectRequestSerializer(user_requests, many=True)\n return Response({\"user_requests\":user_project_request_serializer.data}, status=status.HTTP_200_OK)\n return Response({\"message\": \"User not found\"}, status=status.HTTP_400_BAD_REQUEST)", "def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )", "def show_employees_list(request):\n if request.method == \"GET\":\n employee = Employee.objects.all()\n if \"name\" in request.GET:\n name = request.GET[\"name\"]\n employee = employee.filter(fullName__icontains=name)\n if \"empID\" in request.GET:\n emp_id = request.GET[\"empID\"]\n employee = employee.filter(empID__icontains=emp_id)\n if employee:\n return render(request, 'deletedetails/employeesDelete.html', {\"employees\": employee, \"values\":request.GET})\n messages.error(\n request, 'Cant find employee with entered detail')\n return redirect('listEmployees')\n return render(request, 'deletedetails/employeesDelete.html')", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n 
print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def employees_only(titles=None):\n def employee_wrapper(view_func):\n @wraps(view_func)\n def wrapped(*args, **kwargs):\n if not current_user.is_employee:\n abort(401)\n if titles is not None:\n emp = Employee.query.filter_by(user_id=current_user.id).first()\n if emp is None:\n abort(503)\n if emp.title not in titles:\n abort(401)\n return view_func(*args, **kwargs)\n return wrapped\n return employee_wrapper", "def filter_employee_request_permission_wise(self, queryset):\n user = self.request.user.person_group\n if user:\n if PersonGroup.EMPLOYEE.value == user:\n queryset = queryset.filter(\n request_by=self.request.user\n )\n\n elif PersonGroup.MANAGE.value == user:\n queryset = queryset.filter(\n request_status=RequestStatus.REVIEWED.value\n )\n return queryset", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def get_employees(self):\n self.employee_list = []\n try:\n employees = self.db['employees'].all()\n # loop through what we get back from DB\n for emp in self.db['employees']:\n self.employee_list.append(\n employee.Employee(int(emp['id']), str(emp['name']), str(emp['password']), int(emp['role'])))\n except:\n print(\"error\")\n self.statusbar.showMessage(\"Error loading employee data\", 4000)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for all employees current user is a manager for with empty development plan
def get_manager_employees(request):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()
    if manager_employees:
        emp_list=[]
        for emp in manager_employees:
            emp_data={}
            emp_data["id"] = emp.id
            emp_data["username"] = emp.user.username
            emp_data["first_name"] = emp.user.first_name
            emp_data["last_name"] = emp.user.last_name
            emp_data["manager_id"] = emp.manager.id
            # emp_data["status_questions"] = emp.status_questions
            # employee_role = EmployeeRole.objects.filter(employee=emp).all()
            # name_role_list = []
            # for obj in employee_role:
            #     name_role_list.append(obj.role.name)
            # emp_data["roles"] = name_role_list
            emp_list.append(emp_data)
        data = {"employees:": emp_list}
        return JsonResponse(status=201, data=data)
    else:
        return JsonResponse("The user with id={} isn't a manager for any user".format(current_employee.user.id), status=404)
[ "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in 
all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n 
return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def test_ReportingPeriodDetailView_current_employee_set_false(self):\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 2\n )", "def test_ReportingPeriodDetailView_current_employee_set_false(self):\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n ),\n user=self.regular_user\n )\n self.assertEqual(\n len(response.html.find_all('tbody')), 2\n )", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = 
DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n 
comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def is_employee():\n return _is_member('uw_employee')", "def is_managers(user):\r\n\r\n if Managers.objects.filter(user=user):\r\n return True\r\n return is_administrator(user)", "def test_managers_who_does_nothing(self):\n # Add 2 managers who do nothing\n self.manager_id = self._add_person(\"Manager\", \"ARRAY['Database']\", 30)\n self.manager_id1 = self._add_person(\"Manager\", \"ARRAY['AI']\", 30)\n\n # Run the query\n q = self.generate_query('view_manager_report', ())\n res = self.execute_query(q)\n assert len(res) == 2, f'There is suppose to be 2 entries {res}'", "def controlpanel_view_mentees():\n\n search = SearchForm(request.form)\n user_type = 'Mentee'\n stats_dict = get_stats()\n queries = db.session.query(User, Mentee).filter(User.is_active == True).\\\n join(Mentee, User.user_id == Mentee.user_id).all()\n\n if request.method == 'POST' and search.validate_on_submit():\n return search_results(search, 'mentee')\n return render_template('admin/admin_view_users.html', search=search, queries=queries, user_type=user_type,\n stats_dict=stats_dict, type='mentees', title=\"Mentees\")", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 
'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def filter_employee_request_permission_wise(self, queryset):\n user = self.request.user.person_group\n if user:\n if PersonGroup.EMPLOYEE.value == user:\n queryset = queryset.filter(\n request_by=self.request.user\n )\n\n elif PersonGroup.MANAGE.value == user:\n queryset = queryset.filter(\n request_status=RequestStatus.REVIEWED.value\n )\n return queryset", "def query_usr_manager(self, manager_name):", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def get_fieldsets(self, request, obj=None):\n fieldsets = super(EmployeeAdmin, self).get_fieldsets(request, obj)\n current_user = request.user\n user_groups = [v.name for v in current_user.groups.all()]\n if EmployeeAdmin.GROUP_NAMES['ADMINS'] not in user_groups and not current_user.is_superuser:\n fieldsets = filter(lambda fieldset: fieldset[0] != _('Permissions'), fieldsets)\n fieldsets = filter(lambda fieldset: fieldset[0] != _('Important dates'), fieldsets)\n return fieldsets", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def employee_obj(request):\r\n \"\"\" Usage- in case of 'ifm-ldap server and EmployeeMaster table are in consideration. Else use the django user. \"\"\"\r\n return (EmployeeMaster.objects.get(user=request.user))", "def employee_requests_all(request):\n #user_id = request.GET.get('user_id')\n user = get_user(request)\n if user:\n user_requests = UserProjectRequest.objects.filter(employee=user.id)\n user_project_request_serializer = UserProjectRequestSerializer(user_requests, many=True)\n return Response({\"user_requests\":user_project_request_serializer.data}, status=status.HTTP_200_OK)\n return Response({\"message\": \"User not found\"}, status=status.HTTP_400_BAD_REQUEST)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for creating employee in company
def create_employee(request, company_id):
    company = Company.objects.get(pk=company_id)
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
        logUnauthorizedAccess("User tried to create_employee", request)
        raise PermissionDenied()
    form = EmployeeForm(request, initial=dict(company=company))
    form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)
    # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
    #     Q(company=company) | Q(company__isnull=True))
    # data = {
    #     'employee_form': form.cleaned_data,
    #     'company': company.cleaned_data["name"]
    # }
    return TemplateResponse(
        request,
        'mus/create_employee_form.html',
        {
            'employee_form': form,
        }
    )
    # data = {
    #     'employee_form': form.cleaned_data,
    #     'company': company.cleaned_data["name"]
    # }
    # return JsonResponse(status=200, data=data)
[ "def post(self, request, *args, **kwargs):\n response = super().post(request, *args, **kwargs)\n company = self.object\n company.create_employee_data()\n return response", "def get(self, request):\n form = EmployeeForm()\n return render(request, 'employee/add-employee.html',\n {'form': form, 'func': 'Add'})", "def post(self, request):\n form = EmployeeForm(request.POST)\n if form.is_valid():\n new_employee = form.save()\n return redirect('employee:employee_detail', id=new_employee.id)\n else:\n return render(request, 'employee/add-employee.html', {'form': form, 'func': 'Add'})", "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_company():\n\n data = request.get_json()\n\n new_company = Investment(company=data[\"company\"])\n db.session.add(new_company)\n db.session.commit()\n\n return jsonify({\"message\": \"New investment company.\"})", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. 
\")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def certification_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n certification_form = CertificationForm()\n return render_to_response('certification_form.html', {'form': certification_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n certification_form = CertificationForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if certification_form.is_valid():\n of = certification_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('certification_form.html', \n {'form': certification_form, 'form_errors': certification_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def create_company(cls, **kwargs):\n return cls._do_call(\n 'POST', Retaincc.api_endpoint + 'companies', params=kwargs)", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n 
pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : 
applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def assign_employee(id):\n check_admin()\n\n employee = Employee.query.get_or_404(id)\n\n # prevent admin from being assigned a department or role\n if employee.is_admin:\n abort(403)\n\n form = EmployeeAssignForm(obj=employee)\n if form.validate_on_submit():\n employee.department = form.department.data\n employee.role = form.role.data\n employee.position = form.position.data\n db.session.add(employee)\n db.session.commit()\n flash('Đã sửa nhân viên thành công.')\n\n # redirect to the roles page\n return redirect(url_for('admin.list_employees'))\n\n return render_template('admin/employees/employee.html',\n employee=employee, form=form,\n title='Sửa nhân viên')", "def create(self, request, company_pk=None):\n\n get_object_or_404(Company, pk=company_pk)\n\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid() and serializer.save():\n return Response(\n {'interview': serializer.data}, status=status.HTTP_201_CREATED\n )\n else:\n return Response({'errors': serializer.errors},\n status=status.HTTP_400_BAD_REQUEST)", "def test_create_education_company_successful(self):\n\n self.superuser = get_user_model().objects.create_superuser(\n 'test@gmail.com',\n 'testpass'\n )\n self.client.force_authenticate(self.superuser)\n payload = {\"name\": \"Test Company\"}\n self.client.post(EDUCATION_URL, payload)\n\n exist = Company.objects.filter(\n name=payload['name'],\n ).exists()\n self.assertTrue(exist)", "def test_website_companies_create(self):\n pass", "def index():\n companydetails = CompanyDetails()\n employeeInfo = EmployeeInfo()\n\n if companydetails.validate_on_submit():\n return render_template('showData.html', form = companydetails)\n\n if employeeInfo.validate_on_submit():\n return render_template('showData.html', form1 = employeeInfo) \n\n return render_template('index.html',form1 = employeeInfo, form = companydetails)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for editing employee
def edit_employee(request, employee_id):
    employee = Employee.objects.get(pk=int(employee_id))
    current_employee = Employee.objects.get(user__pk=request.user.pk)

    assert isinstance(employee, Employee)
    assert isinstance(current_employee, Employee)

    # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
    #     raise PermissionDenied()

    if not current_employee.hasAccessTo(employee):
        raise PermissionDenied()

    form = EditEmployeeForm(request.user, employee, {
        'first_name': employee.user.first_name,
        'last_name': employee.user.last_name,
        'email': employee.user.email,
        'manager': employee.manager.id if employee.manager else 0,
        'language_code': employee.language_code,
        # 'development_plan_type': employee.development_plan_type.id,
        'is_manager': employee.is_manager
    })
    if 'manager' in form.fields:
        managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)
        form.fields['manager'].queryset = managerQS
    # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
    #     Q(company__pk=employee.company.pk) | Q(company__isnull=True)
    # )
    is_me = employee.user.pk == request.user.pk
    return TemplateResponse(
        request,
        'mus/edit_employee_form.html',
        {
            'edit_employee_form': form,
            'employee_id': employee_id,
            'me': is_me,
            'name': employee.user.get_full_name()
        }
    )
[ "def admin_edit_employee(uuid):\n form = AdminEditProfileForm()\n employee = EmployeeApiController.get_employee_by_uuid(uuid)\n form.department.choices = [(dep[\"uuid\"], dep[\"name\"]) for dep in DepartmentApiController.get_all_departments()]\n fullname = employee[\"last_name\"] + \" \" + employee[\"first_name\"]\n if form.validate_on_submit():\n department = DepartmentApiController.get_department_by_uuid(form.department.data)\n response = EmployeeApiController.patch_employee(department=department, position=form.position.data,\n salary=form.salary.data, is_admin=form.is_admin.data,\n uuid=uuid)\n if response.status_code == 200:\n flash(\"Changes have been saved\")\n else:\n flash(f\"Something went wrong while editing\")\n return redirect(url_for(\"admin_edit_employee\", uuid=uuid))\n elif request.method == \"GET\":\n form.department.process_data(employee[\"department\"][\"uuid\"])\n form.position.data = employee[\"position\"]\n form.salary.data = employee[\"salary\"]\n form.is_admin.process_data(employee[\"is_admin\"])\n return render_template(\"admin_edit_employee.html\", title=\"Edit Profile\", form=form, fullname=fullname)", "def onchange_employee(self):\n cr, uid, context = self.env.args\n emp_id = self.employee_id.id\n if emp_id:\n #If employee is there fetch the related room and bed\n bed_id = self.env['beds.beds'].search([('employee_id','=',self.employee_id.id),('room_id.accommodation_id','=',context.get('accommodation_id'))])\n if not bed_id:\n emp_name = self.employee_id.name\n raise ValidationError('The Employee is not accommodated here!' + emp_name)\n self.bed_id = bed_id.id\n self.room_id = bed_id.room_id.id", "def get(self, request):\n form = EmployeeForm()\n return render(request, 'employee/add-employee.html',\n {'form': form, 'func': 'Add'})", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def onchange_employee(self):\n self.department_id = self.employee_id.department_id.id or False\n self.company_id = self.employee_id.company_id.id or False\n self.branch_id = self.employee_id.branch_id or False", "def post(self, request):\n form = EmployeeForm(request.POST)\n if form.is_valid():\n new_employee = form.save()\n return redirect('employee:employee_detail', id=new_employee.id)\n else:\n return render(request, 'employee/add-employee.html', {'form': form, 'func': 'Add'})", "def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def Department_Detail_View(request, pk):\n department = get_object_or_404(Department, pk=pk)\n employees = Employee.objects.all()\n return render(request, 'WorkforceManagement/Department_Detail.html', {'department': department, 'employees': employees})", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def edit_view(self, 
request, object_id):\n kwargs = {'model_admin': self, 'object_id': object_id}\n view_class = self.edit_view_class\n return view_class.as_view(**kwargs)(request)", "def edit_education(request):\n\n item_form = EducationItemForm(request.POST)\n item_id = request.POST['item_id']\n if item_form.is_valid():\n item = item_form.save(commit=False)\n item.id = item_id\n education_item = UserProfileService.edit_education_item(request.user, item)\n html = render_to_string('mycraze/item/education.html', \n {'item': education_item, 'has_edit_permission': True})\n return HttpResponse(html)", "def offers_edit(offer_id):\n offer = offers.find_one({'_id': ObjectId(offer_id)})\n return render_template('offers_edit.html', offer=offer,\n properties=properties)", "def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)", "def assign_employee(id):\n check_admin()\n\n employee = Employee.query.get_or_404(id)\n\n # prevent admin from being assigned a department or role\n if employee.is_admin:\n abort(403)\n\n form = EmployeeAssignForm(obj=employee)\n if form.validate_on_submit():\n employee.department = form.department.data\n employee.role = form.role.data\n employee.position = form.position.data\n db.session.add(employee)\n db.session.commit()\n flash('Đã sửa nhân viên thành công.')\n\n # redirect to the roles page\n return redirect(url_for('admin.list_employees'))\n\n return render_template('admin/employees/employee.html',\n employee=employee, form=form,\n title='Sửa nhân viên')", "def edit_experience(request):\n\n item_form = ExperienceItemForm(request.POST)\n item_id = request.POST['item_id']\n if item_form.is_valid():\n item = item_form.save(commit=False)\n item.id = item_id\n experience_item = UserProfileService.edit_experience_item(request.user, item)\n html = render_to_string('mycraze/item/experience.html', \n {'item': experience_item, 'has_edit_permission': True})\n return HttpResponse(html)", "def edit_view(request):\n\n try:\n query = request.dbsession.query(MyModel)\n entry = query.filter(MyModel.id ==\n int(request.matchdict['id'])).first()\n except DBAPIError:\n return Response(db_err_msg, content_type='text/plain', status=500)\n\n if request.method == \"GET\":\n return detail_view(request)\n elif request.method == 'POST':\n if request.POST['title'] != '' or request.POST['body'] != '':\n new_title = request.POST['title']\n new_body = request.POST['body']\n entry.title = new_title\n entry.body = new_body\n # return HTTPFound(request.route_url('home'))\n else:\n error_msg = \"Cannot submit empty entry.\"\n return {'error_msg': error_msg}\n return HTTPFound(request.route_url('home'))", "def show_employees_list(request):\n if request.method == \"GET\":\n employee = Employee.objects.all()\n if \"name\" in request.GET:\n name = request.GET[\"name\"]\n employee = employee.filter(fullName__icontains=name)\n if \"empID\" in request.GET:\n emp_id = request.GET[\"empID\"]\n employee = employee.filter(empID__icontains=emp_id)\n if employee:\n return render(request, 'deletedetails/employeesDelete.html', {\"employees\": employee, \"values\":request.GET})\n messages.error(\n request, 'Cant find employee with entered detail')\n return redirect('listEmployees')\n return render(request, 'deletedetails/employeesDelete.html')", "def editRes(res_id):\n\n\treturn \"A form for editing a Restaurant\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for list of actions of (current) employee
def action_list(request, employee_id=None):
    if employee_id:
        employee = Employee.objects.get(pk=employee_id)
        current_employee = Employee.objects.get(user__pk=request.user.pk)
        if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
            raise PermissionDenied()
    else:
        employee = request.user.employee_user.first()
    actions = employee.action_set.all()
    return TemplateResponse(
        request,
        'mus/action_list.html',
        dict(
            actions=actions,
            employee=employee
        )
    )
[ "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_employee_menu(self):\n \n action_str = \"\"\n\n while True:\n print(self.LENGTH_STAR * \"*\")\n print(\"EMPLOYEES MENU\\n\")\n print(\"1 Print overview of all employees\")\n print(\"2 Pilots\")\n print(\"3 Cabin Crew\")\n print(\"B Back\\n\")\n\n action_str = self.choose_action([\"1\", \"2\" ,\"3\" ,\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"1\", \"2\", \"3\", \"b\"])\n\n if action_str == \"1\":\n self.show_overview_of_all_employees()\n\n elif action_str == \"2\":\n self.show_pilot_or_crew_menu(self.PILOT)\n\n elif action_str == \"3\":\n self.show_pilot_or_crew_menu(self.CREW)\n\n elif action_str == \"b\":\n return", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def show_employer_index():\n if 'username' not in flask.session:\n return flask.redirect(flask.url_for(\"show_login\"))\n\n # get all openings that belong to employer\n openings_query = \"\"\"\n SELECT *\n FROM employers, openings\n WHERE employers.email = openings.email\n \"\"\"\n openings_query = job_board.model.get_db().execute(openings_query).fetchall()\n\n context = {\"openings\": openings_query}\n\n return flask.render_template(\"employer_index.html\", **context)", "def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)", "def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def act_show_log_training(self):\n self.ensure_one()\n res = self.env['ir.actions.act_window'].for_xml_id('nievecus_hr_indonesia_training',\n 'nievecus_training_action2')\n res.update(\n context=dict(self.env.context, default_employee_id=self.id, search_default_parent_false=True),\n domain=[('employee_id', '=', self.id)]\n )\n return res", "def action_detail(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=int(action_id))\n # if not employee.isEnsoUser() and 
employee.company.pk != action.employee.company.pk:\n if not employee.hasAccessTo(action.employee):\n raise PermissionDenied()\n\n if request.method == 'POST':\n form = ActionCommentForm(request.POST)\n if form.is_valid():\n form.save(request.user, action)\n return HttpResponseRedirect('/action/%s' % action_id)\n else:\n form = ActionCommentForm()\n return TemplateResponse(\n request,\n 'mus/action_detail.html',\n dict(\n action=action,\n form=form\n )\n )", "def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)", "def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )", "def show_employees_list(request):\n if request.method == \"GET\":\n employee = Employee.objects.all()\n if \"name\" in request.GET:\n name = request.GET[\"name\"]\n employee = employee.filter(fullName__icontains=name)\n if \"empID\" in request.GET:\n emp_id = request.GET[\"empID\"]\n employee = employee.filter(empID__icontains=emp_id)\n if employee:\n return render(request, 'deletedetails/employeesDelete.html', {\"employees\": employee, \"values\":request.GET})\n messages.error(\n request, 'Cant find employee with entered detail')\n return redirect('listEmployees')\n return render(request, 'deletedetails/employeesDelete.html')", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)", "def get(self, request):\n form = EmployeeForm()\n return render(request, 'employee/add-employee.html',\n {'form': form, 'func': 'Add'})", "def open_employee_assets(self):\n owner_id = len(self) > 0 and self[0].employee_id.id or False\n if owner_id:\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Assets',\n 'res_model': 'tms.asset',\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'context': {'search_default_owner_id': owner_id},\n }\n else:\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Assets',\n 'res_model': 'tms.asset',\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'domain': [('owner_id', '=', False)],\n }", "def show_timeline(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n action = None\n if pk:\n action = workflow.actions.filter(pk=pk).first()\n\n if not action:\n # The action is not part of the selected workflow\n return redirect('home')\n logs = workflow.logs.filter(payload__action_id=action.id)\n else:\n logs = workflow.logs\n\n event_names = [\n Log.SCHEDULE_EMAIL_EXECUTE,\n Log.DOWNLOAD_ZIP_ACTION,\n Log.SCHEDULE_JSON_EXECUTE,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SCHEDULE_EMAIL_EDIT,\n Log.SCHEDULE_JSON_EDIT,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SURVEY_INPUT,\n ]\n\n # Filter the logs to display and transform into values (process the json\n # and the long value for the log 
name\n logs = [\n {'id': log.id,\n 'name': log.get_name_display(),\n 'modified': log.modified,\n 'payload': json.dumps(log.payload, indent=2),\n 'action_name': log.payload['action'],\n 'action_id': log.payload['action_id']}\n for log in logs.filter(name__in=event_names)\n ]\n\n return render(\n request,\n 'action/timeline.html',\n {'event_list': logs, 'action': action})", "def personalActions(self):\r\n actions = []\r\n for action in self.contextState.actions('user'):\r\n actions.append({\r\n 'id': action['id'],\r\n 'url': action['url'],\r\n 'title': action['title'],\r\n 'description': action['description'],\r\n })\r\n\r\n return actions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for detail of action
def action_detail(request, action_id):
    employee = request.user.employee_user.first()
    action = Action.objects.get(pk=int(action_id))
    # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:
    if not employee.hasAccessTo(action.employee):
        raise PermissionDenied()

    if request.method == 'POST':
        form = ActionCommentForm(request.POST)
        if form.is_valid():
            form.save(request.user, action)
            return HttpResponseRedirect('/action/%s' % action_id)
    else:
        form = ActionCommentForm()
    return TemplateResponse(
        request,
        'mus/action_detail.html',
        dict(
            action=action,
            form=form
        )
    )
[ "def action_detail(request, action_id):\n action = shortcuts.get_object_or_404(Action, pk=action_id)\n try:\n new_action = action.action_object.content_object\n if new_action.__class__.__name__ == 'Action':\n action = new_action\n except:\n pass\n return shortcuts.render_to_response('activity/detail.html', {\n 'action': action,\n }, context_instance=template.RequestContext(request))", "def action_detail(id):\n\n action = Action.query.get_or_404(id)\n return action_schema.jsonify(action)", "def action(self):\n pass", "def action_to_view(action, modeladmin):\n def action_view(request, object_id=1, modeladmin=modeladmin, action=action):\n queryset = modeladmin.model.objects.filter(pk=object_id)\n response = action(modeladmin, request, queryset)\n if not response:\n opts = modeladmin.model._meta\n url = 'admin:%s_%s_change' % (opts.app_label, opts.model_name)\n return redirect(url, object_id)\n return response\n return action_view", "def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:\n return redirect(return_url)\n\n model = self.get_one(id)\n\n if model is None:\n flash(gettext('Record does not exist.'), 'error')\n\n if self.details_modal and request.args.get('modal'):\n template = self.details_modal_template\n else:\n template = self.details_template\n\n relationship_views = []\n for relationship in self.model_relationship_views:\n relationship_view = self.model_relationship_views[relationship]\n bp = relationship_view.blueprint\n endpoint = '{}.ajax_config'.format(relationship_view.blueprint.name)\n data = {\n 'field': relationship,\n 'title': relationship_view.title,\n 'config_url': self.get_url(endpoint, model_id=id)\n }\n relationship_views.append(data)\n\n return self.render(\n template,\n model=model,\n details_columns=self._details_columns,\n get_value=self.get_detail_value,\n relationship_views=relationship_views,\n return_url=return_url\n )", "def detail_pharmacie(request):\n\tpass", "def print_details(self):\n self.view.print_details()", "def actionURL(self):\n raise NotImplementedError()", "def getActionName(self):\n return \"%s__%s__%s\" % (self.domain,self.category,self.methodname)", "def describe_mitigation_action(actionName=None):\n pass", "def get_action(self):\n return self.action", "def status_view(self):\n return self.post(action=\"status_trn\")", "def action():\n return render_template('layout.html', title=\"Action\", tweets=recommender.get_tweets(\"best action netflix\"))", "def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value", "def detail_view(request):\n the_id = int(request.matchdict['id'])\n session = 
request.dbsession\n journal = session.query(Journal).get(the_id)\n if not journal:\n raise HTTPNotFound\n\n return {\n 'page': 'Journal Entry',\n 'journal': journal\n }", "def actor_detail(request, id_):\n\n template = \"actor_detail.html\"\n analyst = request.user.username\n (new_template, args) = get_actor_details(id_,\n analyst)\n if new_template:\n template = new_template\n return render_to_response(template,\n args,\n RequestContext(request))", "def detail(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n user = request.user\n return render(request, 'kvent/event-detail.html', {'event': event, 'user': user})", "def detail():\n id = request.args.get(\"id\")\n projectDetail = session.query(Projects).filter(Projects.id == id).all()\n return render_template(\"detail.html\", projectDetail=projectDetail)", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create LeaderModel and send it as a PDF to the browser
def get_leader_model_pdf(currentEmpl, employees):
    lm = LeaderModel()
    employee_actions = {}
    legend = []
    colors = {}
    errors = {'noactions': []}
    # numbered_actions = {}
    for empl in employees:
        if not currentEmpl.hasAccessTo(empl):
            raise PermissionDenied()
        actions = empl.action_set.all()
        if not len(actions):
            errors['noactions'].append(empl)
            continue
        lkey = empl.user.first_name + " " + empl.user.last_name
        legend.append(lkey)
        if not lkey in employee_actions:
            employee_actions[lkey] = {}
        for action in actions:
            if not action.difficulty or not action.type:
                errors['noactions'].append(empl)
                continue
            circle_number = lm.addCircle(action)
            latest_comment = action.getLatestComment()
            employee_actions[lkey][circle_number] = {
                'name': action.title,
                'type': action.type,
                'difficulty': action.getDifficultyText(),
                'comment': latest_comment
            }
            if lkey not in colors:
                color = lm.getEmployeeColors(empl.id)
                colors[lkey] = "rgb({}, {}, {})".format(color[0], color[1], color[2])
    if len(errors['noactions']):
        return errors
    lm_filename = path.join(settings.STATIC_ROOT, "leadermodel_{}.png".format(currentEmpl.id))
    lm.writeImage(lm_filename)
    #
    # Write PDF
    pdfFilename = path.join(settings.FILES_ROOT, "leadermodel_{}.pdf".format(currentEmpl.id))
    template = get_template('mus/leader_model_pdf.html')
    context = Context({
        'site_url': settings.SITE_URL,
        'lm_filename': lm_filename,
        'employee_actions': employee_actions,
        'colors': colors,
        'legend': legend
    })
    html = template.render(context)
    # html = html.replace('<li>','<li><img class="square" src="http://test.nxtlvl.dk/static/img/square.png" />')
    result = open(pdfFilename, 'wb')
    pisa.pisaDocument(StringIO.StringIO(
        html.encode("UTF-8")), dest=result)
    result.close()
    wrapper = FileWrapper(file(pdfFilename))
    response = HttpResponse(wrapper, content_type='application/pdf')
    response['Content-Disposition'] = 'attachment;filename=ledermodel.pdf'
    response['Content-Length'] = os.path.getsize(pdfFilename)
    return response
    # return HttpResponseRedirect('/employee/all/%d' % int(company_id))
[ "def pdfReceiver(request, model=''):\n\n\tinput_str = ''\n\tinput_str += parsePOST(request)\n\t# packet = io.StringIO() # write to memory\n\tpacket = io.BytesIO()\n\n\ttry:\n\t\tpisa.CreatePDF(input_str, dest=packet)\n\texcept ValueError as error:\n\t\t# triggered from the elusive invalid color value issue:\n\t\tlogging.warning(\"elusive invalid color value, defaulting html background-color to FFFFFF\")\n\t\tpisa.CreatePDF(input_str, dest=packet, default_css=\"body{background-color:#FFFFFF;}\")\n\n\n\tjid = MetabolizerCalc().gen_jid() # create timestamp\n\tresponse = HttpResponse(packet.getvalue(), content_type='application/pdf')\n\tresponse['Content-Disposition'] = 'attachment; filename=' + model + '_' + jid + '.pdf'\n\tpacket.close() # todo: figure out why this doesn't solve the 'caching problem'\n\treturn response", "def create_pdf(self) -> None:\n print(f\"making {self.pdf.title}\")\n if self.pdf.exists():\n self.pdf.path.unlink()\n\n canvas = Canvas(str(self.pdf.path))\n canvas.setAuthor(\"Luca Ercole\")\n\n for page in self.pages:\n drawing = page.drawing\n canvas.setPageSize((drawing.width, drawing.height))\n\n renderPDF.draw(drawing, canvas, 0, 0)\n\n for bookmark in page.bookmarks:\n exact_ts = bookmark.dtime\n title = exact_ts.replace(minute=30).isoformat()[:16]\n pos = bookmark.position\n\n canvas.bookmarkPage(key=str(exact_ts), fit=\"XYZ\", top=pos)\n # canvas.addOutlineEntry(title=title, key=title)\n canvas.addOutlineEntry(title=title, key=str(exact_ts))\n # canvas.drawString(0, pos, str(pos))\n\n canvas.showPage()\n\n canvas.save()\n print(\"Done\")", "def bundle(handler, model):\n\n notebook_filename = model['name']\n notebook_name = os.path.splitext(notebook_filename)[0]\n pdf_filename = '{}.pdf'.format(notebook_name)\n\n with io.BytesIO() as pdf_buffer:\n pdf_body = convert_notebook_to_pdf(model)\n pdf_buffer.write(pdf_body)\n\n handler.set_attachment_header(pdf_filename)\n handler.set_header('Content-Type', 'application/pdf')\n\n # Return the buffer value as the response\n handler.finish(pdf_buffer.getvalue())", "def generate_pdf():\n # replace the API url with the one you just created\n url = \"https://wf5u5hrui6.execute-api.us-west-2.amazonaws.com/default/pdf_generation\"\n response = requests.post(url)\n return response.text", "def generate_pdf(request):\n reg_no = request.user.username\n user = get_object_or_404(User, username=reg_no)\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n \n response = HttpResponse(mimetype='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=JAM2012_Allottment.pdf'\n \n elements = []\n doc = SimpleDocTemplate(response)\n \n formatted_time = time.ctime()\n styles = getSampleStyleSheet()\n styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))\n \n ptext = '<font size=15>JAM 2012 - Admissions.</font>' \n elements.append(Paragraph(ptext, styles[\"Justify\"]))\n elements.append(Spacer(4, 20))\n \n ptext = '<font size=12>Registration Number: %s</font>' % reg_no \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n \n data = [] \n options = get_chosen_options(user) ##Put a check to show when the options chosen is empty\n \n if not(options):\n ptext = '<font size=12>No choices were selected.</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n doc.build(elements) \n return response \n \n ptext = '<font size=12>The choices selected by me are as follows: </font>' \n 
elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 30))\n \n counter = 1\n for opt in options:\n data.append([counter, opt.opt_code, opt.opt_location, opt.opt_name])\n counter = counter + 1\n \n t = Table(data)\n t.setStyle(TableStyle([('GRID',(0,0),(3,len(options)),1,colors.black),\n ('TEXTCOLOR',(0,0),(0,-1),colors.green)]))\n \n elements.append(t) \n \n elements.append(Spacer(4, 30))\n \n ptext = '<font size=12>I hereby declare that the order of preference given by me for my eligible programmes is final. </font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 25))\n \n ptext = '<font size=12>Signature of the Candidate</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 20))\n \n ptext = '<font size=12>%s</font>' % formatted_time\n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n \n doc.build(elements)\n \n return response", "def export_classic(request):\n #resume = get_object_or_404(Resume.objects.filter(id=resume_id))\n resume = get_object_or_404(Resume.objects.filter(firstname='Vijay'))\n pdf, result = export.export_pdf(resume, export.classic)\n raw_pdf = result.getvalue()\n if not pdf.err:\n return HttpResponse(raw_pdf, content_type='application/pdf')\n return HttpResponse('We had some errors.')", "def export_classic1(request):\n #resume = get_object_or_404(Resume.objects.filter(id=resume_id))\n resume = get_object_or_404(Resume.objects.filter(firstname='Vijay'))\n pdf, result = export.export_pdf(resume, export.classic1)\n raw_pdf = result.getvalue()\n if not pdf.err:\n return HttpResponse(raw_pdf, content_type='application/pdf')\n return HttpResponse('We had some errors.')", "def toPDF(Infos):\n\n\n #returnPDF = PDFDocument(\"output\")\n #returnPDF.Infos.get(\"name\")\n returnPDF = PDF(\"Courier\", Infos.get(\"name\"))\n if Infos.get('contact'):\n returnPDF.contact(Infos.get(\"contact\"))\n if Infos.get('Current position'):\n returnPDF.currentposition(Infos.get(\"Current position\"))\n if Infos.get('Education'):\n returnPDF.currentposition(Infos.get(\"Education\"))\n if Infos.get('Langue'):\n returnPDF.currentposition(Infos.get(\"Langue\"))\n returnPDF.output(\"result.pdf\", 'F')", "def list_pdf(request):\n participant_list = User.objects.all()\n disciplines = Discipline.objects.all()\n\n html = render_to_string(\n \"participantlist_pdf.html\",\n {\"participant_list\": participant_list, \"disciplines\": disciplines},\n )\n response = HttpResponse(content_type=\"application/pdf; charset=utf-8\")\n response[\"Content-Disposition\"] = 'filename=\"list.pdf\"'\n weasyprint.HTML(string=html).write_pdf(\n response,\n stylesheets=[\n weasyprint.CSS(str(settings.STATIC_ROOT) + \"/list_pdf.css\")\n ],\n )\n return response", "def create_pdf(clf):\n dot_data = StringIO.StringIO() \n tree.export_graphviz(clf, out_file=dot_data)\n graph = pydot.graph_from_dot_data(dot_data.getvalue())\n graph.write_pdf('abalone.pdf')", "def generate_pdf(proposal, template):\n # Local import, as otherwise a circular import will happen because the proposal model imports this file\n # (And the document model imports the proposal model)\n from studies.models import Documents\n\n # Change language to English for this PDF, but save the current language to reset it later\n current_language = get_language()\n activate('en')\n\n documents = {\n 'extra': []\n }\n\n for document in Documents.objects.filter(proposal=proposal).all():\n if document.study:\n 
documents[document.study.pk] = document\n else:\n documents['extra'].append(document)\n\n # This try catch does not actually handle any errors. It only makes sure the language is properly reset before\n # reraising the exception.\n try:\n context = {'proposal': proposal, 'BASE_URL': settings.BASE_URL, 'documents': documents}\n pdf = ContentFile(render_to_pdf(template, context))\n proposal.pdf.save('{}.pdf'.format(proposal.reference_number), pdf)\n except Exception as e:\n activate(current_language)\n raise e\n\n # Reset the current language\n activate(current_language)", "def interventionPDF(request, id):\r\n intervention = Intervention.objects.get(id=id)\r\n\r\n # Create a file-like buffer to receive PDF data.\r\n buffer = io.BytesIO()\r\n\r\n # Create the PDF object, using the buffer as its \"file.\"\r\n p = canvas.Canvas(buffer)\r\n\r\n # Draw things on the PDF. Here's where the PDF generation happens.\r\n # See the ReportLab documentation for the full list of functionality.\r\n p.drawString(260, 750, \"Fiche Intervention\")\r\n\r\n p.drawString(50, 700, \"N° Intervention :\")\r\n p.drawString(140, 700, str(intervention.id))\r\n\r\n p.drawString(50, 660, \"Technicien : \")\r\n p.drawString(120, 660, str(intervention.matricule_technicien))\r\n\r\n p.drawString(50, 620, \"Client : \")\r\n p.drawString(95, 620, str(intervention.numero_client))\r\n p.drawString(50, 600, \"Adresse : \")\r\n p.drawString(108, 600, str(intervention.numero_client.adresse))\r\n p.drawString(50, 580, \"Téléphone : \")\r\n p.drawString(118, 580, str(intervention.numero_client.telephone))\r\n p.drawString(50, 560, \"Email : \")\r\n p.drawString(95, 560, str(intervention.numero_client.email))\r\n\r\n p.drawString(50, 520, \"Date de la visite :\")\r\n p.drawString(150, 520, str(intervention.date_visite.day) + \"/\" + str(intervention.date_visite.month) + \"/\" + str(intervention.date_visite.year))\r\n p.drawString(50, 500, \"Heure de la visite :\")\r\n p.drawString(155, 500, str(intervention.heure_visite.hour) + \"h\" + str(intervention.heure_visite.minute))\r\n\r\n\r\n\r\n # Close the PDF object cleanly, and we're done.\r\n p.showPage()\r\n p.save()\r\n\r\n filename = \"Fiche Intervention N°\" + str(intervention.id) + \" .pdf\"\r\n # FileResponse sets the Content-Disposition header so that browsers\r\n # present the option to save the file.\r\n buffer.seek(0)\r\n return FileResponse(buffer, as_attachment=True, filename=filename)", "def generate_pdf(self, data, template, save_path):\n\n if save_path and not os.path.exists(save_path):\n file_path_array = save_path.split(\"/\")\n save_file_dir = file_path_array[:-1]\n\n if not os.path.exists(\"/\".join(save_file_dir)):\n os.makedirs(\"/\".join(save_file_dir))\n\n # 读取模板文件\n empty_template = preppy.getModule(template)\n\n # 渲染模板文件\n render_data = {'data': data, 'static': self.statics_dir}\n\n # 渲染PDF页面\n render_rml = empty_template.getOutput(render_data)\n\n # 生成PDF\n binary_pdf = trml2pdf.parseString(render_rml)\n\n if save_path:\n # 保存PDF\n open(save_path, 'wb').write(binary_pdf)\n\n return binary_pdf", "def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. 
\n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response", "def make_document(doc):\n\n # Use the model to predict an average representationof data, using the mean value of each predictor\n centred_predict = model.master_data_frame.mean().to_dict()\n _, final_im, shape, texture_im = model.predict(**centred_predict)\n\n # Call the image tidy function to change RGB texture to RGBA\n final_im = _image_tidy(final_im)\n texture = _image_tidy(texture_im)\n\n # Set up the plot for the shape coordinates ###############################\n # Define a column data source\n shape_source = ColumnDataSource( {'x': shape[:,0], 'y': shape[:,1]} )\n\n # Instantiate plot object for shape coordinates\n shape_plot = figure(title = 'Predicted Shape', y_range = (900, 0), x_range = (0, 900))\n shape_plot.cross('x', 'y', size = 10, source = shape_source)\n\n # Define hover tool and add to plot\n hover = HoverTool( tooltips = [('x', '@x'), ('y', '@y')] )\n shape_plot.add_tools(hover)\n ###########################################################################\n\n # Set up a column data source for the actual warped face ##################\n # Define a column data source\n warp_source = ColumnDataSource( {'image': [final_im]} )\n\n # Instantiate plot object for warped image - add a constant extra few pixels to make sure image is not squashed to window\n warp_image_plot = figure(title = 'Predicted Face', y_range = (0, model.image_dims[0]+150), x_range = (0, model.image_dims[1]+150))\n warp_image_plot.image_rgba(image = 'image', x=0, y=0, dw=model.image_dims[1], dh=model.image_dims[0], source=warp_source)\n\n # Set up a column data source for the texture-only face ###################\n # Define a column data source\n texture_source = ColumnDataSource( { 'image': [texture] } )\n\n # Instantiate plot object for shape-free face\n image_plot = figure(title = 'Predicted Texture', y_range = (0, model.image_dims[0]+150), x_range = (0, model.image_dims[1]+150) )\n image_plot.image_rgba( image = 'image', x=0, y=0, dw=model.image_dims[1], dh=model.image_dims[0], source=texture_source)\n ###########################################################################\n\n # Define the internal callback function to update objects interactively\n def callback(attr, old, new):\n \"\"\" Bokeh callback for updating glyphs\n \"\"\"\n\n # Iterate over the traits, get their title and their value and store in the dictionary\n predictor_dict = {}\n for slide in sliders:\n predictor_dict[ slide.title ] = slide.value\n\n # Use this dictionary to feed to the model's predict method, generating new ouput to show\n _, final_im, shape, texture = model.predict(**predictor_dict)\n\n # Fix the images for show\n final_im = _image_tidy(final_im)\n texture = _image_tidy(texture)\n\n # Update data sources with the new information\n shape_source.data = {'x':shape[:,0], 'y':shape[:,1]}\n warp_source.data = {'image':[final_im]}\n texture_source.data = {'image':[texture]}\n\n ###########################################################################\n # Set up sliders to alter properties\n sliders = []\n for trait in model.trait_list:\n\n # Get the middle and far end points by applying mean, min, and max, and rounding to zero\n avg, mini, maxi = model.master_data_frame[trait].apply(['mean', 'min', 'max']).round()\n\n slider = Slider(title = trait, start = mini, end = maxi, step = 1, value = avg)\n slider.on_change('value', callback)\n sliders.append(slider)\n\n 
###########################################################################\n # Set layout according to specification of user, extract from dictionary\n layout_dict = {'combined':warp_image_plot, 'texture':image_plot, 'shape':shape_plot}\n\n layout = row([widgetbox(sliders), layout_dict[display]])\n\n # Update and add to curdoc\n doc.add_root(layout)", "def create_pdf(request, guide_number):\n template = 'pdf/guia_pdf.html'\n guide_obj = Guide.objects.get(number=guide_number)\n content_obj = Content.objects.filter(\n section__guide=guide_obj).order_by('section', 'peso')\n\n context = {\n 'guide_obj': guide_obj,\n 'content_obj': content_obj,\n }\n\n if request.GET.get('print'):\n pdf_name = 'guia-%s' % guide_number\n return render_to_pdf(request, pdf_name)\n else:\n return render_to_response(template, context,\n context_instance=RequestContext(request))", "def _create_submission(homework, course_user, pdf_file, group_members):\n reader = PyPDF2.PdfFileReader(pdf_file)\n page_count = reader.getNumPages()\n pdf_file.seek(0) # undo work of PyPDF2\n\n submission = models.Submission(assessment=homework, course_user=course_user,\n page_count=page_count, released=False, preview=False, last=True,\n time=timezone.now())\n submission.pdf.save('homework-pdf', files.File(pdf_file))\n\n submission.group_members.add(course_user)\n submission.save()\n\n if homework.groups_allowed:\n submission.group_members.add(*group_members)\n submission.save()\n\n return submission", "def generate():\n # Create the list of article from our data\n generator = GenerateLDA()\n generator.generateLDA()\n return jsonify({\"code\": 200, \"message\" : \"LDA model successfully created.\"})", "def hrm_training_event_report_pdf_export(r, **attr):\n\n from s3 import s3_fullname, s3_str\n\n record = r.record\n\n T = current.T\n db = current.db\n s3db = current.s3db\n\n current_language = T.accepted_language\n if current_language == \"es\":\n # Reach different translation\n title = s3_str(T(\"Training Event Report\"))\n else:\n title = s3_str(T(\"Training Report\"))\n\n if record.course_id:\n course_name = s3db.hrm_training_event.course_id.represent(record.course_id)\n title = \"%s: %s\" % (title, course_name)\n\n def callback(r):\n\n from gluon.html import DIV, TABLE, TD, TH, TR\n \n rtable = s3db.hrm_training_event_report\n\n date_represent = rtable.date.represent\n org_represent = s3db.org_OrganisationRepresent(parent = False,\n acronym = False)\n\n # Logo\n otable = db.org_organisation\n org_id = record.organisation_id\n org = db(otable.id == org_id).select(otable.name,\n otable.acronym, # Present for consistent cache key\n otable.logo,\n limitby=(0, 1),\n ).first()\n #if settings.get_L10n_translate_org_organisation():\n #org_name = org_represent(org_id)\n #else:\n # org_name = org.name\n\n logo = org.logo\n if logo:\n logo = s3db.org_organisation_logo(org)\n elif current.deployment_settings.get_org_branches():\n root_org = current.cache.ram(\n # Common key with auth.root_org\n \"root_org_%s\" % org_id,\n lambda: s3db.org_root_organisation(org_id),\n time_expire=120\n )\n logo = s3db.org_organisation_logo(root_org)\n\n # Read the report\n report = db(rtable.training_event_id == r.id).select(limitby = (0, 1),\n ).first()\n\n # Header\n header = TABLE(TR(TH(\"%s:\" % T(\"Name\")),\n TD(s3_fullname(report.person_id)),\n TH(\"%s:\" % T(\"Training Date\")),\n TD(date_represent(record.start_date)),\n ),\n TR(TH(\"%s:\" % T(\"Position\")),\n TD(rtable.job_title_id.represent(report.job_title_id)),\n TH(\"%s:\" % T(\"Finance Codes\")),\n 
TD(report.code),\n ),\n TR(TH(\"%s:\" % T(\"National Society Visited\")),\n TD(org_represent(report.organisation_id)),\n TH(\"%s:\" % T(\"Report Date\")),\n TD(date_represent(report.date)),\n ),\n TR(TH(\"%s:\" % T(\"Training Purpose\")),\n TD(report.purpose,\n _colspan = 3,\n ),\n ),\n )\n\n # Main\n main = TABLE(TR(TH(\"1. %s\" % T(\"Objectives\"))),\n TR(TD(report.objectives)),\n TR(TH(\"2. %s\" % T(\"Methodology\"))),\n TR(TD(report.methodology)),\n TR(TH(\"3. %s\" % T(\"Implemented Actions\"))),\n TR(TD(report.actions)),\n TR(TH(\"4. %s\" % T(\"About the participants\"))),\n TR(TD(report.participants)),\n TR(TH(\"5. %s\" % T(\"Results and Lessons Learned\"))),\n TR(TD(report.results)),\n TR(TH(\"6. %s\" % T(\"Follow-up Required\"))),\n TR(TD(report.followup)),\n TR(TH(\"7. %s\" % T(\"Additional relevant information\"))),\n TR(TD(report.additional)),\n TR(TH(\"8. %s\" % T(\"General Comments\"))),\n TR(TD(report.comments)),\n )\n\n output = DIV(TABLE(TR(TD(logo),\n #TD(org_name), # This isn't rtl-proof, check vol_service_record for how to handle that if-required\n )),\n TABLE(TR(TD(title))),\n TABLE(header),\n TABLE(main),\n )\n\n return output\n\n attr[\"rheader\"] = None\n\n from s3.s3export import S3Exporter\n\n exporter = S3Exporter().pdf\n pdf_title = title\n return exporter(r.resource,\n request = r,\n method = \"list\",\n pdf_title = pdf_title,\n pdf_table_autogrow = \"B\",\n pdf_callback = callback,\n **attr\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View for employee development plan details
def development_plan_details(request, development_plan_id): #, employee_id ):
    # employee = Employee.objects.get(user__pk=request.user.pk)
    # employee = Employee.objects.filter(pk=int(employee_id)).first()

    development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))
    current_employee = Employee.objects.filter(user__pk=request.user.pk).first()
    all_employees = development_plan.employee_relation.all()

    try:
        development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))
        data={}
        development_plan_object_list=[]
        dev_plan={}
        dev_plan["id"] = development_plan.id
        dev_plan["deleted"] = development_plan.deleted
        if development_plan.type:
            dev_plan["type"] = development_plan.type.name
        # dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\
        #     .finished_at

        dev_plan["created_at"] = development_plan.created_at
        dev_plan["created_by"] = development_plan.created_by.username

        development_plan_object_list.append({"dev_plan_details":dev_plan})

# manager_relation
        manager_data={}
        manager_data["manager_username"] = development_plan.manager_relation.user.username
        manager_data["manager_first_name"] = development_plan.manager_relation.user.first_name
        manager_data["manager_last_name"] = development_plan.manager_relation.user.last_name
        development_plan_object_list.append({"manager_data":manager_data})

# employee_relation
        employee_data={}
        all_employees = development_plan.employee_relation.all()
        if all_employees:
            emp_list=[]
            for emp in all_employees:
                emp_data={}
                emp_data["id"] = emp.user.id
                emp_data["username"] = emp.user.username
                emp_data["first_name"] = emp.user.first_name
                emp_data["last_name"] = emp.user.last_name
                emp_data["status_questions"] = emp.status_questions

                emp_data["dev_plan_finished_at"] = DevelopmentPlanToEmployeeRelation\
                    .objects.get(employee=emp,
                                 development_plan = development_plan)\
                    .finished_at

                employee_role = EmployeeRole.objects.filter(employee=emp).all()
                name_role_list = []
                for obj in employee_role:
                    name_role_list.append(obj.role.name)
                emp_data["roles"] = name_role_list
                emp_list.append(emp_data)
            employee_data={"all_employees":emp_list}
        else:
            return JsonResponse(data={"details":"Any employee has Development Plan with id={}"
                                      .format(development_plan.id)}, status=404)

        development_plan_object_list.append({"employee_data":employee_data})

# competence_parts
        all_competence_parts = development_plan.competence_parts.all()

        competence_list = []
        questions_list = []
        sliders_list = []

        if all_competence_parts:
            for comp_part in all_competence_parts:

                comp_part_data={}
                competence_d={"competence_parts": []}

                comp_part_data["id"] = comp_part.id
                comp_part_data["title"] = comp_part.title
                comp_part_data["description"] = comp_part.description
                comp_part_data["competence_status"] = comp_part.competence_status

                all_questions = comp_part.question_set.all()
                if all_questions:
                    for question in all_questions:
                        question_data = {}
                        question_data["question_id"] = question.id
                        question_data["title"] = question.title
                        question_data["competence_part"] = question.competence_part.id

                        answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee

                        if answer:
                            question_data["answer_id"] = answer.id
                            question_data["answer"] = answer.title

                        questions_list.append(question_data)

                    comp_part_data["questions"] = questions_list

                all_sliders = comp_part.slider_set.all()
                if all_sliders:
                    for slider in all_sliders:
                        slider_data = {}
                        slider_data["slider_id"] = slider.id
                        slider_data["scale"] = slider.scale
                        slider_data["competence_part"] = slider.competence_part.id

                        answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee

                        if slider:
                            slider_data["answer_id"] = answer.id
                            slider_data["answer"] = answer.slider.scale

                        sliders_list.append(slider_data)

                    comp_part_data["sliders"] = sliders_list

                comp_part_data["created_at"] = comp_part.created_at
                comp_part_data["created_by"] = comp_part.created_by.username
                comp_part_data["updated_at"] = comp_part.updated_at
                comp_part_data["updated_by"] = comp_part.updated_by.username

                competence_keys_list = ['id', 'title', 'description',
                                        'language_code', 'status']

                if not competence_list:
                    get_competence_data(competence_keys_list, comp_part.competence, competence_d,
                                        comp_part_data, competence_list)
                else:
                    competence_found = False
                    for competence_dict in competence_list:
                        if competence_dict['id'] == comp_part.competence.id:
                            competence_dict['competence_parts'].append(comp_part_data)
                            competence_found = True
                            break

                    if not competence_found:
                        get_competence_data(competence_keys_list, comp_part.competence, competence_d,
                                            comp_part_data, competence_list)

            development_plan_object_list.append({"competences":competence_list})

        else:
            return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet"
                                      .format(development_plan.id)}, status=404)

        data = {"dev_plan:": development_plan_object_list}
        return JsonResponse(status=201, data=data)

    except DevelopmentPlan.DoesNotExist:
        return JsonResponse(data={"details":"Development Plan with this id doesn't exist"}, status=404)
[ "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for 
slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def Department_Detail_View(request, pk):\n department = get_object_or_404(Department, pk=pk)\n employees = Employee.objects.all()\n return render(request, 'WorkforceManagement/Department_Detail.html', {'department': department, 'employees': employees})", "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return 
JsonResponse(status=201, data=data)", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def plan_detail(request, pk):\n if request.method == 'GET':\n try:\n plan = DietPlan.objects.get(pk=pk)\n except ObjectDoesNotExist:\n return HttpResponse(status=404)\n serializer = DietPlanSerializer(plan)\n return Response(serializer.data)", "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def show_project_details():\n\n title = request.args.get('title')\n\n title, description, max_grade = hackbright.get_project_by_title(title)\n\n student_grades = hackbright.get_grades_by_title(title)\n\n return(render_template(\"project_details.html\", title=title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades))", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. 
\"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def show_investments():\n logging.debug(\"Show investments\")\n investments = API.get_investments()\n keys = [\"Rating\", \"UserName\", \"Country\", \"PurchasePrice\",\n \"PrincipalRepaid\", \"Interest\", \"PurchaseDate\"]\n print_table(keys, investments)", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def plan_list(request):\n if request.method == 'GET':\n try:\n plans = DietPlan.objects.filter(owner=request.user)\n except TypeError as e:\n print(e.msg)\n return HttpResponse(status=403)\n serializer = DietPlanSerializer(plans, many=True)\n return Response(serializer.data)", "def getTestPlanByName(self, name, projectname, devkey=None):\n return self._query(\"tl.getTestPlanByName\",\\\n devKey=devkey,\\\n testplanname=name,\\\n testprojectname=projectname)", "def Department_List_View(request):\n departments = Department.objects.all()\n return render(request, 'WorkforceManagement/Department_list.html', {'departments': departments})", "def detail():\n id = request.args.get(\"id\")\n projectDetail = session.query(Projects).filter(Projects.id == id).all()\n return render_template(\"detail.html\", projectDetail=projectDetail)", "def showFloorplan(floorplan_id):\n\tfloorplans = session.query(Floorplan).filter_by(id=floorplan_id).all()\n\tunits = session.query(Unit).filter_by(floorplan_id=floorplan_id).all()\n\treturn render_template('floorplan.html', floorplans = floorplans, units = units)", "def list_plans():\n click.echo(PaymentPlan.list())", "def plan(self) -> str:\n return pulumi.get(self, \"plan\")", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a list of user's development plans for manager
def get_all_user_development_plans_for_manager(request, employee_id):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()
    employee = Employee.objects.filter(pk=int(employee_id)).first()

    if not current_employee:
        raise PermissionDenied("You don't have any employee assigned to you.", 401)

    if not current_employee.isEnsoUser() and current_employee.is_manager:
        raise PermissionDenied()
    actions = employee.action_set.all()
    if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:
        raise PermissionDenied("Employee with id={} is not assigned to you.".format(employee_id), 401)

    if user_development_plans:
        data={}
        user_development_plans_list = []
        for plan in user_development_plans:

            development_plan_object_list=[]
            dev_plan = {}

            dev_plan["id"] = plan.id
            dev_plan["deleted"] = plan.deleted
            if plan.type:
                dev_plan["type"] = plan.type.name
            dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
                .get(employee=current_employee, development_plan = plan).finished_at
            dev_plan["created_at"] = plan.created_at
            dev_plan["created_by"] = plan.created_by.username

            development_plan_object_list.append({"dev_plan_details":dev_plan})

            manager_data = {}
            manager_data["manager_username"] = plan.manager_relation.user.username
            manager_data["id"] = plan.manager_relation.user.id

            development_plan_object_list.append({"manager_data":manager_data})
            user_development_plans_list.append(development_plan_object_list)

    else:
        return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
                                  .format(request.user.pk)}, status=404)

    data = {"user_development_plans:": user_development_plans_list}
    return JsonResponse(status=201, data=data)
[ "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def plan_list_manage(request, username):\n user = User.objects.get(username=username)\n plans = Plan.objects.filter(user=user)\n return render(request, 'nutrition/plan_list_manage.html', {'plans': plans})", "def plan_list(request):\n if request.method == 'GET':\n try:\n plans = DietPlan.objects.filter(owner=request.user)\n except TypeError as e:\n print(e.msg)\n return HttpResponse(status=403)\n serializer = DietPlanSerializer(plans, many=True)\n return Response(serializer.data)", "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if 
all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = 
{\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def list_plans():\n click.echo(PaymentPlan.list())", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n 
question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def developer_list(request):\n if request.method == 'GET':\n developers = Developer.objects.all()\n serializer = DeveloperSerializer(developers, many=True)\n\n return Response(serializer.data)", "def test_provider_project_development_list(self):\n pass", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def show_medical_plans(user, age=None, smoker=None, child=None):\n\n url = 'https://api.vericred.com/plans/search'\n\n payload = {'zip_code': user.zip_code,\n 'fips_code': user.fips_code, \n 'market': user.market,\n 'applicants': [\n {'age': age,\n 'child': child,\n 'smoker': smoker\n }\n ],\n 'sort': 'premium:asc'}\n\n if (not age or not smoker or not child):\n del payload['applicants']\n payload['sort'] = 'level:asc'\n\n req = requests.post(url, json=payload, headers=HEADERS)\n all_plans = req.json()\n\n all_extracted_plans = []\n\n plans_exist = all_plans.get('plans')\n\n if plans_exist:\n for plan in all_plans['plans']:\n\n extracted_plan_data = parse_med_plans(plan)\n all_extracted_plans.append(extracted_plan_data)\n\n return all_extracted_plans", "def plans(self):\r\n return pl.Plans(self)", "def list(cls):\n return cls().requests.get('plan')", "def 
test_access_to_adviser_project_list_for_admin(self):\n url = reverse('projects-list')\n self.client.logout()\n self.client.login(username=\"test3@test.com\", password=\"password\")\n response = self.client.get(url)\n self.assertEqual(response.content,\n '{\"count\":2,\"next\":null,\"previous\":null,\"results\":[{\"id\":1,\"id_company\":1,'\n '\"name\":\"TestProject\",\"description\":\"Test description\",\"project_template\":null},'\n '{\"id\":2,\"id_company\":2,\"name\":\"TestProject 2\",\"description\":\"Test description\",'\n '\"project_template\":null}]}')", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def index():\n # users = User.query.filter_by(_role_code=UserRole.tefl_pending.value)\n # calls = CallLog.query.filter_by(flagged=True)\n # return render_template('admin/index.html',\n # users=users,\n # active='users',\n # calls=calls)\n users = User.query.all()\n return render_template('admin/users.html',\n users=users,\n active='users',\n title='Users',\n countries=COUNTRY_CODES)", "def test_provider_project_development_link_list(self):\n pass", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View a list of development plans for active user
def get_all_development_plans_for_user(request):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()

    if not current_employee:
        raise PermissionDenied("You don't have any employee assigned to you.", 401)

    if user_development_plans:
        data={}
        user_development_plans_list = []
        for plan in user_development_plans:

            development_plan_object_list=[]
            dev_plan = {}

            dev_plan["id"] = plan.id
            dev_plan["deleted"] = plan.deleted
            if plan.type:
                dev_plan["type"] = plan.type.name
            dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
                .get(employee=current_employee, development_plan = plan).finished_at
            dev_plan["created_at"] = plan.created_at
            dev_plan["created_by"] = plan.created_by.username

            development_plan_object_list.append({"dev_plan_details":dev_plan})

            manager_data = {}
            manager_data["manager_username"] = plan.manager_relation.user.username
            manager_data["id"] = plan.manager_relation.user.id

            development_plan_object_list.append({"manager_data":manager_data})
            user_development_plans_list.append(development_plan_object_list)

    else:
        return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
            .format(request.user.pk)}, status=404)

    data = {"user_development_plans:": user_development_plans_list}
    return JsonResponse(status=201, data=data)
[ "def plan_list(request):\n if request.method == 'GET':\n try:\n plans = DietPlan.objects.filter(owner=request.user)\n except TypeError as e:\n print(e.msg)\n return HttpResponse(status=403)\n serializer = DietPlanSerializer(plans, many=True)\n return Response(serializer.data)", "def list_plans():\n click.echo(PaymentPlan.list())", "def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = 
Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)", "def plan_list_manage(request, username):\n user = User.objects.get(username=username)\n plans = Plan.objects.filter(user=user)\n return render(request, 'nutrition/plan_list_manage.html', {'plans': plans})", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n 
dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def list(cls):\n return cls().requests.get('plan')", "def developer_list(request):\n if request.method == 'GET':\n developers = Developer.objects.all()\n serializer = DeveloperSerializer(developers, many=True)\n\n return Response(serializer.data)", "def plans(self):\r\n return pl.Plans(self)", "def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection", "def test_provider_project_development_list(self):\n pass", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n 
emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = 
{\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def get_plans(self):\n return stripe.Plan.all()", "def test_access_to_adviser_project_list_for_admin(self):\n url = reverse('projects-list')\n self.client.logout()\n self.client.login(username=\"test3@test.com\", password=\"password\")\n response = self.client.get(url)\n self.assertEqual(response.content,\n '{\"count\":2,\"next\":null,\"previous\":null,\"results\":[{\"id\":1,\"id_company\":1,'\n '\"name\":\"TestProject\",\"description\":\"Test description\",\"project_template\":null},'\n '{\"id\":2,\"id_company\":2,\"name\":\"TestProject 2\",\"description\":\"Test description\",'\n '\"project_template\":null}]}')", "def plans(self) -> Sequence['outputs.PlanNotificationDetailsResponse']:\n return pulumi.get(self, \"plans\")", "def plan_detail(request, pk):\n if request.method == 'GET':\n try:\n plan = DietPlan.objects.get(pk=pk)\n except ObjectDoesNotExist:\n return HttpResponse(status=404)\n serializer = DietPlanSerializer(plan)\n return Response(serializer.data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View active development plan for active user
def get_active_development_plan_for_user(request):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    current_development_plan = DevelopmentPlan.objects.filter(
        employee_relation=current_employee,
        employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!

    if not current_employee:
        raise PermissionDenied()

    if current_development_plan:
        data={}
        development_plan_object_list=[]
        dev_plan={}
        dev_plan["id"] = current_development_plan.id
        dev_plan["deleted"] = current_development_plan.deleted
        if current_development_plan.type:
            dev_plan["type"] = current_development_plan.type.name
        dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
            .get(employee=current_employee, development_plan = current_development_plan)\
            .finished_at

        dev_plan["created_at"] = current_development_plan.created_at
        dev_plan["created_by"] = current_development_plan.created_by.username

        development_plan_object_list.append({"dev_plan_details":dev_plan})

# manager_relation
        manager_data={}
        manager_data["manager_username"] = current_development_plan.manager_relation.user.username
        manager_data["manager_first_name"] = current_development_plan.manager_relation.user.first_name
        manager_data["manager_last_name"] = current_development_plan.manager_relation.user.last_name
        development_plan_object_list.append({"manager_data":manager_data})

# employee_relation
        employee_data={}
        all_employees = current_development_plan.employee_relation.all()
        if all_employees:
            emp_list=[]
            for emp in all_employees:
                emp_data={}
                emp_data["id"] = emp.user.id
                emp_data["username"] = emp.user.username
                emp_data["first_name"] = emp.user.first_name
                emp_data["last_name"] = emp.user.last_name
                emp_data["status_questions"] = emp.status_questions

                employee_role = EmployeeRole.objects.filter(employee=emp).all()
                name_role_list = []
                for obj in employee_role:
                    name_role_list.append(obj.role.name)
                emp_data["roles"] = name_role_list
                emp_list.append(emp_data)
            employee_data={"all_employees":emp_list}
        else:
            return JsonResponse(data={"details":"Any employee has Development Plan with id={}"
                .format(current_development_plan.id)}, status=404)

        development_plan_object_list.append({"employee_data":employee_data})


# competence_parts
        all_competence_parts = current_development_plan.competence_parts.all()

        competence_list = []
        questions_list = []
        sliders_list = []

        if all_competence_parts:
            for comp_part in all_competence_parts:

                comp_part_data={}
                competence_d={"competence_parts": []}

                comp_part_data["id"] = comp_part.id
                comp_part_data["title"] = comp_part.title
                comp_part_data["description"] = comp_part.description
                comp_part_data["competence_status"] = comp_part.competence_status

                all_questions = comp_part.question_set.all()
                print all_questions
                if all_questions:
                    for question in all_questions:
                        question_data = {}
                        question_data["question_id"] = question.id
                        question_data["title"] = question.title
                        question_data["competence_part"] = question.competence_part.id

                        answer = Answer.objects.filter(question__id = question.id,
                            employee=current_employee).first()

                        if answer:
                            question_data["answer_id"] = answer.id
                            question_data["answer"] = answer.title

                        questions_list.append(question_data)

                    comp_part_data["questions"] = questions_list

                all_sliders = comp_part.slider_set.all()
                if all_sliders:
                    for slider in all_sliders:
                        slider_data = {}
                        slider_data["slider_id"] = slider.id
                        slider_data["scale"] = slider.scale
                        slider_data["competence_part"] = slider.competence_part.id

                        answer = Answer.objects.filter(slider__id = slider.id,
                            employee=current_employee).first()

                        if slider:
                            slider_data["answer_id"] = answer.id
                            slider_data["answer"] = answer.slider.scale

                        sliders_list.append(slider_data)

                    comp_part_data["sliders"] = sliders_list

                comp_part_data["created_at"] = comp_part.created_at
                comp_part_data["created_by"] = comp_part.created_by.username
                comp_part_data["updated_at"] = comp_part.updated_at
                comp_part_data["updated_by"] = comp_part.updated_by.username

                competence_keys_list = ['id', 'title', 'description',
                    'language_code', 'status']

                if not competence_list:
                    get_competence_data(competence_keys_list, comp_part.competence, competence_d,
                        comp_part_data, competence_list)
                else:
                    competence_found = False
                    for competence_dict in competence_list:
                        if competence_dict['id'] == comp_part.competence.id:
                            competence_dict['competence_parts'].append(comp_part_data)
                            competence_found = True
                            break

                    if not competence_found:
                        get_competence_data(competence_keys_list, comp_part.competence, competence_d,
                            comp_part_data, competence_list)

            development_plan_object_list.append({"competences":competence_list})

        else:
            return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet"
                .format(current_development_plan.id)}, status=404)

        data = {"dev_plan:": development_plan_object_list}
        return JsonResponse(status=201, data=data)

    else:
        return JsonResponse(data={"details": "The user with id={} doesn't have an active Development Plan"
            .format(current_employee.user.id)}, status=404)
[ "def plan_list_manage(request, username):\n user = User.objects.get(username=username)\n plans = Plan.objects.filter(user=user)\n return render(request, 'nutrition/plan_list_manage.html', {'plans': plans})", "def list_plans():\n click.echo(PaymentPlan.list())", "def current_plan(self):\n # return \"plan za ovaj mesec\"\n mp = MonthPlan.objects.get(group=self, month=datetime.now().month, year=datetime.now().year)\n return MonthTrainingPlan.objects.get(month_plan=mp)\n # return MonthTrainingPlan.objects.filter(group=self)", "def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def plan(self) -> str:\n return pulumi.get(self, \"plan\")", "def plan_list(request):\n if request.method == 'GET':\n try:\n plans = DietPlan.objects.filter(owner=request.user)\n except TypeError as e:\n print(e.msg)\n return HttpResponse(status=403)\n serializer = DietPlanSerializer(plans, many=True)\n return Response(serializer.data)", "def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)", "def plan(self):\n return self._plan", "def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)", "def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. 
\"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)", "def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text", "def user_project_view(cls, user, project):\r\n pass", "def plan(self):\n return self._result.plan()", "def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)", "def plan(self) -> global___Plan:", "def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)", "def show_user_access(self):\n self.test_runner.run_user_access_show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get or Update goal by id
def self_goal_by_id(request, goal_id):
    current_user = request.user
    fields_map = {
        'goal_answers': lambda g: [
            {
                'id': answ.id,
                'title': answ.title,
                "created_by": answ.created_by.username,
                "created_at": answ.created_at,
                "file": answ.file.url
            }
            for answ in g.goal_answers.all()
        ]
    }
    fields = ['title', 'goal_answers', 'id', 'is_achieved']
    goal = Goal.objects.get(pk=goal_id)

    if request.method == 'POST':
        if goal.created_by != current_user:
            raise PermissionDenied("You can edit only your own goals")
        f = GoalForm(data=request.json_body)
        if not f.is_valid():
            return JsonResponse(data={"detail": json.loads(f.errors.as_json())}, status=400)
        goal = f.save(current_user, goal)

    return JsonResponse(
        data={f: fields_map[f](goal) if f in fields_map else getattr(goal, f) for f in fields},
        status=200
    )
[ "def update_goal(goal_id):\n if goal_id == None:\n return make_response(404)\n\n else:\n goal = get_goal_from_id(goal_id)\n request_body = request.get_json()\n\n if \"title\" in request_body:\n goal.title = request_body[\"title\"]\n\n goal_response = goal.to_dict()\n\n db.session.commit()\n return jsonify({\"goal\": goal_response}), 200", "def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)", "def goal(self, goal_id):\r\n return Goal(self, goal_id)", "def get_goal(goal_id):\n goal = get_goal_from_id(goal_id)\n\n return jsonify({\"goal\": goal.to_dict()}), 200", "def get_goal(id, check_author=True):\n goal = (\n get_db()\n .execute(\n \"SELECT g.id AS id, title, created, author_id, username\"\n \" FROM goals g JOIN user u ON g.author_id = u.id\"\n \" WHERE g.id = ?\",\n (id,),\n )\n .fetchone()\n )\n\n if goal is None:\n abort(404, \"Gost {0} doesn't exist.\".format(id))\n\n if check_author and goal[\"author_id\"] != g.user[\"id\"]:\n abort(403)\n\n return goal", "def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)", "def updateOne(id):\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to Find the given client'}\n\n # Update the URL\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n update(Followup).\n where(Followup.columns.id == id).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Update the given client'}\n return {'status': \"Update Succesful\"}", "def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()", "def sync_goal(self, goal: str) -> Goal:\n # Handle a double sync\n if goal in self.synced:\n return self.synced[goal]\n # Handle the case where the goal is not configured\n try:\n goal_obj = self.goals[goal]\n except KeyError:\n goal_obj = None\n logging.getLogger(__name__).error(\n \"Configuration error: Referenced goal '%s' does not exist.\",\n goal,\n )\n exit(-1)\n # Update the goal value\n value = self.get_value(goal_obj)\n\n if goal_obj[\"source\"] != \"asana\":\n upd = self.asana.set_metric_current_value(goal_obj[\"goal_id\"], value)\n logging.getLogger(__name__).info(\n \"Updating goal '%s' value to '%s'\",\n goal, value,\n )\n else:\n upd = self.asana.get_metric_current_value(goal_obj[\"goal_id\"])\n\n upd = self.asana.update_goal_status(goal_obj[\"goal_id\"], upd.assess_status())\n # Remember we just synced this Goal\n self.synced[goal] = upd\n return upd", "def delete(id):\n get_goal(id)\n db = get_db()\n db.execute(\"DELETE FROM goals WHERE id = ?\", (id,))\n db.commit()\n\n return redirect(url_for(\"goals.goals\"))", "def put(self, id):\n data = request.json\n update_another_entity(id, data)\n return None, 204", "def put(self, id):\n\n adm = Administration()\n learningprofilegroup = LearningProfileGroup.from_dict(api.payload)\n print('main aufruf')\n\n if learningprofilegroup is not None:\n \"\"\"Hierdurch wird die id des zu überschreibenden (vgl. 
Update) Person-Objekts gesetzt.\"\"\"\n\n learningprofilegroup.set_id(id)\n adm.save_learningprofile_group(learningprofilegroup)\n return '', 200\n\n else:\n return '', 500", "def update_ship(id):\n data = request.get_json()\n print(data)\n for ship in db['ships']:\n if ship['id'] == id:\n if data['name']:\n ship['name'] == data['name']\n if data['age']:\n ship['age'] == data['age']\n return ship, status.HTTP_202_ACCEPTED\n return {}, status.HTTP_404_NOT_FOUND", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def put(self, id):\n data = request.json\n update_scenario(id, data)\n return None, 204", "def put(self,id):\n adm = Administration()\n s = Suggestion.from_dict(api.payload)\n if s is not None:\n s.set_id(id)\n adm.save_suggestion(s)\n return s, 200\n\n else:\n return '', 500", "def update_checkout_with_id(id):\n checkout = Checkout.query.get(id)\n\n score = request.json['score']\n combo = request.json['combo']\n\n checkout.score = score\n checkout.combo = combo\n\n db.session.commit()\n\n return checkout_schema.jsonify(checkout)", "def test_update_goal(self):\n pass", "def update_drone_id(id_):\r\n # GET current drone object\r\n drone = get_drone()\r\n # Update the drone id\r\n drone[\"DroneID\"] = id_\r\n\r\n # Update drone object\r\n update_drone(drone)\r\n print(\"DroneID updated successfully\", id_)\r\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function takes a csv file as an argument, deduplicates the file, and writes the deduplicated dataset to a csv file if a path for the output file is provided as the second argument. It returns the deduplicated dataframe. Parameters, type, return values.
def dataDedup_csv(infile, outfile=None):
    if fpath.isfile(infile):
        dataset = pd.read_csv(infile, sep=',', dtype='unicode')
        dedup_dataset = dataset.drop_duplicates()
        if outfile!=None:
            dedup_dataset.to_csv(outfile, encoding='utf-8', index=False, header=False)
        return dedup_dataset
    else:
        print("file \"%s\" does not exist... or is not a file..." %(infile))
[ "def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_False=np.where(dup==False)\n\t\n no_dup=data.loc[dup_False]\n\n len_no_dup=no_dup.shape[0]\n len_dup_False_indx=len(dup_False[0])\n\n try:\n assert len_no_dup == len_dup_False_indx\n except AssertionError:\n print(\"Removal of duplicates and creation of new output failed.\")\n print(\"Length of no duplicated indices does not match the subsampled main dataframe... function failiure :(\")\n\n\t\n if header_rows !=0: \n frames = [header, no_dup]\n no_dup = pd.concat(frames)\n\n if sep_type==\"\":\n no_dup.to_csv(out_file, sep=\"\\t\", header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))\n else:\n no_dup.to_csv(out_file, sep=sep_type, header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))", "def check_duplicates(in_file, sep_type=\"\", header_rows=0):\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_True=np.where(dup==True)\n len_dup_True_indx=len(dup_True[0])\n\n if len_dup_True_indx == 0:\n print(\"No duplicated rows in %s\" %(in_file))\n else:\n print(\"%i duplicated rows found in %s\" %(len_dup_True_indx, in_file))", "def remove_duplicated_lines():\n\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n unique_lines = []\n # compare line be line\n with open(os.path.join(work_folder, \"tempfile.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\")) as infile:\n for line in infile:\n if line not in unique_lines:\n outfile.write(line)\n unique_lines.append(line)\n # replace files\n shutil.copyfile(os.path.join(work_folder, 'tempfile.csv'), os.path.join(\n work_folder, \"filtered_merged_history_KMDW.csv\"))\n # remove temp file\n os.remove(os.path.join(work_folder, \"tempfile.csv\"))", "def create_unique_file(files_to_concat: list) -> pd.DataFrame:\n dfs_to_concat = []\n\n print(f'Number of files: {len(files_to_concat)}')\n\n for file in files_to_concat:\n\n year = int(file[0])\n month = file[1]\n filepath = file[2]\n\n # Use pd.read_csv to solve some problems with files\n # engine: python - This parameter is slower compared to c-engine but handle but handle\n # some problematic characters better\n # sep=\"[\\t;]\" - using python-engine it's possible to use regular expressions to define the sep char, where\n # python identify the char to use with each file.\n # skiprows = 1 - As the columns have different names in many files, I just combine header=None with skiprows=1\n # with this, just data is read.\n actual_df = pd.read_csv(filepath, engine='python', sep=\"[\\t;]\", skiprows=1, header=None, dtype='category')\n\n # File 2017-Dezembro.csv has duplicate columns so an if is necessary here just to solve this problem.\n if month == 'Dezembro' and year == 2017:\n\n del(actual_df[7])\n actual_df.columns = [n for n in range(12)]\n\n # Creating two new columns with month and year for each file.\n actual_df['month'], actual_df['year'] = 
zip(*[(month, year) for n in range(len(actual_df))])\n\n print(f'Processing file: {filepath}')\n\n dfs_to_concat.append(actual_df)\n\n # Concat all files into unique_df\n unique_df = pd.concat(dfs_to_concat, axis=0, ignore_index=True)\n\n return unique_df", "def list_all_duplicates(folder: str,\n to_csv: bool = False,\n csv_path: str = './',\n ext: str = None,\n fastscan: bool = False) -> pd.DataFrame:\n duplicate_files = create_table(folder, ext, pre=fastscan)\n duplicate_files = duplicate_files[duplicate_files['hash'].duplicated(keep=False)]\n duplicate_files.sort_values(by='hash', inplace=True)\n\n if to_csv is True:\n save_csv(csv_path, duplicate_files)\n\n return duplicate_files", "def csv_writer(data, path):\n if os.path.exists(path):\n employee_list = read_employee_data(path)\n unique_key= [e.name+str(e.phone) for e in employee_list]\n with open(path, \"a\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n if (line[0]+line[4] in unique_key):\n print(\"Duplicate employee\")\n continue\n else:\n writer.writerow(line)\n else:\n with open(path, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)\n print(\"Wrting Done\")", "def check_errors(csv_file):\n\n logger.info(\"Checking %s.\", csv_file)\n\n errors_found = False\n errors_file = f\"{os.path.splitext(csv_file)[0]}_errors.csv\"\n deduplicated_file = f\"{os.path.splitext(csv_file)[0]}_deduplicated.csv\"\n\n with open(csv_file, 'r', encoding=\"UTF-8\") as input_file,\\\n open(deduplicated_file, 'w', encoding=\"UTF-8\") as dedup,\\\n open(errors_file, 'w', encoding=\"UTF-8\") as errors:\n\n reader = csv.reader(input_file, delimiter=',')\n dedup_writer = csv.writer(dedup)\n error_writer = csv.writer(errors)\n line = 1\n entries = set()\n for row in reader:\n\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n # Record any incorrect classifications.\n if not row[1].lower() == \"normal\" and not row[1].lower() == \"anomaly\":\n error_writer.writerow(\n [line, row[0], row[1], \"INVALID_CLASSIFICATION\"])\n errors_found = True\n\n # Write first image entry to dedup file and record duplicates.\n key = row[0]\n if key not in entries:\n dedup_writer.writerow(row)\n entries.add(key)\n else:\n error_writer.writerow([line, row[0], row[1], \"DUPLICATE\"])\n errors_found = True\n line += 1\n\n if errors_found:\n logger.info(\"Errors found check %s.\", errors_file)\n else:\n os.remove(errors_file)\n os.remove(deduplicated_file)\n\n return errors_found", "def make_clean_csv(panda_df, dest_path_name):\n panda_df.to_csv(dest_path_name)\n return True", "def check_duplicated_data(self, path, target):\n files_in_path = [file for file in self.get_csv_in_path(path)]\n print(\"check duplicated for file {} in path {} , files\".format(target, path))\n if target in files_in_path:\n print('The {} is already exist'.format(target))\n return True\n return False", "def dedup_file(in_fname, out_fname):\n with open(in_fname, 'r') as in_file, open(out_fname, 'w') as out_file:\n lines, n_lines, n_duplicates = get_lines(in_file)\n lines = list(lines)\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'deduplicated {in_fname}, removed {n_duplicates} duplicates out of {n_lines} lines')\n return n_lines, n_duplicates", "def compareDuplicates(self):\n\n if self._collHpssDuplicates.count() == 0:\n return\n\n with open(\"toBeDeleted.txt\", \"w\") as toBeDeleted:\n\n for duplicate in self._collHpssDuplicates.find({}):\n 
if duplicate['isInTarFile'] == True:\n continue\n\n orig = self._collHpssPicoDsts.find_one({'filePath': duplicate['filePath']})\n\n if orig['fileSize'] != duplicate['fileSize']:\n print('Is NOT equal: orig {0} - duplicate {1} : {2}'.format(orig['fileSize'],duplicate['fileSize'], duplicate['filePath']))\n# print(duplicate['fileFullPath'], file=toBeDeleted)\n continue\n\n print(duplicate['fileFullPath'], file=toBeDeleted)\n self._collHpssDuplicates.delete_one({'_id': duplicate['_id']})", "def _check_duplicate_id_csv(self):\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True", "def dedup_and_count(sam_file, dedup_file):\n umis = set()\n barcode = \"\"\n count = 0\n with open(sam_file, \"r\") as fp:\n with gzip.open(dedup_file, \"w\") as out:\n\n for line in fp:\n\n # don't read header\n if line.startswith(\"@\"):\n continue\n\n # read in important details\n fields = line.split(\"\\t\")\n line_barcode = fields[2]\n umi = fields[0].split(\":\")[-1]\n \n # check if current barcode or not\n if line_barcode != barcode:\n # save out latest\n if barcode != \"\":\n result = \"{}\\t{}\\n\".format(barcode, count)\n out.write(result)\n \n # and reset variables\n umis = set([umi])\n barcode = line_barcode\n count = 1\n else:\n # check if umi present or not\n if umi in umis:\n continue\n # add to count\n umis.add(umi)\n count += 1\n \n return", "def clean_data(df):\n df.drop_duplicates(inplace=True)\n return df", "def compress_dups(data, column):\n idx = defaultdict(list)\n for row in data:\n idx[row[column]].append(row)\n\n dedup = []\n\n for idx_row in sorted(idx.items()):\n dedup.append(avg_rows(idx_row[1]))\n return dedup", "def RemoveDuplicates():\n \n Infile = OptionsInfo[\"Infile\"]\n Outfile = OptionsInfo[\"Outfile\"]\n DuplicatesOutfile = OptionsInfo[\"DuplicatesOutfile\"]\n \n CountMode = OptionsInfo[\"CountMode\"]\n UseChirality = OptionsInfo[\"UseChirality\"]\n \n # Setup a molecule reader...\n MiscUtil.PrintInfo(\"\\nProcessing file %s...\" % Infile)\n Mols = RDKitUtil.ReadMolecules(Infile, **OptionsInfo[\"InfileParams\"])\n \n # Set up a molecule writer...\n Writer = None\n DuplicatesWriter = None\n if not CountMode:\n Writer = RDKitUtil.MoleculesWriter(Outfile, **OptionsInfo[\"OutfileParams\"])\n if Writer is None:\n MiscUtil.PrintError(\"Failed to setup a writer for output fie %s \" % Outfile)\n DuplicatesWriter = RDKitUtil.MoleculesWriter(DuplicatesOutfile, **OptionsInfo[\"OutfileParams\"])\n if DuplicatesWriter is None:\n MiscUtil.PrintError(\"Failed to setup a writer for output fie %s \" % DuplicatesOutfile)\n \n MiscUtil.PrintInfo(\"Generating files %s and %s...\" % (Outfile, DuplicatesOutfile))\n\n # Process molecules...\n MolCount = 0\n ValidMolCount = 0\n \n UniqueMolCount = 0\n DuplicateMolCount = 0\n \n CanonicalSMILESMap = {}\n Compute2DCoords = OptionsInfo[\"OutfileParams\"][\"Compute2DCoords\"]\n\n for Mol in Mols:\n MolCount += 1\n \n if Mol is None:\n continue\n \n if RDKitUtil.IsMolEmpty(Mol):\n MolName = RDKitUtil.GetMolName(Mol, MolCount)\n MiscUtil.PrintWarning(\"Ignoring empty molecule: %s\" % MolName)\n continue\n \n ValidMolCount += 
1\n \n CanonicalSMILES = Chem.MolToSmiles(Mol, isomericSmiles = UseChirality, canonical = True)\n \n if Compute2DCoords:\n if not CountMode:\n AllChem.Compute2DCoords(Mol)\n \n if CanonicalSMILES in CanonicalSMILESMap:\n DuplicateMolCount += 1\n if not CountMode:\n DuplicatesWriter.write(Mol)\n else:\n UniqueMolCount += 1\n CanonicalSMILESMap[CanonicalSMILES] = CanonicalSMILES\n if not CountMode:\n Writer.write(Mol)\n \n if Writer is not None:\n Writer.close()\n \n if DuplicatesWriter is not None:\n DuplicatesWriter.close()\n \n MiscUtil.PrintInfo(\"\\nTotal number of molecules: %d\" % MolCount)\n MiscUtil.PrintInfo(\"Number of valid molecules: %d\" % ValidMolCount)\n MiscUtil.PrintInfo(\"Number of ignored molecules: %d\" % (MolCount - ValidMolCount))\n\n MiscUtil.PrintInfo(\"\\nTotal number of unique molecules: %d\" % UniqueMolCount)\n MiscUtil.PrintInfo(\"Total number of duplicate molecules: %d\" % DuplicateMolCount)", "def load_combine_and_dedupe_from_csv_files(\n self,\n df,\n list_of_csv_files_or_folders_and_corresponding_configs\n ):\n if not isinstance(list_of_csv_files_or_folders_and_corresponding_configs, list):\n raise transform_errors.InputDataTypeError(\n f\"list_of_csv_files_or_folders_and_corresponding_configs must be of list type.\")\n\n df = pd.DataFrame()\n print(f\"\\nData in the following CSV files will be combined using corresponding configs:\")\n for f_c in self._get_list_of_csv_files_and_configs_to_process(\n list_of_csv_files_or_folders_and_corresponding_configs):\n print(f_c)\n cur_df = pd.read_csv(f_c[0],\n delimiter=f_c[1]['delimiter'],\n encoding=f_c[1]['encoding'],\n quoting=f_c[1]['quoting'])\n df = pd.concat([df, cur_df])\n\n return df.drop_duplicates(ignore_index=True)", "def remove_dup_trial():\n df_temp = load_data()\n df_temp = df_temp.reset_index()\n\n ## expanded unique drugs to get a dataframe we can group by\n # logic is to append a duplicate row, change its values, do\n # this for all matching rows, then remove all the original rows.\n dupes = []\n for idx, item_list in enumerate(df_temp[\"Primary Drugs\"]):\n split_list = item_list.split(\",\")\n if len(split_list) > 1:\n for i in split_list:\n # Appends a new row to data for a unique design value\n df_temp = df_temp.append(df_temp.iloc[idx, :])\n # 3 is the column number of where Trial Design lives\n # Changes the value of that specific cell to a single design value\n df_temp.iloc[-1, -3] = i\n dupes.append(idx)\n # we reset index so we can drop proper rows in next line\n df_temp2 = df_temp.reset_index(drop=True)\n df_trials = df_temp2.drop(dupes, axis=0)\n\n # Removes white space from drug names\n df_trials[\"Primary Drugs\"] = df_trials[\"Primary Drugs\"].apply(lambda x: x.strip())\n\n return df_trials", "def remove_duplicates(df):\n cache = set()\n\n def is_unique(row):\n rowstr = row.to_string()\n if rowstr in cache:\n return False\n cache.add(rowstr)\n return True\n\n return df[df.apply(is_unique, axis=1)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function checks for the size of a dataframe and splits it into parts containing approximately 1 million records each by default. It also provides the option of writing the split dataframes to the disk. Parameters, type, return values.
def dataFrameSplit(df, norec=1000000, outfile=None):
    # calculation of the no. of rows of the dataframe
    df_rsz = len(df.index)
    if df_rsz > norec:
        no_splits = np.ceil(df_rsz/norec)
        dfarr = np.array_split(df, no_splits)
        # printing the split dataframes to disk when an output file is requested
        if outfile!=None:
            i = 0
            for arr in dfarr:
                arr.to_csv("D:\\ddf"+str(i+1)+".csv", encoding='utf-8', index=False, header=False)
                i = i+1
        return dfarr
    else:
        print("The dataframe doesn't have sufficient records")
[ "def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]\n\n return segments", "def test_03_dataframe_to_dataframe_w_chunksize(self):\n _, err = _iquery(\"store(flatten(DF1, cells_per_chunk:5), DF3)\")\n assert not err, err\n self._array_cleanups.append('DF3')\n check_v_sum('DF3')\n nchunks = chunk_count(vaid_of('DF3'))\n prt(\"DF3 has\", nchunks, \"chunks\")\n assert nchunks < self._df1_chunks, \"DF3 did not get dense!\"", "def _sized_dataframes(dataframe: pd.DataFrame) -> tuple:\n\n def log_size_estimate(num_bytes):\n if num_bytes == 0:\n return \"0MB\"\n elif round(num_bytes / float(1 << 20), 2) == 0.0:\n return \"<0.1MB\"\n else:\n return str(round(num_bytes / float(1 << 20), 2)) + \"MB\"\n\n # get first row size\n row_size_est = sys.getsizeof(dataframe.head(1))\n # get number of rows\n num_rows = int(dataframe.shape[0])\n frame_size_est = row_size_est * num_rows\n # at scale dataframes seem to compress around 3.5-4.5 times as parquet.\n # TODO: should build a real regression to calc this!\n compression_ratio = 4\n # 60MB compressed is ideal for Spectrum\n ideal_size = compression_ratio * (60 * float(1 << 20))\n # short circut if < ideal size\n batch_log_message = \"\"\"row size estimate: {row_size_est} bytes.\nnumber of rows: {num_rows} rows\nframe size estimate: {frame_size_est} bytes\ncompression ratio: {compression_ratio}:1\nideal size: {ideal_size} bytes\n\"\"\"\n logger.debug(batch_log_message)\n if ideal_size > frame_size_est:\n return tuple([dataframe])\n\n # math the number of estimated partitions\n sized_frames = []\n num_partitions = int(row_size_est * num_rows / ideal_size)\n rows_per_partition = int(num_rows / num_partitions)\n # for each partition do the thing\n for index, lower in enumerate(range(0, num_rows, rows_per_partition)):\n lower = lower if lower == 0 else lower + 1\n if index + 1 == num_partitions:\n upper = num_rows\n else:\n upper = lower + rows_per_partition\n sized_frames.append(dataframe[lower:upper])\n logger.info(f\"sized out {len(sized_frames)} dataframes.\")\n return tuple(sized_frames)", "def splitFeatureToPartsBasedOn112record(self,testSampleFilename, stepSize=4, groupSize=28):\n testsetDF=pd.read_csv(testSampleFilename)\n testsetDF=testsetDF.drop_duplicates(['content'])\n testsetDF.reset_index()\n print(testsetDF['label'])\n baseCounter=0\n # stepSize=4\n # groupSize=36\n #totalSize=(testsetDF.shape[0])\n # iterations=4\n overList=[]\n smallList=[]\n counter=1\n for k in range(0,groupSize,stepSize):\n smallList = []\n baseCounter=k\n for i in range(0,stepSize):\n print('Processing index')\n for j in range(baseCounter,baseCounter+stepSize):\n smallList.append(j)\n print(j,end=',')\n #Extract to dataframe\n baseCounter=baseCounter+groupSize\n print('\\n')\n overList.append(smallList)\n df=testsetDF.iloc[smallList]\n newfilename=testSampleFilename.split('.')[0]+'.features.'+str(counter).zfill(3)\n print(df['label'])\n df.to_csv(newfilename, ',', mode='w', header=True, index=False,columns=('filename','content','label','vector'))\n counter=counter+1\n # print(df)\n\n print(overList)\n print(len(overList))\n pass", "def splitFeatureToParts(self,testSampleFilename, stepSize=4, groupSize=36):\n testsetDF=pd.read_csv(testSampleFilename)\n 
testsetDF=testsetDF.drop_duplicates(['content'])\n testsetDF.reset_index()\n print(testsetDF['label'])\n baseCounter=0\n # stepSize=4\n # groupSize=36\n #totalSize=(testsetDF.shape[0])\n # iterations=4\n overList=[]\n smallList=[]\n counter=1\n for k in range(0,groupSize,stepSize):\n smallList = []\n baseCounter=k\n for i in range(0,stepSize):\n print('Processing index')\n for j in range(baseCounter,baseCounter+stepSize):\n smallList.append(j)\n print(j,end=',')\n #Extract to dataframe\n baseCounter=baseCounter+groupSize\n print('\\n')\n overList.append(smallList)\n df=testsetDF.iloc[smallList]\n newfilename=testSampleFilename.split('.')[0]+'.features.'+str(counter).zfill(3)\n print(df['label'])\n df.to_csv(newfilename, ',', mode='w', header=True, index=False,columns=('filename','content','label','vector'))\n counter=counter+1\n # print(df)\n\n print(overList)\n print(len(overList))\n pass", "def _create_chunks_non_binary(self):\r\n if self._file_path[-4:] in {'.txt', '.csv'}:\r\n self._chunks = pd.read_csv(self._file_path, chunksize=self._chunk_size, dtype=self._dtype, **self._kwargs)\r\n elif self._file_path[-3:] == '.h5':\r\n self._chunks = pd.read_hdf(self._file_path, chunksize=self._chunk_size, **self._kwargs)\r\n if self._split_by is not None:\r\n msg = \"Build the summary based on the split-by column ...\"\r\n self._log_and_print(self._log_queue, msg)\r\n dfs = []\r\n for chunk in self._chunks:\r\n df = self._summarize_by_column(chunk)\r\n dfs.append(df)\r\n self.split_by_sum_df = self._merge(dfs)\r\n self.split_by_sum_df.sort_values(by='min', inplace=True)\r\n if self._file_path[-4:] in {'.txt', '.csv'}:\r\n self._reader = pd.read_csv(self._file_path, iterator=True, dtype=self._dtype, **self._kwargs)\r\n elif self._file_path[-3:] == '.h5':\r\n self._reader = pd.read_hdf(self._file_path, iterator=True, **self._kwargs)", "def split_df(dfm, chunk_size):\n indices = index_marks(dfm.shape[0], chunk_size)\n return np.array_split(dfm, indices)", "def slice_pandify(bigdf_iter, numsplits, df_filepath):\n \n global numschools # Access numschools from within function (this is roughly 7000)\n wheresplit = int(round(float(numschools)/float(numsplits))) # Get number on which to split (e.g., 1000) based on total number of schools data. This splitting number will be used to iterate over numsplits\n logging.info(\"Splitting on the number \" + str(wheresplit))\n \n for num in tqdm(range(numsplits), desc=\"Loading \" + str(numsplits) + \" DF slices\"): # Wrap iterator with tqdm to show progress bar\n startnum, endnum = wheresplit*int(num),wheresplit*int(num+1)\n try:\n dfslice = pd.DataFrame()\n dfslice = bigdf_iter.get_chunk(wheresplit) # Get next chunk of rows \n #logging.info(str(dfslice.keys()))\n #logging.info(str(dfslice.info()))\n dfslice = dfslice[dfslice.ADDRESS14 != 'ADDRESS14'] # Clean out any cases of header being written as row\n\n #dfslice = bigdf_iter.iloc[startnum:endnum,:]\n #print(\"Loading DF parsing output for slice #\" + str(num) + \" of \" + str(numschools) + \" school websites, from #\" + str(startnum) + \"-\" + str(endnum) + \"...\")\n logging.info(\"Loading parsing output for slice #\" + str(num) + \" of \" + str(numschools) + \" school websites, from #\" + str(startnum) + \"-\" + str(endnum) + \"...\")\n \n '''if num==0: # Save first slice to new file (overwriting if needed)\n dfslice = pandify_webtext(dfslice) # Load parsed output into the DF\n logging.info(\"Slice #\" + str(num) + \" loaded! 
Saving file...\")\n dfslice.to_csv(df_filepath, mode=\"w\", index=False, header=dfslice.columns.values, sep=\"\\t\", encoding=\"utf-8\")\n print(\"Slice #\" + str(num) + \" saved to \" + df_filepath + \"!\")\n logging.info(\"Slice #\" + str(num) + \" saved to \" + df_filepath + \"!\")\n if num<20:\n pass\n elif num==20:\n dfslice = pandify_webtext(dfslice) # Load parsed output into the DF\n logging.info(dfslice[[\"SCH_NAME\", \"FOLDER_NAME\", \"html_file_count\"]])\n print(dfslice[[\"SCH_NAME\", \"FOLDER_NAME\", \"html_file_count\"]])\n sys.exit()\n else:\n dfslice = pandify_webtext(dfslice) # Load parsed output into the DF\n logging.info(\"Slice loaded! Saving file...\")\n dfslice.to_csv(df_filepath, mode=\"a\", index=False, header=False, sep=\"\\t\", encoding=\"utf-8\")\n print(\"Slice #\" + str(num) + \" saved to \" + df_filepath + \"!\")\n logging.info(\"Slice #\" + str(num) + \" saved to \" + df_filepath + \"!\")'''\n \n # Skip sites that present parsing problems--namely #3361, which has 10K+ html pages and is in /vol_b/data/wget/parll_wget/Gentilly_Terrace_Elementary_School_LA/www.brothermartin.com\n if num==3361: #(284 or 441 or 593 or 594 or 595 or 596 or 1159 or 1218 or 1219 or 1271 or 1297 or 1303 or 1667 or 1861 or 3361 or 4467 or 4836 or 4871 or 4910 or 5418): # or num==441 or num==593: # Skip Primavera_-_Online_AZ', which is slice #284 if numsplits = 6752\n continue # Move on to next slice\n # TO DO: Clean out excess HTML (e.g., blog posts) in wget downloads for these schools\n \n dfslice = convert_df(dfslice) # Make this DF as memory-efficient as possible by appropriately converting column dtypes \n dfslice = pandify_webtext(dfslice) # Load parsed output into the DF\n logging.info(dfslice[[\"FOLDER_NAME\", \"html_file_count\"]])\n logging.info(\"Slice #\" + str(num) + \" loaded! Saving file...\")\n \n if num==0: # Save first slice to new file (overwriting if needed)\n dfslice.to_csv(df_filepath, mode=\"w\", index=False, header=dfslice.columns.values, sep=\"\\t\", encoding=\"utf-8\")\n else: # Append next slice to existing file\n dfslice.to_csv(df_filepath, mode=\"a\", index=False, header=False, sep=\"\\t\", encoding=\"utf-8\")\n #save_datafile(dfslice,df_filepath,\"CSV\") # BROKEN function--Save slice to file--should work whether writing new file or appending to CSV\n \n logging.info(\"Slice #\" + str(num) + \" saved to \" + df_filepath + \"!\")\n del dfslice # Free memory by deleting this temporary, smaller slice\n \n except Exception as e:\n logging.critical(\"\\nERROR! Script failed to load parsing output into DataFrame slice #\" + str(num) + \" of \" + str(numsplits) + \", for schools #\" + str(startnum) + \"-\" + str(endnum) + \".\\n\" + str(e))\n print(\" ERROR! 
Script failed to load parsing output into DataFrame slice #\" + str(num) + \" of \" + str(numsplits) + \", for schools #\" + str(startnum) + \"-\" + str(endnum) + \".\", str(e))\n #sys.exit()\n continue\n \n return", "def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b", "def split_by_indel_size(df, indel_class):\n if indel_class == \"s\":\n return df[df[\"indel_size\"] == 1].reset_index(drop=True)\n else:\n return df[df[\"indel_size\"] > 1].reset_index(drop=True)", "def split_data(hdf5_path, split_size, output_dir=None):\n f = h5py.File(hdf5_path)\n n = 0\n # Find the largest n.\n for k,v in f.iteritems():\n n = max(n, v.value.shape[0])\n\n if output_dir is None:\n output_dir = os.path.splitext(hdf5_path)[0]\n os.mkdir(output_dir)\n\n # Copy subsequences of the data to smaller files.\n width = int(np.ceil(np.log10(n / split_size)))\n for i,j in enumerate(range(0, n, split_size)):\n outfile = '{dir}/{num:{fill}{width}}.h5'.format(\n dir=output_dir, num=i, fill='0', width=width)\n print(outfile)\n fout = h5py.File(outfile, 'w')\n for k,v in f.iteritems():\n subset = v[j:j+split_size]\n fout.create_dataset(k, data=subset, dtype=v.dtype)\n fout.close()", "def return_size(df):\n return round(sys.getsizeof(df) / 1e9, 2)", "def make_big_frame():\n \n # pull each csv into a list\n dfs = []\n #insert path here\n #put in fullpath to file, and filetype = csv, txt, etc.\n path = \"/home/rafa/github/For Businesses/Infinite-Agency/BG/*.csv\" \n count = 0\n for fname in glob.glob(path):\n count += 1\n print 'file number:{}'.format(count)\n dfs.append(pd.read_csv(fname, low_memory=False))\n \n # creates a set of their column lengths\n shape_set = set([df.shape[1] for df in dfs])\n big_frame = pd.concat(dfs, ignore_index = True, axis = 0)\n \n if len(shape_set) > 1:\n print 'Unequal Columns!'\n else:\n print 'Columns are equal length :)\\n Shape = {}'.format(big_frame.shape)\n \n\tbig_frame.dropna(axis = 1, how = 'all', inplace=True)\n\tdrop_nonunique(big_frame)\n\tprint 'concat is complete: shape = {}'.format(big_frame.shape)\n\tbig_frame.drop_duplicates(inplace = True, subset = ['ConversionID'])\n\tbig_frame.drop_duplicates(inplace = True, subset = ['ConversionID'])\n\tbig_frame.reset_index(inplace=True)\n\n\tprint 'Deleted duplicates and Reset Index, new shape = {}'.format(big_frame.shape)\n\tdate_list = make_datelist(big_frame)\n\tbig_frame = change_date(big_frame, date_list)\n\tbig_frame = make_path_columns(big_frame)\n\treturn big_frame", "def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n 
df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points 
in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM", "def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def df2db_separate(self, df: pd.DataFrame, tab_name):\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n\n max_df_size = 50000\n\n dfs = df_split(df, batch_size=max_df_size)\n num_piece = len(dfs)\n\n dfs[0].to_sql(tab_name, self.engine, method='multi', index=False)\n if num_piece > 1:\n for pdf in dfs[1:]:\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))\n pdf.to_sql(tab_name + '_tmp', self.engine, method='multi', index=False)\n self.execute(\"INSERT INTO TABLE {tn} SELECT * FROM {tt}\".format(\n tn=tab_name, tt=tab_name + '_tmp'\n ))\n print(len(pdf))\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))", "def split_data(df, train_size=.7, seed=123):\n # Create the train and test sets\n train, test = train_test_split(df, train_size=train_size, random_state=seed)\n\n return train, test", "def make_bedfiles():\n df = pd.read_csv(\"%s.length\" % ref, sep='\\t', header=None)\n thresh = math.ceil(sum(df[1]) / globals()['jobs_per_pool'])\n lines = []\n fcount = 0\n fsum = 0\n for count,row in enumerate(df.index):\n contig, length = list(df.loc[row, :])\n fsum += length\n lines.append([contig, str(length)])\n if fsum >= thresh or count + 1 == len(df.index):\n make_bedfile(lines, fcount)\n lines = []\n fcount += 1\n fsum = 0\n return fcount", "def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Embed words in a sequence using GloVe model
def __glove_embed__(sequence, model):
    embedded = []
    for word in sequence:
        embedded.append(model[word])
    return embedded
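For illustration only, a minimal usage sketch of the __glove_embed__ function above, assuming GloVe vectors are loaded into a plain Python dict so that model[word] lookups work as the function expects. The file name glove.6B.50d.txt, the load_glove helper, and the sample sentence are hypothetical, not part of the dataset row; words missing from the vocabulary would raise a KeyError.

import numpy as np

def load_glove(path="glove.6B.50d.txt"):  # hypothetical local GloVe text file
    # Each line of a GloVe text file is: word followed by its vector components, space-separated.
    model = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.rstrip().split(" ")
            model[parts[0]] = np.asarray(parts[1:], dtype="float32")
    return model

model = load_glove()
sequence = ["the", "quick", "brown", "fox"]  # assumed pre-tokenized, lowercased input
vectors = __glove_embed__(sequence, model)   # list of 50-dimensional numpy arrays
print(len(vectors), vectors[0].shape)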
[ "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def embed_text(self, text):\n\n marked_text = \"[CLS] \" + text + \" [SEP]\"\n # Tokenize sentence with the BERT tokenizer\n tokenized_text = self.tokenizer.tokenize(marked_text)\n\n\n # Map the token strings to their vocabulary indeces\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n\n\n # Mark each of the tokens as belonging to sentence \"1\"\n segments_ids = [1] * len(tokenized_text)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n\n with torch.no_grad():\n outputs = self.model(tokens_tensor, segments_tensors)\n hidden_states = outputs[2]\n\n token_embeddings = torch.stack(hidden_states, dim=0)\n # Remove dimension 1, the \"batches\".\n token_embeddings = torch.squeeze(token_embeddings, dim=1)\n # Swap dimensions 0 and 1. to tokens x layers x embedding\n token_embeddings = token_embeddings.permute(1,0,2)\n\n # choose to concatenate last four layers, dim 4x 768 = 3072\n token_vecs_cat= [torch.cat((token[-1], token[-2], token[-3], token[-4]), dim=0) for token in token_embeddings]\n # drop the CLS and the SEP tokens and embedding\n token_vecs_cat=token_vecs_cat[1:-1]\n tokenized_text =tokenized_text[1:-1]\n\n # chose to summarize the last four layers\n #token_vecs_sum=[torch.sum(token[-4:], dim=0) for token in token_embeddings]\n\n # sentence embedding\n # Calculate the average of all token vectors for the second last layers\n sentence_embedding = torch.mean(hidden_states[-2][0], dim=0)\n\n return token_vecs_cat, sentence_embedding, tokenized_text", "def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)", "def rev_gen( model, vocab, start_word=SOS):\n print('Generating sample review .....................')\n with torch.no_grad():\n word_idx = vocab.w2i[start_word]\n all_words = []\n all_words.append(start_word)\n while word_idx != vocab.w2i[EOS]:\n word_tensor = Data2tensor.idx2tensor([[word_idx]])\n hidden = model.init_hidden(word_tensor.size(0))\n output, hidden = model(word_tensor, hidden)\n label_prob, label_pred = model.inference(output)\n word_idx = label_pred.data[0][0].data.numpy()[0]\n all_words.append(vocab.i2w[word_idx])\n\n\n return ' '.join(all_words)", "def augment_squad(\n squad_path: Path,\n output_path: Path,\n glove_path: Path = Path(\"glove.txt\"),\n model: str = 
\"bert-base-uncased\",\n tokenizer: str = \"bert-base-uncased\",\n multiplication_factor: int = 20,\n word_possibilities: int = 20,\n replace_probability: float = 0.4,\n device: Union[str, torch.device] = \"cpu:0\",\n batch_size: int = 16,\n):\n device = torch.device(device)\n # loading model and tokenizer\n transformers_model = AutoModelForMaskedLM.from_pretrained(model)\n transformers_model.to(device)\n transformers_tokenizer = AutoTokenizer.from_pretrained(tokenizer, use_fast=False)\n # load glove for words that do not have one distinct token, but are split into subwords\n word_id_mapping, id_word_mapping, vectors = load_glove(glove_path=glove_path, device=device)\n\n # load squad dataset\n with open(squad_path, \"r\") as f:\n squad = json.load(f)\n\n topics = []\n\n for topic in tqdm(squad[\"data\"]):\n paragraphs = []\n for paragraph in topic[\"paragraphs\"]:\n # make every question unanswerable as answer strings will probably not match and aren't relevant for distillation\n for question in paragraph[\"qas\"]:\n question[\"answers\"] = []\n context = paragraph[\"context\"]\n contexts = augment(\n word_id_mapping=word_id_mapping,\n id_word_mapping=id_word_mapping,\n vectors=vectors, # type: ignore [arg-type]\n model=transformers_model,\n tokenizer=transformers_tokenizer,\n text=context,\n multiplication_factor=multiplication_factor,\n word_possibilities=word_possibilities,\n replace_probability=replace_probability,\n device=device,\n batch_size=batch_size,\n )\n paragraphs_ = []\n for context in contexts:\n new_paragraph = deepcopy(paragraph)\n new_paragraph[\"context\"] = context\n paragraphs_.append(new_paragraph)\n paragraphs += paragraphs_\n topic[\"paragraphs\"] = paragraphs\n topics.append(topic)\n squad[\"topics\"] = topics\n\n # save new dataset\n with open(output_path, \"w\") as f:\n json.dump(squad, f)", "def getGloveEmbeddings(model, wrd_list, embed_dims):\n embed_list = []\n\n for wrd in wrd_list:\n if wrd in model.vocab:\n embed_list.append(model.word_vec(wrd))\n else:\n embed_list.append(np.random.randn(embed_dims)) # Generates a random vector for words not in vocab\n\n return np.array(embed_list, dtype=np.float32)", "def Emojify_V2(input_shape, word_to_vec_map, word_to_index):\n\n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\n sentence_indices = Input(shape = input_shape, dtype = 'int32')\n\n # Create the embedding layer pretrained with GloVe Vectors (鈮? 
line)\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\n\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\n embeddings = embedding_layer(sentence_indices) \n\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # Be careful, the returned output should be a batch of sequences.\n X = LSTM(128, return_sequences=True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(128, return_sequences=False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(0.5)(X)\n # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.\n X = Dense(5, activation='softmax')(X)\n # Add a softmax activation\n X = Activation('softmax')(X)\n\n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs=sentence_indices ,outputs=X)\n\n ### END CODE HERE ###\n\n return model", "def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings", "def generate_sentence(model, opener_words):\n\n sentence=[]\n #sentences between 3 and 15 words\n length= random.randint(3,6)\n keys=model.keys()\n bigram=random.choice(list(keys))\n\n #choose a first word that can be a starter word\n while bigram[0] not in opener_words:\n bigram=random.choice(list(keys))\n #iterate until sentence is correct length\n for i in range(0,length):\n matches=[]\n found=False\n while not found:\n\n #search in keys for key[0] to match the bigram[1]\n for key in keys:\n regex=re.compile(r\"\\b%s\\b\"%bigram[1])\n result=regex.match(key[0])\n if result:\n matches.append(key)\n found=True\n if not found:\n matches=[]\n i=0\n bigram=random.choice(list(keys))\n sentence.pop()\n\n #add first member of bigram to sentence list\n sentence.append(bigram[1])\n #choose next bigram from the list of matches\n bigram=random.choice(matches)\n\n #combine strings from list\n return \" \".join(sentence)", "def Emojify_V2(input_shape, word_to_vec_map, word_to_index):\n\n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph, it should be of shape input_shape and dtype 'int32' (as it contains indices).\n sentence_indices = Input(shape=input_shape, dtype = 'int32')\n\n # Create the embedding layer pretrained with GloVe Vectors (鈮? 
line)\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\n\n # Propagate sentence_indices through your embedding layer, you get back the embeddings\n embeddings = embedding_layer(sentence_indices)\n\n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # Be careful, the returned output should be a batch of sequences.\n X = LSTM(128, return_sequences=True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # Be careful, the returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(128, return_sequences=False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(0.5)(X)\n # Propagate X through a Dense layer with softmax activation to get back a batch of 5-dimensional vectors.\n X = Dense(5, activation='softmax')(X)\n # Add a softmax activation\n X = Activation('softmax')(X)\n\n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs=sentence_indices ,outputs=X)\n\n ### END CODE HERE ###\n\n return model", "def Emojify_V2(input_shape, word_to_vec_map, word_to_index):\n \n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph.\n # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).\n sentence_indices = Input(shape = input_shape, dtype = 'int32')\n \n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\n # def pretrained_embedding_layer(word_to_vec_map, word_to_index): # return embedding_layer\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\n \n # Propagate sentence_indices through your embedding layer\n # (See additional hints in the instructions).\n embeddings = embedding_layer(sentence_indices) \n \n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # The returned output should be a batch of sequences.\n X = LSTM(units = 128, return_sequences = True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(rate = 0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # The returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(units = 128, return_sequences = False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(rate = 0.5)(X) \n # Propagate X through a Dense layer with 5 units\n X = Dense(units = 5)(X)\n # Add a softmax activation\n X = Activation(activation = 'softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs = sentence_indices, outputs = X)\n \n ### END CODE HERE ###\n \n return model", "def generate(fit_model):\n pred_indices = []\n pred_words = []\n # Replace start_index with actual start token\n start_index = random.randint(0, len(text) - maxlen - 1)\n current_vec = glove_matrix.get_vec(start_index)\n\n for iteration in range(NUM_PRED_WORDS):\n preds = fit_model.predict(current_vec, verbose=0)\n pred_index = sample(preds)\n pred_indices = pred_indices + [next_index]\n pred_words = pred_words + [glove_matrix.get_word(pred_index)]\n current_vec = glove_matrix.get_vec(pred_index)\n\n assert NUM_PRED_WORDS == len(pred_words)\n return pred_words", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return 
embedding", "def lemmatize_verbs(self):\n lemmas = []\n # lemmas = \"\"\n for word in self.words:\n lemma = wn.lemmatize(word, pos='v')\n lemmas.append(lemma)\n # lemmas += f\"{lemma} \"\n self.words = lemmas\n return self", "def load_glove_embeddings():\n #if you are running on the CSE machines, you can load the glove data from here\n #data = open(\"/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n\n word_index_dict = {}\n word_index_dict['UNK'] = 0\n embeddings = np.ndarray(shape=(500001, batch_size), dtype='float32')\n embeddings_list = []\n i = 1\n for line in data:\n load_array = line.split()\n # Sets the word to the 0th value in array\n word = load_array[0] \n # Other values are the assigned index\n values = np.asarray(load_array[1:], dtype='float32')\n # Put values in row of array\n embeddings[i] = values\n # E.g. word_index_dict[\"the\"] = 0\n word_index_dict[word] = i\n i = i+1\n data.close()\n return embeddings, word_index_dict", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n\n ### START CODE HERE ###\n # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n emb_matrix = np.zeros((vocab_len, emb_dim))\n\n # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n for word, index in word_to_index.items():\n emb_matrix[index, :] = word_to_vec_map[word]\n\n # Define Keras embedding layer with the correct output/input sizes, make it trainable.\n # Use Embedding(...). Make sure to set trainable=False.\n embedding_layer = Embedding(vocab_len,emb_dim, trainable=False)\n ### END CODE HERE ###\n\n # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.build((None,))\n\n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n\n return embedding_layer", "def generate_phonetic_embs_from_words(words, char_to_index_path, phone_to_index_path):\n print 'Generating phonetic embeddings for GloVe words'\n char_to_index = pickle.load(open(char_to_index_path, 'rb'))\n phone_to_index = pickle.load(open(phone_to_index_path, 'rb'))\n character_vocab_size = len(char_to_index)\n phoneme_vocab_size = len(phone_to_index)\n model_inputs, model_outputs = build_chars_to_phonemes_model(character_vocab_size, phoneme_vocab_size)\n [tf_words, tf_batch_size] = model_inputs\n [tf_phonemes, lstm_hidden_state] = model_outputs\n tf_phonetic_emb = tf.concat(1, lstm_hidden_state)\n\n np_word_indices = convert_words_to_indices(words, char_to_index)\n print np_word_indices\n # Prove words converted to indices correctly by reversing the process and printing.\n index_to_char = invert_dictionary(char_to_index)\n print 'Example GloVe words recreated from indices:'\n for i in range(130, 140):\n np_word = np_word_indices[i, :]\n char_list = []\n for j in np_word:\n if j in index_to_char:\n char_list.append(index_to_char[j])\n word = ''.join(char_list)\n print word,\n print\n\n sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=GPU_OPTIONS))\n saver = tf.train.Saver(max_to_keep=10)\n # Restore model from previous save.\n ckpt = tf.train.get_checkpoint_state(CHAR_2_PHONE_MODEL_DIR)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print(\"No checkpoint found!\")\n return -1\n\n np_phonetic_emb = sess.run(tf_phonetic_emb, feed_dict={tf_words: np_word_indices,\n tf_batch_size: len(words),})\n\n print np_phonetic_emb.shape\n print np.mean(np.abs(np_phonetic_emb))\n\n return np_phonetic_emb", "def embed(self, word: Any) -> dy.Expression:\n raise NotImplementedError('embed must be implemented in Embedder subclasses')", "def generate_sentence(word1, word2, length, vocab, model):\n reverse_vocab = {idx: word for word, idx in vocab.items()}\n output_string = np.zeros((1, length), dtype=np.int)\n output_string[:, 0: 2] = vocab[word1], vocab[word2]\n\n for end in range(2, length):\n start = end - 2\n output_string[:, end] = np.argmax(model(output_string[:, start:end]), axis=1)\n text = [reverse_vocab[i] for i in list(output_string[0])]\n \n print(\" \".join(text))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }